[VOL-5486] Fix deprecated versions
Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config.go b/vendor/go.etcd.io/etcd/server/v3/embed/config.go
new file mode 100644
index 0000000..8f4a6aa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config.go
@@ -0,0 +1,1678 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "math"
+ "net"
+ "net/http"
+ "net/netip"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+ "sigs.k8s.io/yaml"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/srv"
+ "go.etcd.io/etcd/client/pkg/v3/tlsutil"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/pkg/v3/flags"
+ "go.etcd.io/etcd/pkg/v3/netutil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/features"
+)
+
+const (
+ ClusterStateFlagNew = "new"
+ ClusterStateFlagExisting = "existing"
+
+ DefaultName = "default"
+ DefaultMaxSnapshots = 5
+ DefaultMaxWALs = 5
+ DefaultMaxTxnOps = uint(128)
+ DefaultWarningApplyDuration = 100 * time.Millisecond
+ DefaultWarningUnaryRequestDuration = 300 * time.Millisecond
+ DefaultMaxRequestBytes = 1.5 * 1024 * 1024
+ DefaultMaxConcurrentStreams = math.MaxUint32
+ DefaultGRPCKeepAliveMinTime = 5 * time.Second
+ DefaultGRPCKeepAliveInterval = 2 * time.Hour
+ DefaultGRPCKeepAliveTimeout = 20 * time.Second
+ DefaultDowngradeCheckTime = 5 * time.Second
+ DefaultAutoCompactionMode = "periodic"
+ DefaultAutoCompactionRetention = "0"
+ DefaultAuthToken = "simple"
+ DefaultCompactHashCheckTime = time.Minute
+ DefaultLoggingFormat = "json"
+
+ DefaultDiscoveryDialTimeout = 2 * time.Second
+ DefaultDiscoveryRequestTimeOut = 5 * time.Second
+ DefaultDiscoveryKeepAliveTime = 2 * time.Second
+ DefaultDiscoveryKeepAliveTimeOut = 6 * time.Second
+ DefaultDiscoveryInsecureTransport = true
+ DefaultSelfSignedCertValidity = 1
+ DefaultTLSMinVersion = string(tlsutil.TLSVersion12)
+
+ DefaultListenPeerURLs = "http://localhost:2380"
+ DefaultListenClientURLs = "http://localhost:2379"
+
+ DefaultLogOutput = "default"
+ JournalLogOutput = "systemd/journal"
+ StdErrLogOutput = "stderr"
+ StdOutLogOutput = "stdout"
+
+ // DefaultLogRotationConfig is the default configuration used for log rotation.
+ // Log rotation is disabled by default.
+ // MaxSize = 100 // MB
+ // MaxAge = 0 // days (no limit)
+ // MaxBackups = 0 // no limit
+	// LocalTime = false // use the computer's local time; UTC by default
+ // Compress = false // compress the rotated log in gzip format
+ DefaultLogRotationConfig = `{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}`
+
+ // ExperimentalDistributedTracingAddress is the default collector address.
+ // TODO: delete in v3.7
+ // Deprecated: Use DefaultDistributedTracingAddress instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingAddress = "localhost:4317"
+ // DefaultDistributedTracingAddress is the default collector address.
+ DefaultDistributedTracingAddress = "localhost:4317"
+ // ExperimentalDistributedTracingServiceName is the default etcd service name.
+ // TODO: delete in v3.7
+ // Deprecated: Use DefaultDistributedTracingServiceName instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingServiceName = "etcd"
+ // DefaultDistributedTracingServiceName is the default etcd service name.
+ DefaultDistributedTracingServiceName = "etcd"
+
+ DefaultExperimentalTxnModeWriteWithSharedBuffer = true
+
+ // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
+ // It's enabled by default.
+ DefaultStrictReconfigCheck = true
+
+ // maxElectionMs specifies the maximum value of election timeout.
+ // More details are listed on etcd.io/docs > version > tuning/#time-parameters
+ maxElectionMs = 50000
+ // backend freelist map type
+ freelistArrayType = "array"
+
+ ServerFeatureGateFlagName = "feature-gates"
+)
+
+var (
+ ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
+ "Choose one of \"initial-cluster\", \"discovery\", \"discovery-endpoints\" or \"discovery-srv\"")
+ ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
+ ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined")
+
+ DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
+ DefaultAdvertiseClientURLs = "http://localhost:2379"
+
+ defaultHostname string
+ defaultHostStatus error
+
+ // indirection for testing
+ getCluster = srv.GetCluster
+
+	// In v3.6, we are migrating all the --experimental flags to feature gates or flags without the prefix.
+	// This is the mapping from the non-boolean `experimental-` flags to the new flags.
+ // TODO: delete in v3.7
+ experimentalFlagMigrationMap = map[string]string{
+ "experimental-compact-hash-check-time": "compact-hash-check-time",
+ "experimental-corrupt-check-time": "corrupt-check-time",
+ "experimental-compaction-batch-limit": "compaction-batch-limit",
+ "experimental-watch-progress-notify-interval": "watch-progress-notify-interval",
+ "experimental-warning-apply-duration": "warning-apply-duration",
+ "experimental-bootstrap-defrag-threshold-megabytes": "bootstrap-defrag-threshold-megabytes",
+ "experimental-memory-mlock": "memory-mlock",
+ "experimental-snapshot-catchup-entries": "snapshot-catchup-entries",
+ "experimental-compaction-sleep-interval": "compaction-sleep-interval",
+ "experimental-downgrade-check-time": "downgrade-check-time",
+ "experimental-peer-skip-client-san-verification": "peer-skip-client-san-verification",
+ "experimental-enable-distributed-tracing": "enable-distributed-tracing",
+ "experimental-distributed-tracing-address": "distributed-tracing-address",
+ "experimental-distributed-tracing-service-name": "distributed-tracing-service-name",
+ "experimental-distributed-tracing-instance-id": "distributed-tracing-instance-id",
+ "experimental-distributed-tracing-sampling-rate": "distributed-tracing-sampling-rate",
+ }
+)
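+
+// Illustrative sketch (editor's note, not the actual migration code): conceptually,
+// a flag set under its old "experimental-" name is translated to its replacement
+// name via this map before further processing, e.g.:
+//
+//	if newName, ok := experimentalFlagMigrationMap["experimental-corrupt-check-time"]; ok {
+//		// newName == "corrupt-check-time"
+//		_ = newName
+//	}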
+
+var (
+ // CompactorModePeriodic is periodic compaction mode
+ // for "Config.AutoCompactionMode" field.
+ // If "AutoCompactionMode" is CompactorModePeriodic and
+	// "AutoCompactionRetention" is "1h", it automatically
+	// compacts storage every hour.
+ CompactorModePeriodic = v3compactor.ModePeriodic
+
+ // CompactorModeRevision is revision-based compaction mode
+ // for "Config.AutoCompactionMode" field.
+ // If "AutoCompactionMode" is CompactorModeRevision and
+ // "AutoCompactionRetention" is "1000", it compacts log on
+ // revision 5000 when the current revision is 6000.
+	// This runs every 5 minutes if enough revisions have accumulated.
+ CompactorModeRevision = v3compactor.ModeRevision
+)
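+
+// Illustrative sketch (editor's note): a minimal periodic auto-compaction setup
+// on an embedded Config; the retention value is an assumption chosen for
+// illustration.
+//
+//	cfg := NewConfig()
+//	cfg.AutoCompactionMode = CompactorModePeriodic // equivalent to "periodic"
+//	cfg.AutoCompactionRetention = "1h"             // compact away history older than ~1 hour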
+
+func init() {
+ defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
+}
+
+// Config holds the arguments for configuring an etcd server.
+type Config struct {
+ Name string `json:"name"`
+ Dir string `json:"data-dir"`
+ //revive:disable-next-line:var-naming
+ WalDir string `json:"wal-dir"`
+
+ // SnapshotCount is the number of committed transactions that trigger a snapshot to disk.
+ // TODO: remove it in 3.7.
+ // Deprecated: Will be decommissioned in v3.7.
+ SnapshotCount uint64 `json:"snapshot-count"`
+
+ // ExperimentalSnapshotCatchUpEntries is the number of entries for a slow follower
+	// to catch up after compacting the raft storage entries.
+	// We expect the follower to have millisecond-level latency to the leader.
+	// The max throughput is around 10K. Keeping 5K entries is enough to help
+	// a follower catch up.
+ // TODO: remove in v3.7.
+ // Note we made a mistake in https://github.com/etcd-io/etcd/pull/15033. The json tag
+ // `*-catch-up-*` isn't consistent with the command line flag `*-catchup-*`.
+ // Deprecated: Use SnapshotCatchUpEntries instead. Will be removed in v3.7.
+ ExperimentalSnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"`
+
+	// SnapshotCatchUpEntries is the number of entries for a slow follower
+	// to catch up after compacting the raft storage entries.
+	// We expect the follower to have millisecond-level latency to the leader.
+	// The max throughput is around 10K. Keeping 5K entries is enough to help
+	// a follower catch up.
+ SnapshotCatchUpEntries uint64 `json:"snapshot-catchup-entries"`
+
+ // MaxSnapFiles is the maximum number of snapshot files.
+ // TODO: remove it in 3.7.
+ // Deprecated: Will be removed in v3.7.
+ MaxSnapFiles uint `json:"max-snapshots"`
+ //revive:disable-next-line:var-naming
+ MaxWalFiles uint `json:"max-wals"`
+
+ // TickMs is the number of milliseconds between heartbeat ticks.
+ // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
+ // make ticks a cluster wide configuration.
+ TickMs uint `json:"heartbeat-interval"`
+ ElectionMs uint `json:"election-timeout"`
+
+	// If InitialElectionTickAdvance is true, the local member fast-forwards
+	// election ticks to speed up the "initial" leader election trigger. This
+	// benefits the case of larger election ticks. For instance, a cross
+	// datacenter deployment may require a longer election timeout of 10 seconds.
+	// If true, the local node does not need to wait up to 10 seconds. Instead,
+	// it fast-forwards its election ticks to 8 seconds, leaving only 2 seconds
+	// before leader election.
+ //
+ // Major assumptions are that:
+ // - cluster has no active leader thus advancing ticks enables faster
+ // leader election, or
+ // - cluster already has an established leader, and rejoining follower
+ // is likely to receive heartbeats from the leader after tick advance
+ // and before election timeout.
+ //
+	// However, when the network from the leader to a rejoining follower is
+	// congested and the follower does not receive a leader heartbeat within
+	// the remaining election ticks, a disruptive election has to happen,
+	// affecting cluster availability.
+ //
+	// Disabling this would slow down the initial bootstrap process for cross
+	// datacenter deployments. Make your own tradeoffs by configuring
+ // --initial-election-tick-advance at the cost of slow initial bootstrap.
+ //
+ // If single-node, it advances ticks regardless.
+ //
+ // See https://github.com/etcd-io/etcd/issues/9333 for more detail.
+ InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
+
+	// BackendBatchInterval is the maximum time before committing the backend transaction.
+ BackendBatchInterval time.Duration `json:"backend-batch-interval"`
+	// BackendBatchLimit is the maximum number of operations before committing the backend transaction.
+ BackendBatchLimit int `json:"backend-batch-limit"`
+ // BackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
+ BackendFreelistType string `json:"backend-bbolt-freelist-type"`
+ QuotaBackendBytes int64 `json:"quota-backend-bytes"`
+ MaxTxnOps uint `json:"max-txn-ops"`
+ MaxRequestBytes uint `json:"max-request-bytes"`
+
+ // MaxConcurrentStreams specifies the maximum number of concurrent
+ // streams that each client can open at a time.
+ MaxConcurrentStreams uint32 `json:"max-concurrent-streams"`
+
+ //revive:disable:var-naming
+ ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL
+ AdvertisePeerUrls, AdvertiseClientUrls []url.URL
+ //revive:enable:var-naming
+
+ ClientTLSInfo transport.TLSInfo
+ ClientAutoTLS bool
+ PeerTLSInfo transport.TLSInfo
+ PeerAutoTLS bool
+
+ // SelfSignedCertValidity specifies the validity period of the client and peer certificates
+ // that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS,
+	// the unit is years, and the default is 1.
+ SelfSignedCertValidity uint `json:"self-signed-cert-validity"`
+
+ // CipherSuites is a list of supported TLS cipher suites between
+ // client/server and peers. If empty, Go auto-populates the list.
+ // Note that cipher suites are prioritized in the given order.
+ CipherSuites []string `json:"cipher-suites"`
+
+ // TlsMinVersion is the minimum accepted TLS version between client/server and peers.
+ //revive:disable-next-line:var-naming
+ TlsMinVersion string `json:"tls-min-version"`
+
+ // TlsMaxVersion is the maximum accepted TLS version between client/server and peers.
+ //revive:disable-next-line:var-naming
+ TlsMaxVersion string `json:"tls-max-version"`
+
+ ClusterState string `json:"initial-cluster-state"`
+ DNSCluster string `json:"discovery-srv"`
+ DNSClusterServiceName string `json:"discovery-srv-name"`
+ Dproxy string `json:"discovery-proxy"`
+
+ Durl string `json:"discovery"`
+ DiscoveryCfg v3discovery.DiscoveryConfig `json:"discovery-config"`
+
+ InitialCluster string `json:"initial-cluster"`
+ InitialClusterToken string `json:"initial-cluster-token"`
+ StrictReconfigCheck bool `json:"strict-reconfig-check"`
+
+ // AutoCompactionMode is either 'periodic' or 'revision'.
+ AutoCompactionMode string `json:"auto-compaction-mode"`
+ // AutoCompactionRetention is either duration string with time unit
+ // (e.g. '5m' for 5-minute), or revision unit (e.g. '5000').
+ // If no time unit is provided and compaction mode is 'periodic',
+ // the unit defaults to hour. For example, '5' translates into 5-hour.
+ AutoCompactionRetention string `json:"auto-compaction-retention"`
+
+ // GRPCKeepAliveMinTime is the minimum interval that a client should
+ // wait before pinging server. When client pings "too fast", server
+ // sends goaway and closes the connection (errors: too_many_pings,
+ // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
+	// Server expects client pings only when there are active streams
+ // (PermitWithoutStream is set false).
+ GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
+ // GRPCKeepAliveInterval is the frequency of server-to-client ping
+ // to check if a connection is alive. Close a non-responsive connection
+ // after an additional duration of Timeout. 0 to disable.
+ GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
+ // GRPCKeepAliveTimeout is the additional duration of wait
+ // before closing a non-responsive connection. 0 to disable.
+ GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
+
+ // GRPCAdditionalServerOptions is the additional server option hook
+ // for changing the default internal gRPC configuration. Note these
+ // additional configurations take precedence over the existing individual
+ // configurations if present. Please refer to
+ // https://github.com/etcd-io/etcd/pull/14066#issuecomment-1248682996
+ GRPCAdditionalServerOptions []grpc.ServerOption `json:"grpc-additional-server-options"`
+
+ // SocketOpts are socket options passed to listener config.
+ SocketOpts transport.SocketOpts `json:"socket-options"`
+
+ // PreVote is true to enable Raft Pre-Vote.
+ // If enabled, Raft runs an additional election phase
+ // to check whether it would get enough votes to win
+ // an election, thus minimizing disruptions.
+ PreVote bool `json:"pre-vote"`
+
+ CORS map[string]struct{}
+
+ // HostWhitelist lists acceptable hostnames from HTTP client requests.
+ // Client origin policy protects against "DNS Rebinding" attacks
+ // to insecure etcd servers. That is, any website can simply create
+ // an authorized DNS name, and direct DNS to "localhost" (or any
+	// other address). Then, all HTTP endpoints of an etcd server listening
+	// on "localhost" become accessible, thus vulnerable to DNS rebinding
+ // attacks. See "CVE-2018-5702" for more detail.
+ //
+ // 1. If client connection is secure via HTTPS, allow any hostnames.
+ // 2. If client connection is not secure and "HostWhitelist" is not empty,
+ // only allow HTTP requests whose Host field is listed in whitelist.
+ //
+ // Note that the client origin policy is enforced whether authentication
+ // is enabled or not, for tighter controls.
+ //
+ // By default, "HostWhitelist" is "*", which allows any hostnames.
+ // Note that when specifying hostnames, loopback addresses are not added
+	// automatically. To allow loopback interfaces, leave it empty or set it to "*",
+	// or add them to the whitelist manually (e.g. "localhost", "127.0.0.1", etc.).
+ //
+ // CVE-2018-5702 reference:
+ // - https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2
+ // - https://github.com/transmission/transmission/pull/468
+ // - https://github.com/etcd-io/etcd/issues/9353
+ HostWhitelist map[string]struct{}
+
+	// UserHandlers is for registering users' handlers and is only used for
+	// embedding etcd into other applications.
+	// The map key is the route path for the handler, and
+	// you must ensure it doesn't conflict with etcd's own routes.
+ UserHandlers map[string]http.Handler `json:"-"`
+ // ServiceRegister is for registering users' gRPC services. A simple usage example:
+ // cfg := embed.NewConfig()
+ // cfg.ServiceRegister = func(s *grpc.Server) {
+ // pb.RegisterFooServer(s, &fooServer{})
+ // pb.RegisterBarServer(s, &barServer{})
+ // }
+ // embed.StartEtcd(cfg)
+ ServiceRegister func(*grpc.Server) `json:"-"`
+
+ AuthToken string `json:"auth-token"`
+ BcryptCost uint `json:"bcrypt-cost"`
+
+ // AuthTokenTTL in seconds of the simple token
+	// AuthTokenTTL specifies the TTL in seconds of the simple token.
+
+	// ExperimentalInitialCorruptCheck defines whether to check data corruption on boot.
+ // TODO: delete in v3.7
+ // Deprecated: Use InitialCorruptCheck Feature Gate instead. Will be decommissioned in v3.7.
+ ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
+ // ExperimentalCorruptCheckTime is the duration of time between cluster corruption check passes.
+ // TODO: delete in v3.7
+ // Deprecated: Use CorruptCheckTime instead. Will be decommissioned in v3.7.
+ ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"`
+ // CorruptCheckTime is the duration of time between cluster corruption check passes.
+ CorruptCheckTime time.Duration `json:"corrupt-check-time"`
+	// ExperimentalCompactHashCheckEnabled enables the leader to periodically check followers' compaction hashes.
+ // TODO: delete in v3.7
+ // Deprecated: Use CompactHashCheck Feature Gate. Will be decommissioned in v3.7.
+ ExperimentalCompactHashCheckEnabled bool `json:"experimental-compact-hash-check-enabled"`
+	// ExperimentalCompactHashCheckTime is the duration of time between the leader's checks of followers' compaction hashes.
+ // TODO: delete in v3.7
+ // Deprecated: Use CompactHashCheckTime instead. Will be decommissioned in v3.7.
+ ExperimentalCompactHashCheckTime time.Duration `json:"experimental-compact-hash-check-time"`
+	// CompactHashCheckTime is the duration of time between the leader's checks of followers' compaction hashes.
+ CompactHashCheckTime time.Duration `json:"compact-hash-check-time"`
+
+	// ExperimentalEnableLeaseCheckpoint enables the leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.
+ ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
+ // ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
+ // Requires experimental-enable-lease-checkpoint to be enabled.
+ // TODO: Delete in v3.7
+ // Deprecated: To be decommissioned in v3.7.
+ ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"`
+	// ExperimentalCompactionBatchLimit sets the maximum number of revisions deleted in each compaction batch.
+ // TODO: Delete in v3.7
+ // Deprecated: Use CompactionBatchLimit instead. Will be decommissioned in v3.7.
+ ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
+	// CompactionBatchLimit sets the maximum number of revisions deleted in each compaction batch.
+ CompactionBatchLimit int `json:"compaction-batch-limit"`
+ // ExperimentalCompactionSleepInterval is the sleep interval between every etcd compaction loop.
+ // TODO: Delete in v3.7
+ // Deprecated: Use CompactionSleepInterval instead. Will be decommissioned in v3.7.
+ ExperimentalCompactionSleepInterval time.Duration `json:"experimental-compaction-sleep-interval"`
+ // CompactionSleepInterval is the sleep interval between every etcd compaction loop.
+ CompactionSleepInterval time.Duration `json:"compaction-sleep-interval"`
+ // ExperimentalWatchProgressNotifyInterval is the time duration of periodic watch progress notifications.
+ // TODO: Delete in v3.7
+ // Deprecated: Use WatchProgressNotifyInterval instead. Will be decommissioned in v3.7.
+ ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
+ // WatchProgressNotifyInterval is the time duration of periodic watch progress notifications.
+ WatchProgressNotifyInterval time.Duration `json:"watch-progress-notify-interval"`
+	// ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying a request
+ // takes more time than this value.
+ // TODO: Delete in v3.7
+ // Deprecated: Use WarningApplyDuration instead. Will be decommissioned in v3.7.
+ ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"`
+	// WarningApplyDuration is the time duration after which a warning is generated if applying a request
+	// takes more time than this value.
+ WarningApplyDuration time.Duration `json:"warning-apply-duration"`
+ // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to
+ // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect.
+ // TODO: Delete in v3.7
+ // Deprecated: Use BootstrapDefragThresholdMegabytes instead. Will be decommissioned in v3.7.
+ ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"`
+	// BootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to
+	// consider running defrag during bootstrap. Needs to be set to non-zero value to take effect.
+ BootstrapDefragThresholdMegabytes uint `json:"bootstrap-defrag-threshold-megabytes"`
+ // WarningUnaryRequestDuration is the time duration after which a warning is generated if applying
+ // unary request takes more time than this value.
+ WarningUnaryRequestDuration time.Duration `json:"warning-unary-request-duration"`
+	// ExperimentalWarningUnaryRequestDuration is the time duration after which a warning is generated if applying
+	// a unary request takes more time than this value.
+ // TODO: Delete in v3.7
+ // Deprecated: Use WarningUnaryRequestDuration. Will be decommissioned in v3.7.
+ ExperimentalWarningUnaryRequestDuration time.Duration `json:"experimental-warning-unary-request-duration"`
+ // MaxLearners sets a limit to the number of learner members that can exist in the cluster membership.
+ MaxLearners int `json:"max-learners"`
+
+ // ForceNewCluster starts a new cluster even if previously started; unsafe.
+ ForceNewCluster bool `json:"force-new-cluster"`
+
+ EnablePprof bool `json:"enable-pprof"`
+ Metrics string `json:"metrics"`
+ ListenMetricsUrls []url.URL
+ ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
+
+ // ExperimentalEnableDistributedTracing indicates if experimental tracing using OpenTelemetry is enabled.
+ // TODO: delete in v3.7
+ // Deprecated: Use EnableDistributedTracing instead. Will be decommissioned in v3.7.
+ ExperimentalEnableDistributedTracing bool `json:"experimental-enable-distributed-tracing"`
+ // EnableDistributedTracing indicates if tracing using OpenTelemetry is enabled.
+ EnableDistributedTracing bool `json:"enable-distributed-tracing"`
+ // ExperimentalDistributedTracingAddress is the address of the OpenTelemetry Collector.
+ // Can only be set if ExperimentalEnableDistributedTracing is true.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingAddress instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingAddress string `json:"experimental-distributed-tracing-address"`
+ // DistributedTracingAddress is the address of the OpenTelemetry Collector.
+ // Can only be set if EnableDistributedTracing is true.
+ DistributedTracingAddress string `json:"distributed-tracing-address"`
+ // ExperimentalDistributedTracingServiceName is the name of the service.
+ // Can only be used if ExperimentalEnableDistributedTracing is true.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingServiceName instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingServiceName string `json:"experimental-distributed-tracing-service-name"`
+ // DistributedTracingServiceName is the name of the service.
+ // Can only be used if EnableDistributedTracing is true.
+ DistributedTracingServiceName string `json:"distributed-tracing-service-name"`
+ // ExperimentalDistributedTracingServiceInstanceID is the ID key of the service.
+	// This ID must be unique, as it helps to distinguish instances of the same service
+ // that exist at the same time.
+ // Can only be used if ExperimentalEnableDistributedTracing is true.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingServiceInstanceID instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"`
+ // DistributedTracingServiceInstanceID is the ID key of the service.
+	// This ID must be unique, as it helps to distinguish instances of the same service
+ // that exist at the same time.
+ // Can only be used if EnableDistributedTracing is true.
+ DistributedTracingServiceInstanceID string `json:"distributed-tracing-instance-id"`
+ // ExperimentalDistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans.
+ // Defaults to 0.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingSamplingRatePerMillion instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingSamplingRatePerMillion int `json:"experimental-distributed-tracing-sampling-rate"`
+ // DistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans.
+ // Defaults to 0.
+ DistributedTracingSamplingRatePerMillion int `json:"distributed-tracing-sampling-rate"`
+
+ // ExperimentalPeerSkipClientSanVerification determines whether to skip verification of SAN field
+ // in client certificate for peer connections.
+ // TODO: Delete in v3.7
+ // Deprecated: Use `peer-skip-client-san-verification` instead. Will be decommissioned in v3.7.
+ ExperimentalPeerSkipClientSanVerification bool `json:"experimental-peer-skip-client-san-verification"`
+
+ // Logger is logger options: currently only supports "zap".
+ // "capnslog" is removed in v3.5.
+ Logger string `json:"logger"`
+ // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
+ LogLevel string `json:"log-level"`
+ // LogFormat set log encoding. Only supports json, console. Default is 'json'.
+ LogFormat string `json:"log-format"`
+ // LogOutputs is either:
+ // - "default" as os.Stderr,
+ // - "stderr" as os.Stderr,
+ // - "stdout" as os.Stdout,
+ // - file path to append server logs to.
+ // It can be multiple when "Logger" is zap.
+ LogOutputs []string `json:"log-outputs"`
+ // EnableLogRotation enables log rotation of a single LogOutputs file target.
+ EnableLogRotation bool `json:"enable-log-rotation"`
+ // LogRotationConfigJSON is a passthrough allowing a log rotation JSON config to be passed directly.
+ LogRotationConfigJSON string `json:"log-rotation-config-json"`
+ // ZapLoggerBuilder is used to build the zap logger.
+ ZapLoggerBuilder func(*Config) error
+
+ // logger logs server-side operations. The default is nil,
+ // and "setupLogging" must be called before starting server.
+ // Do not set logger directly.
+ loggerMu *sync.RWMutex
+ logger *zap.Logger
+ // EnableGRPCGateway enables grpc gateway.
+ // The gateway translates a RESTful HTTP API into gRPC.
+ EnableGRPCGateway bool `json:"enable-grpc-gateway"`
+
+ // UnsafeNoFsync disables all uses of fsync.
+ // Setting this is unsafe and will cause data loss.
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"`
+
+ // ExperimentalDowngradeCheckTime is the duration between two downgrade status checks (in seconds).
+ // TODO: Delete `ExperimentalDowngradeCheckTime` in v3.7.
+ // Deprecated: Use DowngradeCheckTime instead. Will be decommissioned in v3.7.
+ ExperimentalDowngradeCheckTime time.Duration `json:"experimental-downgrade-check-time"`
+ // DowngradeCheckTime is the duration between two downgrade status checks (in seconds).
+ DowngradeCheckTime time.Duration `json:"downgrade-check-time"`
+
+ // MemoryMlock enables mlocking of etcd owned memory pages.
+	// The setting improves etcd tail latency in environments where:
+ // - memory pressure might lead to swapping pages to disk
+ // - disk latency might be unstable
+	// Currently all etcd memory gets mlocked, but in the future the flag can
+	// be refined to mlock only the in-use area of bbolt.
+ MemoryMlock bool `json:"memory-mlock"`
+
+ // ExperimentalMemoryMlock enables mlocking of etcd owned memory pages.
+ // TODO: Delete in v3.7
+	// Deprecated: Use MemoryMlock instead. To be decommissioned in v3.7.
+ ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"`
+
+	// ExperimentalTxnModeWriteWithSharedBuffer enables the write transaction to use a shared buffer in its readonly check operations.
+ // TODO: Delete in v3.7
+ // Deprecated: Use TxnModeWriteWithSharedBuffer Feature Flag. Will be decommissioned in v3.7.
+ ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"`
+
+ // ExperimentalStopGRPCServiceOnDefrag enables etcd gRPC service to stop serving client requests on defragmentation.
+ // TODO: Delete in v3.7
+ // Deprecated: Use StopGRPCServiceOnDefrag Feature Flag. Will be decommissioned in v3.7.
+ ExperimentalStopGRPCServiceOnDefrag bool `json:"experimental-stop-grpc-service-on-defrag"`
+
+ // V2Deprecation describes phase of API & Storage V2 support.
+ // Do not set this field for embedded use cases, as it has no effect. However, setting it will not cause any harm.
+ // TODO: Delete in v3.8
+ // Deprecated: The default value is enforced, to be removed in v3.8.
+ V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"`
+
+ // ServerFeatureGate is a server level feature gate
+ ServerFeatureGate featuregate.FeatureGate
+	// FlagsExplicitlySet stores whether a flag was explicitly set from the command line or config file.
+ FlagsExplicitlySet map[string]bool
+}
+
+// configYAML holds the config suitable for yaml parsing
+type configYAML struct {
+ Config
+ configJSON
+}
+
+// configJSON has file options that are translated into Config options
+type configJSON struct {
+ ListenPeerURLs string `json:"listen-peer-urls"`
+ ListenClientURLs string `json:"listen-client-urls"`
+ ListenClientHTTPURLs string `json:"listen-client-http-urls"`
+ AdvertisePeerURLs string `json:"initial-advertise-peer-urls"`
+ AdvertiseClientURLs string `json:"advertise-client-urls"`
+
+ CORSJSON string `json:"cors"`
+ HostWhitelistJSON string `json:"host-whitelist"`
+
+ ClientSecurityJSON securityConfig `json:"client-transport-security"`
+ PeerSecurityJSON securityConfig `json:"peer-transport-security"`
+
+ ServerFeatureGatesJSON string `json:"feature-gates"`
+}
+
+type securityConfig struct {
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ ClientCertFile string `json:"client-cert-file"`
+ ClientKeyFile string `json:"client-key-file"`
+ CertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+ AutoTLS bool `json:"auto-tls"`
+ AllowedCNs []string `json:"allowed-cn"`
+ AllowedHostnames []string `json:"allowed-hostname"`
+ SkipClientSANVerify bool `json:"skip-client-san-verification,omitempty"`
+}
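+
+// Illustrative sketch (editor's note): a config file of the shape accepted here
+// might look as follows; the concrete paths and URLs are assumptions chosen for
+// illustration, and the keys mirror the json tags above.
+//
+//	name: infra1
+//	data-dir: /var/lib/etcd
+//	listen-client-urls: https://0.0.0.0:2379
+//	advertise-client-urls: https://10.0.0.1:2379
+//	client-transport-security:
+//	  cert-file: /etc/etcd/server.crt
+//	  key-file: /etc/etcd/server.key
+//	  trusted-ca-file: /etc/etcd/ca.crt
+//	  client-cert-auth: true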
+
+// NewConfig creates a new Config populated with default values.
+func NewConfig() *Config {
+ lpurl, _ := url.Parse(DefaultListenPeerURLs)
+ apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
+ lcurl, _ := url.Parse(DefaultListenClientURLs)
+ acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
+ cfg := &Config{
+ MaxSnapFiles: DefaultMaxSnapshots,
+ MaxWalFiles: DefaultMaxWALs,
+
+ Name: DefaultName,
+
+ SnapshotCount: etcdserver.DefaultSnapshotCount,
+ ExperimentalSnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
+ SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
+
+ MaxTxnOps: DefaultMaxTxnOps,
+ MaxRequestBytes: DefaultMaxRequestBytes,
+ MaxConcurrentStreams: DefaultMaxConcurrentStreams,
+ WarningApplyDuration: DefaultWarningApplyDuration,
+
+ GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
+ GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
+ GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
+
+ SocketOpts: transport.SocketOpts{
+ ReusePort: false,
+ ReuseAddress: false,
+ },
+
+ TickMs: 100,
+ ElectionMs: 1000,
+ InitialElectionTickAdvance: true,
+
+ ListenPeerUrls: []url.URL{*lpurl},
+ ListenClientUrls: []url.URL{*lcurl},
+ AdvertisePeerUrls: []url.URL{*apurl},
+ AdvertiseClientUrls: []url.URL{*acurl},
+
+ ClusterState: ClusterStateFlagNew,
+ InitialClusterToken: "etcd-cluster",
+
+ StrictReconfigCheck: DefaultStrictReconfigCheck,
+ Metrics: "basic",
+
+ CORS: map[string]struct{}{"*": {}},
+ HostWhitelist: map[string]struct{}{"*": {}},
+
+ AuthToken: DefaultAuthToken,
+ BcryptCost: uint(bcrypt.DefaultCost),
+ AuthTokenTTL: 300,
+ SelfSignedCertValidity: DefaultSelfSignedCertValidity,
+ TlsMinVersion: DefaultTLSMinVersion,
+
+ PreVote: true,
+
+ loggerMu: new(sync.RWMutex),
+ logger: nil,
+ Logger: "zap",
+ LogFormat: DefaultLoggingFormat,
+ LogOutputs: []string{DefaultLogOutput},
+ LogLevel: logutil.DefaultLogLevel,
+ EnableLogRotation: false,
+ LogRotationConfigJSON: DefaultLogRotationConfig,
+ EnableGRPCGateway: true,
+
+ ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime,
+ DowngradeCheckTime: DefaultDowngradeCheckTime,
+ MemoryMlock: false,
+ // TODO: delete in v3.7
+ ExperimentalMemoryMlock: false,
+ ExperimentalStopGRPCServiceOnDefrag: false,
+ MaxLearners: membership.DefaultMaxLearners,
+
+ ExperimentalTxnModeWriteWithSharedBuffer: DefaultExperimentalTxnModeWriteWithSharedBuffer,
+ ExperimentalDistributedTracingAddress: DefaultDistributedTracingAddress,
+ DistributedTracingAddress: DefaultDistributedTracingAddress,
+ ExperimentalDistributedTracingServiceName: DefaultDistributedTracingServiceName,
+ DistributedTracingServiceName: DefaultDistributedTracingServiceName,
+
+ CompactHashCheckTime: DefaultCompactHashCheckTime,
+ // TODO: delete in v3.7
+ ExperimentalCompactHashCheckTime: DefaultCompactHashCheckTime,
+
+ V2Deprecation: config.V2DeprDefault,
+
+ DiscoveryCfg: v3discovery.DiscoveryConfig{
+ ConfigSpec: clientv3.ConfigSpec{
+ DialTimeout: DefaultDiscoveryDialTimeout,
+ RequestTimeout: DefaultDiscoveryRequestTimeOut,
+ KeepAliveTime: DefaultDiscoveryKeepAliveTime,
+ KeepAliveTimeout: DefaultDiscoveryKeepAliveTimeOut,
+
+ Secure: &clientv3.SecureConfig{
+ InsecureTransport: true,
+ },
+ Auth: &clientv3.AuthConfig{},
+ },
+ },
+
+ AutoCompactionMode: DefaultAutoCompactionMode,
+ AutoCompactionRetention: DefaultAutoCompactionRetention,
+ ServerFeatureGate: features.NewDefaultServerFeatureGate(DefaultName, nil),
+ FlagsExplicitlySet: map[string]bool{},
+ }
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ return cfg
+}
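+
+// Illustrative sketch (editor's note): a minimal embedded-server bootstrap using
+// the defaults above. StartEtcd is defined elsewhere in this package, and the
+// data directory value is an assumption chosen for illustration.
+//
+//	cfg := NewConfig()
+//	cfg.Dir = "/tmp/etcd-data"
+//	e, err := StartEtcd(cfg)
+//	if err != nil {
+//		// handle startup error
+//	}
+//	defer e.Close()
+//	<-e.Server.ReadyNotify() // block until the server is ready to serve requests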
+
+func (cfg *Config) AddFlags(fs *flag.FlagSet) {
+ // member
+ fs.StringVar(&cfg.Dir, "data-dir", cfg.Dir, "Path to the data directory.")
+ fs.StringVar(&cfg.WalDir, "wal-dir", cfg.WalDir, "Path to the dedicated wal directory.")
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultListenPeerURLs, ""),
+ "listen-peer-urls",
+ "List of URLs to listen on for peer traffic.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultListenClientURLs, ""), "listen-client-urls",
+		"List of URLs to listen on for client gRPC traffic, and for HTTP traffic as long as --listen-client-http-urls is not specified.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("", ""), "listen-client-http-urls",
+		"List of URLs to listen on for HTTP-only client traffic. Enabling this flag removes HTTP services from --listen-client-urls.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("", ""),
+ "listen-metrics-urls",
+ "List of URLs to listen on for the metrics and health endpoints.",
+ )
+ fs.UintVar(&cfg.MaxSnapFiles, "max-snapshots", cfg.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited). Deprecated in v3.6 and will be decommissioned in v3.7.")
+ fs.UintVar(&cfg.MaxWalFiles, "max-wals", cfg.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).")
+ fs.StringVar(&cfg.Name, "name", cfg.Name, "Human-readable name for this member.")
+ fs.Uint64Var(&cfg.SnapshotCount, "snapshot-count", cfg.SnapshotCount, "Number of committed transactions to trigger a snapshot to disk. Deprecated in v3.6 and will be decommissioned in v3.7.")
+ fs.UintVar(&cfg.TickMs, "heartbeat-interval", cfg.TickMs, "Time (in milliseconds) of a heartbeat interval.")
+ fs.UintVar(&cfg.ElectionMs, "election-timeout", cfg.ElectionMs, "Time (in milliseconds) for an election to timeout.")
+ fs.BoolVar(&cfg.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.")
+ fs.Int64Var(&cfg.QuotaBackendBytes, "quota-backend-bytes", cfg.QuotaBackendBytes, "Sets the maximum size (in bytes) that the etcd backend database may consume. Exceeding this triggers an alarm and puts etcd in read-only mode. Set to 0 to use the default 2GiB limit.")
+	fs.StringVar(&cfg.BackendFreelistType, "backend-bbolt-freelist-type", cfg.BackendFreelistType, "BackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).")
+	fs.DurationVar(&cfg.BackendBatchInterval, "backend-batch-interval", cfg.BackendBatchInterval, "BackendBatchInterval is the maximum time before committing the backend transaction.")
+	fs.IntVar(&cfg.BackendBatchLimit, "backend-batch-limit", cfg.BackendBatchLimit, "BackendBatchLimit is the maximum number of operations before committing the backend transaction.")
+ fs.UintVar(&cfg.MaxTxnOps, "max-txn-ops", cfg.MaxTxnOps, "Maximum number of operations permitted in a transaction.")
+ fs.UintVar(&cfg.MaxRequestBytes, "max-request-bytes", cfg.MaxRequestBytes, "Maximum client request size in bytes the server will accept.")
+ fs.DurationVar(&cfg.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.")
+ fs.DurationVar(&cfg.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
+ fs.DurationVar(&cfg.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
+ fs.BoolVar(&cfg.SocketOpts.ReusePort, "socket-reuse-port", cfg.SocketOpts.ReusePort, "Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.")
+ fs.BoolVar(&cfg.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.SocketOpts.ReuseAddress, "Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in `TIME_WAIT` state.")
+
+ fs.Var(flags.NewUint32Value(cfg.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can open at a time.")
+
+ // raft connection timeouts
+ fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "Read timeout set on each rafthttp connection")
+ fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "Write timeout set on each rafthttp connection")
+
+ // clustering
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultInitialAdvertisePeerURLs, ""),
+ "initial-advertise-peer-urls",
+ "List of this member's peer URLs to advertise to the rest of the cluster.",
+ )
+
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultAdvertiseClientURLs, ""),
+ "advertise-client-urls",
+ "List of this member's client URLs to advertise to the public.",
+ )
+
+ fs.StringVar(&cfg.Durl, "discovery", cfg.Durl, "Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8.")
+
+ fs.Var(
+ flags.NewUniqueStringsValue(""),
+ "discovery-endpoints",
+ "V3 discovery: List of gRPC endpoints of the discovery service.",
+ )
+ fs.StringVar(&cfg.DiscoveryCfg.Token, "discovery-token", "", "V3 discovery: discovery token for the etcd cluster to be bootstrapped.")
+ fs.DurationVar(&cfg.DiscoveryCfg.DialTimeout, "discovery-dial-timeout", cfg.DiscoveryCfg.DialTimeout, "V3 discovery: dial timeout for client connections.")
+ fs.DurationVar(&cfg.DiscoveryCfg.RequestTimeout, "discovery-request-timeout", cfg.DiscoveryCfg.RequestTimeout, "V3 discovery: timeout for discovery requests (excluding dial timeout).")
+ fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTime, "discovery-keepalive-time", cfg.DiscoveryCfg.KeepAliveTime, "V3 discovery: keepalive time for client connections.")
+ fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTimeout, "discovery-keepalive-timeout", cfg.DiscoveryCfg.KeepAliveTimeout, "V3 discovery: keepalive timeout for client connections.")
+ fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureTransport, "discovery-insecure-transport", true, "V3 discovery: disable transport security for client connections.")
+ fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureSkipVerify, "discovery-insecure-skip-tls-verify", false, "V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes).")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Cert, "discovery-cert", "", "V3 discovery: identify secure client using this TLS certificate file.")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Key, "discovery-key", "", "V3 discovery: identify secure client using this TLS key file.")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Cacert, "discovery-cacert", "", "V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle.")
+ fs.StringVar(&cfg.DiscoveryCfg.Auth.Username, "discovery-user", "", "V3 discovery: username[:password] for authentication (prompt if password is not supplied).")
+ fs.StringVar(&cfg.DiscoveryCfg.Auth.Password, "discovery-password", "", "V3 discovery: password for authentication (if this option is used, --user option shouldn't include password).")
+
+ fs.StringVar(&cfg.Dproxy, "discovery-proxy", cfg.Dproxy, "HTTP proxy to use for traffic to discovery service. Will be deprecated in v3.7, and be decommissioned in v3.8.")
+ fs.StringVar(&cfg.DNSCluster, "discovery-srv", cfg.DNSCluster, "DNS domain used to bootstrap initial cluster.")
+ fs.StringVar(&cfg.DNSClusterServiceName, "discovery-srv-name", cfg.DNSClusterServiceName, "Service name to query when using DNS discovery.")
+ fs.StringVar(&cfg.InitialCluster, "initial-cluster", cfg.InitialCluster, "Initial cluster configuration for bootstrapping.")
+ fs.StringVar(&cfg.InitialClusterToken, "initial-cluster-token", cfg.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
+ fs.BoolVar(&cfg.StrictReconfigCheck, "strict-reconfig-check", cfg.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
+
+ fs.BoolVar(&cfg.PreVote, "pre-vote", cfg.PreVote, "Enable the raft Pre-Vote algorithm to prevent disruption when a node that has been partitioned away rejoins the cluster.")
+
+ // security
+ fs.StringVar(&cfg.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.")
+ fs.StringVar(&cfg.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.")
+ fs.StringVar(&cfg.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.")
+ fs.StringVar(&cfg.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.")
+ fs.BoolVar(&cfg.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.")
+ fs.StringVar(&cfg.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.")
+ fs.Var(flags.NewStringsValue(""), "client-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for client cert authentication.")
+ fs.StringVar(&cfg.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.")
+ fs.BoolVar(&cfg.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates")
+ fs.StringVar(&cfg.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.")
+ fs.StringVar(&cfg.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.")
+ fs.StringVar(&cfg.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.")
+ fs.StringVar(&cfg.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.")
+ fs.BoolVar(&cfg.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.")
+ fs.StringVar(&cfg.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.")
+ fs.BoolVar(&cfg.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates")
+	fs.UintVar(&cfg.SelfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the client and peer certificates; the unit is years.")
+ fs.StringVar(&cfg.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.")
+ fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-cn", "Comma-separated list of allowed CNs for inter-peer TLS authentication.")
+ fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for inter-peer TLS authentication.")
+ fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
+	fs.BoolVar(&cfg.ExperimentalPeerSkipClientSanVerification, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections. Deprecated in v3.6 and will be decommissioned in v3.7. Use --peer-skip-client-san-verification instead.")
+ fs.BoolVar(&cfg.PeerTLSInfo.SkipClientSANVerify, "peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.")
+ fs.StringVar(&cfg.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.")
+ fs.StringVar(&cfg.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty defers to Go).")
+
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("*", "*"),
+ "cors",
+		"Comma-separated whitelist of origins for CORS (cross-origin resource sharing); empty or * means allow all.",
+ )
+ fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).")
+
+ // logging
+ fs.StringVar(&cfg.Logger, "logger", "zap", "Currently only supports 'zap' for structured logging.")
+	fs.Var(flags.NewUniqueStringsValue(DefaultLogOutput), "log-outputs", "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or a comma-separated list of output targets.")
+ fs.StringVar(&cfg.LogLevel, "log-level", logutil.DefaultLogLevel, "Configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.")
+ fs.StringVar(&cfg.LogFormat, "log-format", logutil.DefaultLogFormat, "Configures log format. Only supports json, console. Default is 'json'.")
+ fs.BoolVar(&cfg.EnableLogRotation, "enable-log-rotation", false, "Enable log rotation of a single log-outputs file target.")
+ fs.StringVar(&cfg.LogRotationConfigJSON, "log-rotation-config-json", DefaultLogRotationConfig, "Configures log rotation if enabled with a JSON logger config. Default: MaxSize=100(MB), MaxAge=0(days,no limit), MaxBackups=0(no limit), LocalTime=false(UTC), Compress=false(gzip)")
+
+ fs.StringVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
+	fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "Interpret 'auto-compaction-retention' as one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.")
+
+ // pprof profiler via HTTP
+ fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")
+
+ // additional metrics
+ fs.StringVar(&cfg.Metrics, "metrics", cfg.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics")
+
+ // experimental distributed tracing
+ fs.BoolVar(&cfg.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing. Deprecated in v3.6 and will be decommissioned in v3.7. Use --enable-distributed-tracing instead.")
+ fs.BoolVar(&cfg.EnableDistributedTracing, "enable-distributed-tracing", false, "Enable distributed tracing using OpenTelemetry Tracing.")
+
+ fs.StringVar(&cfg.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", cfg.ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-address instead.")
+ fs.StringVar(&cfg.DistributedTracingAddress, "distributed-tracing-address", cfg.DistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag).")
+
+ fs.StringVar(&cfg.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", cfg.ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd. Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-service-name instead.")
+ fs.StringVar(&cfg.DistributedTracingServiceName, "distributed-tracing-service-name", cfg.DistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.")
+
+ fs.StringVar(&cfg.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance. Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-instance-id instead.")
+ fs.StringVar(&cfg.DistributedTracingServiceInstanceID, "distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance.")
+
+ fs.IntVar(&cfg.ExperimentalDistributedTracingSamplingRatePerMillion, "experimental-distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-sampling-rate instead.")
+ fs.IntVar(&cfg.DistributedTracingSamplingRatePerMillion, "distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag).")
+
+ // auth
+ fs.StringVar(&cfg.AuthToken, "auth-token", cfg.AuthToken, "Specify auth token specific options.")
+ fs.UintVar(&cfg.BcryptCost, "bcrypt-cost", cfg.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.")
+ fs.UintVar(&cfg.AuthTokenTTL, "auth-token-ttl", cfg.AuthTokenTTL, "The lifetime in seconds of the auth token.")
+
+ // gateway
+ fs.BoolVar(&cfg.EnableGRPCGateway, "enable-grpc-gateway", cfg.EnableGRPCGateway, "Enable GRPC gateway.")
+
+ // experimental
+ fs.BoolVar(&cfg.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes. Deprecated in v3.6 and will be decommissioned in v3.7. Use --corrupt-check-time instead")
+ fs.DurationVar(&cfg.CorruptCheckTime, "corrupt-check-time", cfg.CorruptCheckTime, "Duration of time between cluster corruption check passes.")
+ // TODO: delete in v3.7
+	fs.BoolVar(&cfg.ExperimentalCompactHashCheckEnabled, "experimental-compact-hash-check-enabled", cfg.ExperimentalCompactHashCheckEnabled, "Enable the leader to periodically check followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=CompactHashCheck=true' instead.")
+	fs.DurationVar(&cfg.ExperimentalCompactHashCheckTime, "experimental-compact-hash-check-time", cfg.ExperimentalCompactHashCheckTime, "Duration of time between the leader's checks of followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compact-hash-check-time instead.")
+
+	fs.DurationVar(&cfg.CompactHashCheckTime, "compact-hash-check-time", cfg.CompactHashCheckTime, "Duration of time between the leader's checks of followers' compaction hashes.")
+
+ fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.")
+ // TODO: delete in v3.7
+ fs.IntVar(&cfg.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compaction-batch-limit instead.")
+ fs.IntVar(&cfg.CompactionBatchLimit, "compaction-batch-limit", cfg.CompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
+ fs.DurationVar(&cfg.ExperimentalCompactionSleepInterval, "experimental-compaction-sleep-interval", cfg.ExperimentalCompactionSleepInterval, "Sets the sleep interval between each compaction batch. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compaction-sleep-interval instead.")
+ fs.DurationVar(&cfg.CompactionSleepInterval, "compaction-sleep-interval", cfg.CompactionSleepInterval, "Sets the sleep interval between each compaction batch.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications. Deprecated in v3.6 and will be decommissioned in v3.7. Use --watch-progress-notify-interval instead.")
+ fs.DurationVar(&cfg.WatchProgressNotifyInterval, "watch-progress-notify-interval", cfg.WatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
+ fs.DurationVar(&cfg.DowngradeCheckTime, "downgrade-check-time", cfg.DowngradeCheckTime, "Duration of time between two downgrade status checks.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status checks. Deprecated in v3.6 and will be decommissioned in v3.7. Use --downgrade-check-time instead.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if applying a request takes more time. Deprecated in v3.6 and will be decommissioned in v3.7. Use --warning-apply-duration instead.")
+ fs.DurationVar(&cfg.WarningApplyDuration, "warning-apply-duration", cfg.WarningApplyDuration, "Time duration after which a warning is generated if applying a request takes more time.")
+ fs.DurationVar(&cfg.WarningUnaryRequestDuration, "warning-unary-request-duration", cfg.WarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time.")
+ fs.DurationVar(&cfg.ExperimentalWarningUnaryRequestDuration, "experimental-warning-unary-request-duration", cfg.ExperimentalWarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ExperimentalMemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.")
+ fs.BoolVar(&cfg.MemoryMlock, "memory-mlock", cfg.MemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.")
+ fs.BoolVar(&cfg.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "Enable the write transaction to use a shared buffer in its readonly check operations.")
+ fs.BoolVar(&cfg.ExperimentalStopGRPCServiceOnDefrag, "experimental-stop-grpc-service-on-defrag", cfg.ExperimentalStopGRPCServiceOnDefrag, "Enable etcd gRPC service to stop serving client requests on defragmentation.")
+ // TODO: delete in v3.7
+ fs.UintVar(&cfg.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable defragmentation during etcd server bootstrap if it will free at least the provided threshold of disk space. Needs to be set to a non-zero value to take effect. It's deprecated, and will be decommissioned in v3.7. Use --bootstrap-defrag-threshold-megabytes instead.")
+ fs.UintVar(&cfg.BootstrapDefragThresholdMegabytes, "bootstrap-defrag-threshold-megabytes", 0, "Enable defragmentation during etcd server bootstrap if it will free at least the provided threshold of disk space. Needs to be set to a non-zero value to take effect.")
+ // TODO: delete in v3.7
+ fs.IntVar(&cfg.MaxLearners, "max-learners", membership.DefaultMaxLearners, "Sets the maximum number of learners that can be available in the cluster membership.")
+ fs.Uint64Var(&cfg.ExperimentalSnapshotCatchUpEntries, "experimental-snapshot-catchup-entries", cfg.ExperimentalSnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the raft storage entries. Deprecated in v3.6 and will be decommissioned in v3.7. Use --snapshot-catchup-entries instead.")
+ fs.Uint64Var(&cfg.SnapshotCatchUpEntries, "snapshot-catchup-entries", cfg.SnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the raft storage entries.")
+
+ // unsafe
+ fs.BoolVar(&cfg.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
+ fs.BoolVar(&cfg.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")
+
+ // featuregate
+ cfg.ServerFeatureGate.(featuregate.MutableFeatureGate).AddFlag(fs, ServerFeatureGateFlagName)
+}
+
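+// ConfigFromFile loads an embed server configuration from the YAML file at
+// path and returns the parsed Config, or an error if reading, parsing, or
+// validation fails. A minimal usage sketch (the file name is illustrative):
+//
+//	cfg, err := embed.ConfigFromFile("etcd.conf.yml")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	e, err := embed.StartEtcd(cfg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer e.Close()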
+func ConfigFromFile(path string) (*Config, error) {
+ cfg := &configYAML{Config: *NewConfig()}
+ if err := cfg.configFromFile(path); err != nil {
+ return nil, err
+ }
+ return &cfg.Config, nil
+}
+
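+// configFromFile reads the YAML file at path, unmarshals it into the config,
+// records which flags were explicitly set, converts the URL, CORS, whitelist
+// and security fields, and finally validates the result.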
+func (cfg *configYAML) configFromFile(path string) error {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ defaultInitialCluster := cfg.InitialCluster
+
+ err = yaml.Unmarshal(b, cfg)
+ if err != nil {
+ return err
+ }
+
+ if cfg.configJSON.ServerFeatureGatesJSON != "" {
+ err = cfg.Config.ServerFeatureGate.(featuregate.MutableFeatureGate).Set(cfg.configJSON.ServerFeatureGatesJSON)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parse the yaml bytes into a raw map first, so that getBoolFlagVal can read the top-level bool flag values.
+ var cfgMap map[string]any
+ err = yaml.Unmarshal(b, &cfgMap)
+ if err != nil {
+ return err
+ }
+
+ for flg := range cfgMap {
+ cfg.FlagsExplicitlySet[flg] = true
+ }
+
+ if peerTransportSecurity, ok := cfgMap["peer-transport-security"]; ok {
+ peerTransportSecurityMap, isMap := peerTransportSecurity.(map[string]any)
+ if !isMap {
+ return fmt.Errorf("invalid peer-transport-security")
+ }
+ for k := range peerTransportSecurityMap {
+ cfg.FlagsExplicitlySet[fmt.Sprintf("peer-%s", k)] = true
+ }
+ }
+
+ // attempt to fix a bug introduced in https://github.com/etcd-io/etcd/pull/15033
+ // both `experimental-snapshot-catch-up-entries` and `experimental-snapshot-catchup-entries` refer to the same field,
+ // map the YAML field "experimental-snapshot-catch-up-entries" to the flag "experimental-snapshot-catchup-entries".
+ if val, ok := cfgMap["experimental-snapshot-catch-up-entries"]; ok {
+ cfgMap["experimental-snapshot-catchup-entries"] = val
+ cfg.ExperimentalSnapshotCatchUpEntries = uint64(val.(float64))
+ cfg.FlagsExplicitlySet["experimental-snapshot-catchup-entries"] = true
+ }
+
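+ // getBoolFlagVal returns a pointer to the value of a top-level bool flag in
+ // the raw config map, or nil if the flag is not present.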
+ getBoolFlagVal := func(flagName string) *bool {
+ flagVal, ok := cfgMap[flagName]
+ if !ok {
+ return nil
+ }
+ boolVal := flagVal.(bool)
+ return &boolVal
+ }
+ err = SetFeatureGatesFromExperimentalFlags(cfg.ServerFeatureGate, getBoolFlagVal, cfg.configJSON.ServerFeatureGatesJSON)
+ if err != nil {
+ return err
+ }
+
+ if cfg.configJSON.ListenPeerURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenPeerUrls = u
+ }
+
+ if cfg.configJSON.ListenClientURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenClientUrls = u
+ }
+
+ if cfg.configJSON.ListenClientHTTPURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHTTPURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenClientHttpUrls = u
+ }
+
+ if cfg.configJSON.AdvertisePeerURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.AdvertisePeerUrls = u
+ }
+
+ if cfg.configJSON.AdvertiseClientURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.AdvertiseClientUrls = u
+ }
+
+ if cfg.ListenMetricsUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.ListenMetricsUrls = u
+ }
+
+ if cfg.CORSJSON != "" {
+ uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*")
+ cfg.CORS = uv.Values
+ }
+
+ if cfg.HostWhitelistJSON != "" {
+ uv := flags.NewUniqueStringsValue(cfg.HostWhitelistJSON)
+ cfg.HostWhitelist = uv.Values
+ }
+
+ // If a discovery or discovery-endpoints flag is set, clear default initial cluster set by InitialClusterFromName
+ if (cfg.Durl != "" || cfg.DNSCluster != "" || len(cfg.DiscoveryCfg.Endpoints) > 0) && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = ""
+ }
+ if cfg.ClusterState == "" {
+ cfg.ClusterState = ClusterStateFlagNew
+ }
+
+ copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
+ tls.CertFile = ysc.CertFile
+ tls.KeyFile = ysc.KeyFile
+ tls.ClientCertFile = ysc.ClientCertFile
+ tls.ClientKeyFile = ysc.ClientKeyFile
+ tls.ClientCertAuth = ysc.CertAuth
+ tls.TrustedCAFile = ysc.TrustedCAFile
+ tls.AllowedCNs = ysc.AllowedCNs
+ tls.AllowedHostnames = ysc.AllowedHostnames
+ tls.SkipClientSANVerify = ysc.SkipClientSANVerify
+ }
+ copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
+ copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
+ cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
+ cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
+ if cfg.SelfSignedCertValidity == 0 {
+ cfg.SelfSignedCertValidity = 1
+ }
+ return cfg.Validate()
+}
+
+// SetFeatureGatesFromExperimentalFlags sets the feature gate values if the feature gate is not explicitly set
+// while their corresponding experimental flags are explicitly set, for all the features in ExperimentalFlagToFeatureMap.
+// TODO: remove after all experimental flags are deprecated.
+func SetFeatureGatesFromExperimentalFlags(fg featuregate.FeatureGate, getExperimentalFlagVal func(string) *bool, featureGatesVal string) error {
+ m := make(map[featuregate.Feature]bool)
+ // verify that the feature gate and its experimental flag are not both set at the same time.
+ for expFlagName, featureName := range features.ExperimentalFlagToFeatureMap {
+ flagVal := getExperimentalFlagVal(expFlagName)
+ if flagVal == nil {
+ continue
+ }
+ if strings.Contains(featureGatesVal, string(featureName)) {
+ return fmt.Errorf("cannot specify both flags: --%s=%v and --%s=%s=%v at the same time, please just use --%s=%s=%v",
+ expFlagName, *flagVal, ServerFeatureGateFlagName, featureName, fg.Enabled(featureName), ServerFeatureGateFlagName, featureName, fg.Enabled(featureName))
+ }
+ m[featureName] = *flagVal
+ }
+
+ // filter out unknown features for fg, because we could use SetFeatureGatesFromExperimentalFlags both for
+ // server and cluster level feature gates.
+ allFeatures := fg.(featuregate.MutableFeatureGate).GetAll()
+ mFiltered := make(map[string]bool)
+ for k, v := range m {
+ if _, ok := allFeatures[k]; ok {
+ mFiltered[string(k)] = v
+ }
+ }
+ return fg.(featuregate.MutableFeatureGate).SetFromMap(mFiltered)
+}
+
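+// updateCipherSuites sets the TLS cipher suites from the given names; it
+// returns an error if cipher suites were already specified or a name is unknown.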
+func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
+ if len(tls.CipherSuites) > 0 && len(ss) > 0 {
+ return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
+ }
+ if len(ss) > 0 {
+ cs, err := tlsutil.GetCipherSuites(ss)
+ if err != nil {
+ return err
+ }
+ tls.CipherSuites = cs
+ }
+ return nil
+}
+
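+// updateMinMaxVersions applies the configured minimum and maximum TLS versions
+// to the given TLSInfo.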
+func updateMinMaxVersions(info *transport.TLSInfo, min, max string) {
+ // Validate() has been called to check the user input, so it should never fail.
+ var err error
+ if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil {
+ panic(err)
+ }
+ if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil {
+ panic(err)
+ }
+}
+
+// Validate ensures that '*embed.Config' fields are properly configured.
+func (cfg *Config) Validate() error {
+ // make sure there is no conflict in the flag settings in the ExperimentalNonBoolFlagMigrationMap
+ // TODO: delete in v3.7
+ for oldFlag, newFlag := range experimentalFlagMigrationMap {
+ if cfg.FlagsExplicitlySet[oldFlag] && cfg.FlagsExplicitlySet[newFlag] {
+ return fmt.Errorf("cannot set --%s and --%s at the same time, please use --%s only", oldFlag, newFlag, newFlag)
+ }
+ }
+
+ if err := cfg.setupLogging(); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenPeerUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenClientUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenClientHttpUrls); err != nil {
+ return err
+ }
+ if len(cfg.ListenClientHttpUrls) == 0 {
+ cfg.logger.Warn("Running http and grpc server on a single port. This is not recommended for production.")
+ }
+ if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
+ return err
+ }
+ if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil {
+ addrs := cfg.getAdvertisePeerURLs()
+ return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err)
+ }
+ if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil {
+ addrs := cfg.getAdvertiseClientURLs()
+ return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err)
+ }
+ // Check if conflicting flags are passed.
+ nSet := 0
+ for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != "", len(cfg.DiscoveryCfg.Endpoints) > 0} {
+ if v {
+ nSet++
+ }
+ }
+
+ if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
+ return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
+ }
+
+ if nSet > 1 {
+ return ErrConflictBootstrapFlags
+ }
+
+ // Check if both v2 discovery and v3 discovery flags are passed.
+ v2discoveryFlagsExist := cfg.Dproxy != ""
+ v3discoveryFlagsExist := len(cfg.DiscoveryCfg.Endpoints) > 0 ||
+ cfg.DiscoveryCfg.Token != "" ||
+ cfg.DiscoveryCfg.Secure.Cert != "" ||
+ cfg.DiscoveryCfg.Secure.Key != "" ||
+ cfg.DiscoveryCfg.Secure.Cacert != "" ||
+ cfg.DiscoveryCfg.Auth.Username != "" ||
+ cfg.DiscoveryCfg.Auth.Password != ""
+
+ if v2discoveryFlagsExist && v3discoveryFlagsExist {
+ return errors.New("both v2 discovery settings (discovery, discovery-proxy) " +
+ "and v3 discovery settings (discovery-token, discovery-endpoints, discovery-cert, " +
+ "discovery-key, discovery-cacert, discovery-user, discovery-password) are set")
+ }
+
+ // If one of `discovery-token` and `discovery-endpoints` is provided,
+ // then the other one must be provided as well.
+ if (cfg.DiscoveryCfg.Token != "") != (len(cfg.DiscoveryCfg.Endpoints) > 0) {
+ return errors.New("both --discovery-token and --discovery-endpoints must be set")
+ }
+
+ for _, ep := range cfg.DiscoveryCfg.Endpoints {
+ if strings.TrimSpace(ep) == "" {
+ return errors.New("--discovery-endpoints must not contain empty endpoints")
+ }
+ }
+
+ if cfg.TickMs == 0 {
+ return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
+ }
+ if cfg.ElectionMs == 0 {
+ return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs)
+ }
+ if 5*cfg.TickMs > cfg.ElectionMs {
+ return fmt.Errorf("--election-timeout[%vms] should be at least 5 times as long as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
+ }
+ if cfg.ElectionMs > maxElectionMs {
+ return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs)
+ }
+
+ // check this last since proxying in etcdmain may make this OK
+ if cfg.ListenClientUrls != nil && cfg.AdvertiseClientUrls == nil {
+ return ErrUnsetAdvertiseClientURLsFlag
+ }
+
+ switch cfg.AutoCompactionMode {
+ case CompactorModeRevision, CompactorModePeriodic:
+ case "":
+ return errors.New("undefined auto-compaction-mode")
+ default:
+ return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
+ }
+
+ // Validate distributed tracing configuration but only if enabled.
+ if cfg.EnableDistributedTracing {
+ if err := validateTracingConfig(cfg.DistributedTracingSamplingRatePerMillion); err != nil {
+ return fmt.Errorf("distributed tracing configuration is not valid: (%w)", err)
+ }
+ }
+
+ if !cfg.ServerFeatureGate.Enabled(features.LeaseCheckpointPersist) && cfg.ServerFeatureGate.Enabled(features.LeaseCheckpoint) {
+ cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling feature gate LeaseCheckpointPersist")
+ }
+
+ if cfg.ServerFeatureGate.Enabled(features.LeaseCheckpointPersist) && !cfg.ServerFeatureGate.Enabled(features.LeaseCheckpoint) {
+ return fmt.Errorf("enabling feature gate LeaseCheckpointPersist requires enabling feature gate LeaseCheckpoint")
+ }
+ // TODO: delete in v3.7
+ if cfg.ExperimentalCompactHashCheckTime <= 0 {
+ return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime)
+ }
+ if cfg.CompactHashCheckTime <= 0 {
+ return fmt.Errorf("--compact-hash-check-time must be >0 (set to %v)", cfg.CompactHashCheckTime)
+ }
+
+ // If `--name` isn't configured, then multiple members may have the same "default" name.
+ // When adding a new member with the "default" name as well, etcd may regard its peerURL
+ // as one additional peerURL of the existing member which has the same "default" name,
+ // because each member can have multiple client or peer URLs.
+ // Please refer to https://github.com/etcd-io/etcd/issues/13757
+ if cfg.Name == DefaultName {
+ cfg.logger.Warn(
+ "it isn't recommended to use the default name, please set a value for --name. "+
+ "Note that etcd might run into issues when multiple members have the same default name",
+ zap.String("name", cfg.Name))
+ }
+
+ minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion)
+ if err != nil {
+ return err
+ }
+ maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion)
+ if err != nil {
+ return err
+ }
+
+ // maxVersion == 0 means that Go selects the highest available version.
+ if maxVersion != 0 && minVersion > maxVersion {
+ return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ }
+
+ // Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
+ if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 {
+ return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
+ }
+
+ return nil
+}
+
+// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
+func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
+ token = cfg.InitialClusterToken
+ switch {
+ case cfg.Durl != "":
+ urlsmap = types.URLsMap{}
+ // If using v2 discovery, generate a temporary cluster based on
+ // self's advertised peer URLs
+ urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
+ token = cfg.Durl
+
+ case len(cfg.DiscoveryCfg.Endpoints) > 0:
+ urlsmap = types.URLsMap{}
+ // If using v3 discovery, generate a temporary cluster based on
+ // self's advertised peer URLs
+ urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
+ token = cfg.DiscoveryCfg.Token
+
+ case cfg.DNSCluster != "":
+ clusterStrs, cerr := cfg.GetDNSClusterNames()
+ lg := cfg.logger
+ if cerr != nil {
+ lg.Warn("failed to resolve during SRV discovery", zap.Error(cerr))
+ }
+ if len(clusterStrs) == 0 {
+ return nil, "", cerr
+ }
+ for _, s := range clusterStrs {
+ lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s))
+ }
+ clusterStr := strings.Join(clusterStrs, ",")
+ if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
+ cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
+ }
+ urlsmap, err = types.NewURLsMap(clusterStr)
+ // Only an etcd member must belong to the discovered cluster.
+ // A proxy does not need to belong to the discovered cluster.
+ if which == "etcd" {
+ if _, ok := urlsmap[cfg.Name]; !ok {
+ return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
+ }
+ }
+
+ default:
+ // We're statically configured, and cluster has appropriately been set.
+ urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
+ }
+ return urlsmap, token, err
+}
+
+// GetDNSClusterNames uses DNS SRV records to get a list of initial nodes for cluster bootstrapping.
+// This function will return a list of one or more nodes, as well as any errors encountered while
+// performing service discovery.
+// Note: Because this checks multiple sets of SRV records, discovery should only be considered to have
+// failed if the returned node list is empty.
+func (cfg *Config) GetDNSClusterNames() ([]string, error) {
+ var (
+ clusterStrs []string
+ cerr error
+ serviceNameSuffix string
+ )
+ if cfg.DNSClusterServiceName != "" {
+ serviceNameSuffix = "-" + cfg.DNSClusterServiceName
+ }
+
+ lg := cfg.GetLogger()
+
+ // Use both etcd-server-ssl and etcd-server for discovery.
+ // Combine the results if both are available.
+ clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
+ if cerr != nil {
+ clusterStrs = make([]string, 0)
+ }
+ lg.Info(
+ "get cluster for etcd-server-ssl SRV",
+ zap.String("service-scheme", "https"),
+ zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
+ zap.String("server-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(cerr),
+ )
+
+ defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
+ if httpCerr == nil {
+ clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
+ }
+ lg.Info(
+ "get cluster for etcd-server SRV",
+ zap.String("service-scheme", "http"),
+ zap.String("service-name", "etcd-server"+serviceNameSuffix),
+ zap.String("server-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(httpCerr),
+ )
+
+ return clusterStrs, errors.Join(cerr, httpCerr)
+}
+
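+// InitialClusterFromName derives an initial-cluster string of the form
+// "name=peerURL,..." from the advertised peer URLs, using the default member
+// name when name is empty. It returns an empty string if no peer URLs are advertised.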
+func (cfg *Config) InitialClusterFromName(name string) (ret string) {
+ if len(cfg.AdvertisePeerUrls) == 0 {
+ return ""
+ }
+ n := name
+ if name == "" {
+ n = DefaultName
+ }
+ for i := range cfg.AdvertisePeerUrls {
+ ret = ret + "," + n + "=" + cfg.AdvertisePeerUrls[i].String()
+ }
+ return ret[1:]
+}
+
+// InferLocalAddr tries to determine the LocalAddr used when communicating with
+// an etcd peer. If SetMemberLocalAddr is true, then it will try to get the host
+// from AdvertisePeerUrls by searching for the first URL with a specified
+// non-loopback address. Otherwise, it defaults to empty string and the
+// LocalAddr used will be the default for the Golang HTTP client.
+func (cfg *Config) InferLocalAddr() string {
+ if !cfg.ServerFeatureGate.Enabled(features.SetMemberLocalAddr) {
+ return ""
+ }
+
+ lg := cfg.GetLogger()
+ lg.Info(
+ "searching for a suitable member local address in AdvertisePeerURLs",
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ )
+ for _, peerURL := range cfg.AdvertisePeerUrls {
+ if addr, err := netip.ParseAddr(peerURL.Hostname()); err == nil {
+ if addr.IsLoopback() || addr.IsUnspecified() {
+ continue
+ }
+ lg.Info(
+ "setting member local address",
+ zap.String("LocalAddr", addr.String()),
+ )
+ return addr.String()
+ }
+ }
+ lg.Warn(
+ "unable to set a member local address due to lack of suitable local addresses",
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ )
+ return ""
+}
+
+func (cfg *Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
+func (cfg *Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
+
+func (cfg *Config) V2DeprecationEffective() config.V2DeprecationEnum {
+ if cfg.V2Deprecation == "" {
+ return config.V2DeprDefault
+ }
+ return cfg.V2Deprecation
+}
+
+func (cfg *Config) defaultPeerHost() bool {
+ return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs
+}
+
+func (cfg *Config) defaultClientHost() bool {
+ return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs
+}
+
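+// ClientSelfCert generates a self-signed certificate for the client listeners
+// when client auto TLS is enabled and no client certificates were given.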
+func (cfg *Config) ClientSelfCert() (err error) {
+ if !cfg.ClientAutoTLS {
+ return nil
+ }
+ if !cfg.ClientTLSInfo.Empty() {
+ cfg.logger.Warn("ignoring client auto TLS since certs given")
+ return nil
+ }
+ chosts := make([]string, 0, len(cfg.ListenClientUrls)+len(cfg.ListenClientHttpUrls))
+ for _, u := range cfg.ListenClientUrls {
+ chosts = append(chosts, u.Host)
+ }
+ for _, u := range cfg.ListenClientHttpUrls {
+ chosts = append(chosts, u.Host)
+ }
+ cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
+}
+
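+// PeerSelfCert generates a self-signed certificate for the peer listeners
+// when peer auto TLS is enabled and no peer certificates were given.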
+func (cfg *Config) PeerSelfCert() (err error) {
+ if !cfg.PeerAutoTLS {
+ return nil
+ }
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Warn("ignoring peer auto TLS since certs given")
+ return nil
+ }
+ phosts := make([]string, len(cfg.ListenPeerUrls))
+ for i, u := range cfg.ListenPeerUrls {
+ phosts[i] = u.Host
+ }
+ cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
+}
+
+// UpdateDefaultClusterFromName updates the cluster advertise URLs with the machine's default host, if available,
+// when the advertise URLs still hold their default values (localhost:2379,2380) AND the listen URL is 0.0.0.0.
+// e.g. with advertise peer URL localhost:2380 and listen peer URL 0.0.0.0:2380,
+// the advertise peer host is updated with the machine's default host while keeping the listen URL's port.
+// Users can work around this by explicitly setting the URL with 127.0.0.1.
+// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
+// TODO: check whether fields are set instead of whether fields have default value
+func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
+ if defaultHostname == "" || defaultHostStatus != nil {
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+ return "", defaultHostStatus
+ }
+
+ used := false
+ pip, pport := cfg.ListenPeerUrls[0].Hostname(), cfg.ListenPeerUrls[0].Port()
+ if cfg.defaultPeerHost() && pip == "0.0.0.0" {
+ cfg.AdvertisePeerUrls[0] = url.URL{Scheme: cfg.AdvertisePeerUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
+ used = true
+ }
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+
+ cip, cport := cfg.ListenClientUrls[0].Hostname(), cfg.ListenClientUrls[0].Port()
+ if cfg.defaultClientHost() && cip == "0.0.0.0" {
+ cfg.AdvertiseClientUrls[0] = url.URL{Scheme: cfg.AdvertiseClientUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
+ used = true
+ }
+ dhost := defaultHostname
+ if !used {
+ dhost = ""
+ }
+ return dhost, defaultHostStatus
+}
+
+// checkBindURLs returns an error if any URL uses a domain name.
+func checkBindURLs(urls []url.URL) error {
+ for _, url := range urls {
+ if url.Scheme == "unix" || url.Scheme == "unixs" {
+ continue
+ }
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "localhost" {
+ // special case for local address
+ // TODO: support /etc/hosts ?
+ continue
+ }
+ if net.ParseIP(host) == nil {
+ return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
+ }
+ }
+ return nil
+}
+
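+// checkHostURLs returns an error if any URL has an empty host.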
+func checkHostURLs(urls []url.URL) error {
+ for _, url := range urls {
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "" {
+ return fmt.Errorf("unexpected empty host (%s)", url.String())
+ }
+ }
+ return nil
+}
+
+func (cfg *Config) getAdvertisePeerURLs() (ss []string) {
+ ss = make([]string, len(cfg.AdvertisePeerUrls))
+ for i := range cfg.AdvertisePeerUrls {
+ ss[i] = cfg.AdvertisePeerUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getListenPeerURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenPeerUrls))
+ for i := range cfg.ListenPeerUrls {
+ ss[i] = cfg.ListenPeerUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getAdvertiseClientURLs() (ss []string) {
+ ss = make([]string, len(cfg.AdvertiseClientUrls))
+ for i := range cfg.AdvertiseClientUrls {
+ ss[i] = cfg.AdvertiseClientUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getListenClientURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenClientUrls))
+ for i := range cfg.ListenClientUrls {
+ ss[i] = cfg.ListenClientUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getMetricsURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenMetricsUrls))
+ for i := range cfg.ListenMetricsUrls {
+ ss[i] = cfg.ListenMetricsUrls[i].String()
+ }
+ return ss
+}
+
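+// parseBackendFreelistType maps the configured freelist type string to the
+// corresponding bbolt freelist type, defaulting to the map-based freelist.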
+func parseBackendFreelistType(freelistType string) bolt.FreelistType {
+ if freelistType == freelistArrayType {
+ return bolt.FreelistArrayType
+ }
+
+ return bolt.FreelistMapType
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go
new file mode 100644
index 0000000..c9da626
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go
@@ -0,0 +1,282 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+ "gopkg.in/natefinch/lumberjack.v2"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+)
+
+// GetLogger returns the logger.
+func (cfg *Config) GetLogger() *zap.Logger {
+ cfg.loggerMu.RLock()
+ l := cfg.logger
+ cfg.loggerMu.RUnlock()
+ return l
+}
+
+// setupLogging initializes etcd logging.
+// Must be called after flag parsing or finishing configuring embed.Config.
+func (cfg *Config) setupLogging() error {
+ switch cfg.Logger {
+ case "capnslog": // removed in v3.5
+ return fmt.Errorf("--logger=capnslog is removed in v3.5")
+
+ case "zap":
+ if len(cfg.LogOutputs) == 0 {
+ cfg.LogOutputs = []string{DefaultLogOutput}
+ }
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v == DefaultLogOutput {
+ return fmt.Errorf("multi logoutput for %q is not supported yet", DefaultLogOutput)
+ }
+ }
+ }
+ if cfg.EnableLogRotation {
+ if err := setupLogRotation(cfg.LogOutputs, cfg.LogRotationConfigJSON); err != nil {
+ return err
+ }
+ }
+
+ outputPaths, errOutputPaths := make([]string, 0), make([]string, 0)
+ isJournal := false
+ for _, v := range cfg.LogOutputs {
+ switch v {
+ case DefaultLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case JournalLogOutput:
+ isJournal = true
+
+ case StdErrLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case StdOutLogOutput:
+ outputPaths = append(outputPaths, StdOutLogOutput)
+ errOutputPaths = append(errOutputPaths, StdOutLogOutput)
+
+ default:
+ var path string
+ if cfg.EnableLogRotation {
+ // append rotate scheme to logs managed by lumberjack log rotation
+ if v[0:1] == "/" {
+ path = fmt.Sprintf("rotate:/%%2F%s", v[1:])
+ } else {
+ path = fmt.Sprintf("rotate:/%s", v)
+ }
+ } else {
+ path = v
+ }
+ outputPaths = append(outputPaths, path)
+ errOutputPaths = append(errOutputPaths, path)
+ }
+ }
+
+ if !isJournal {
+ copied := logutil.DefaultZapLoggerConfig
+ copied.OutputPaths = outputPaths
+ copied.ErrorOutputPaths = errOutputPaths
+ copied = logutil.MergeOutputPaths(copied)
+ copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+ encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat)
+ if err != nil {
+ return err
+ }
+ copied.Encoding = encoding
+ if cfg.ZapLoggerBuilder == nil {
+ lg, err := copied.Build()
+ if err != nil {
+ return err
+ }
+ cfg.ZapLoggerBuilder = NewZapLoggerBuilder(lg)
+ }
+ } else {
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v != DefaultLogOutput {
+ return fmt.Errorf("running with systemd/journal but other '--log-outputs' values (%q) are configured with 'default'; override 'default' value with something else", cfg.LogOutputs)
+ }
+ }
+ }
+
+ // use stderr as fallback
+ syncer, lerr := getJournalWriteSyncer()
+ if lerr != nil {
+ return lerr
+ }
+
+ lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+
+ var encoder zapcore.Encoder
+ encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat)
+ if err != nil {
+ return err
+ }
+
+ if encoding == logutil.ConsoleLogFormat {
+ encoder = zapcore.NewConsoleEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
+ } else {
+ encoder = zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
+ }
+
+ // WARN: do not change field names in encoder config
+ // journald logging writer assumes field names of "level" and "caller"
+ cr := zapcore.NewCore(
+ encoder,
+ syncer,
+ lvl,
+ )
+ if cfg.ZapLoggerBuilder == nil {
+ cfg.ZapLoggerBuilder = NewZapLoggerBuilder(zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer)))
+ }
+ }
+
+ err := cfg.ZapLoggerBuilder(cfg)
+ if err != nil {
+ return err
+ }
+
+ logTLSHandshakeFailureFunc := func(msg string) func(conn *tls.Conn, err error) {
+ return func(conn *tls.Conn, err error) {
+ state := conn.ConnectionState()
+ remoteAddr := conn.RemoteAddr().String()
+ serverName := state.ServerName
+ if len(state.PeerCertificates) > 0 {
+ cert := state.PeerCertificates[0]
+ ips := make([]string, len(cert.IPAddresses))
+ for i := range cert.IPAddresses {
+ ips[i] = cert.IPAddresses[i].String()
+ }
+ cfg.logger.Warn(
+ msg,
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Strings("ip-addresses", ips),
+ zap.Strings("dns-names", cert.DNSNames),
+ zap.Error(err),
+ )
+ } else {
+ cfg.logger.Warn(
+ msg,
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Error(err),
+ )
+ }
+ }
+ }
+
+ cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailureFunc("rejected connection on client endpoint")
+ cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailureFunc("rejected connection on peer endpoint")
+
+ default:
+ return fmt.Errorf("unknown logger option %q", cfg.Logger)
+ }
+
+ return nil
+}
+
+// NewZapLoggerBuilder generates a zap logger builder that sets given logger
+// for embedded etcd.
+func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error {
+ return func(cfg *Config) error {
+ cfg.loggerMu.Lock()
+ defer cfg.loggerMu.Unlock()
+ cfg.logger = lg
+ return nil
+ }
+}
+
+// SetupGlobalLoggers configures 'global' loggers (grpc, zapGlobal) based on the cfg.
+//
+// The method is not executed by embed server by default (since 3.5) to
+// enable setups where grpc/zap.Global logging is configured independently
+// or spans separate lifecycle (like in tests).
+func (cfg *Config) SetupGlobalLoggers() {
+ lg := cfg.GetLogger()
+ if lg != nil {
+ if cfg.LogLevel == "debug" {
+ grpc.EnableTracing = true
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+ } else {
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr))
+ }
+ zap.ReplaceGlobals(lg)
+ }
+}
+
+type logRotationConfig struct {
+ *lumberjack.Logger
+}
+
+// Sync implements zap.Sink
+func (logRotationConfig) Sync() error { return nil }
+
+// setupLogRotation initializes log rotation for a single file path target.
+func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
+ var logRotationCfg logRotationConfig
+ outputFilePaths := 0
+ for _, v := range logOutputs {
+ switch v {
+ case DefaultLogOutput, StdErrLogOutput, StdOutLogOutput:
+ continue
+ default:
+ outputFilePaths++
+ }
+ }
+ // log rotation requires file target
+ if len(logOutputs) == 1 && outputFilePaths == 0 {
+ return ErrLogRotationInvalidLogOutput
+ }
+ // support max 1 file target for log rotation
+ if outputFilePaths > 1 {
+ return ErrLogRotationInvalidLogOutput
+ }
+
+ if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationCfg); err != nil {
+ var unmarshalTypeError *json.UnmarshalTypeError
+ var syntaxError *json.SyntaxError
+ switch {
+ case errors.As(err, &syntaxError):
+ return fmt.Errorf("improperly formatted log rotation config: %w", err)
+ case errors.As(err, &unmarshalTypeError):
+ return fmt.Errorf("invalid log rotation config: %w", err)
+ default:
+ return fmt.Errorf("fail to unmarshal log rotation config: %w", err)
+ }
+ }
+ zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
+ logRotationCfg.Filename = u.Path[1:]
+ return &logRotationCfg, nil
+ })
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go
new file mode 100644
index 0000000..478dc65
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package embed
+
+import (
+ "fmt"
+ "os"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+)
+
+// use stderr as fallback
+func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
+ jw, err := logutil.NewJournalWriter(os.Stderr)
+ if err != nil {
+ return nil, fmt.Errorf("can't find journal (%w)", err)
+ }
+ return zapcore.AddSync(jw), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go
new file mode 100644
index 0000000..90dfad9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package embed
+
+import (
+ "os"
+
+ "go.uber.org/zap/zapcore"
+)
+
+func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
+ return zapcore.AddSync(os.Stderr), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go
new file mode 100644
index 0000000..0ca90fd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go
@@ -0,0 +1,138 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.uber.org/zap"
+)
+
+const maxSamplingRatePerMillion = 1000000
+
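+// validateTracingConfig checks that the sampling rate per million is within
+// the range [0, maxSamplingRatePerMillion].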
+func validateTracingConfig(samplingRate int) error {
+ if samplingRate < 0 {
+ return fmt.Errorf("tracing sampling rate must not be negative")
+ }
+ if samplingRate > maxSamplingRatePerMillion {
+ return fmt.Errorf("tracing sampling rate must not exceed %d", maxSamplingRatePerMillion)
+ }
+
+ return nil
+}
+
+type tracingExporter struct {
+ exporter tracesdk.SpanExporter
+ opts []otelgrpc.Option
+ provider *tracesdk.TracerProvider
+}
+
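+// newTracingExporter builds an OTLP gRPC span exporter, resource, and tracer
+// provider from the distributed tracing fields of the config.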
+func newTracingExporter(ctx context.Context, cfg *Config) (*tracingExporter, error) {
+ exporter, err := otlptracegrpc.New(ctx,
+ otlptracegrpc.WithInsecure(),
+ otlptracegrpc.WithEndpoint(cfg.DistributedTracingAddress),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := resource.New(ctx,
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String(cfg.DistributedTracingServiceName),
+ ),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if resWithIDKey := determineResourceWithIDKey(cfg.DistributedTracingServiceInstanceID); resWithIDKey != nil {
+ // Merge resources into a new
+ // resource in case of duplicates.
+ res, err = resource.Merge(res, resWithIDKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ traceProvider := tracesdk.NewTracerProvider(
+ tracesdk.WithBatcher(exporter),
+ tracesdk.WithResource(res),
+ tracesdk.WithSampler(
+ tracesdk.ParentBased(determineSampler(cfg.DistributedTracingSamplingRatePerMillion)),
+ ),
+ )
+
+ options := []otelgrpc.Option{
+ otelgrpc.WithPropagators(
+ propagation.NewCompositeTextMapPropagator(
+ propagation.TraceContext{},
+ propagation.Baggage{},
+ ),
+ ),
+ otelgrpc.WithTracerProvider(
+ traceProvider,
+ ),
+ }
+
+ cfg.logger.Debug(
+ "distributed tracing enabled",
+ zap.String("address", cfg.DistributedTracingAddress),
+ zap.String("service-name", cfg.DistributedTracingServiceName),
+ zap.String("service-instance-id", cfg.DistributedTracingServiceInstanceID),
+ zap.Int("sampling-rate", cfg.DistributedTracingSamplingRatePerMillion),
+ )
+
+ return &tracingExporter{
+ exporter: exporter,
+ opts: options,
+ provider: traceProvider,
+ }, nil
+}
+
+func (te *tracingExporter) Close(ctx context.Context) {
+ if te.provider != nil {
+ te.provider.Shutdown(ctx)
+ }
+ if te.exporter != nil {
+ te.exporter.Shutdown(ctx)
+ }
+}
+
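+// determineSampler returns a never-sampling sampler when the rate is 0, and
+// otherwise a TraceID-ratio-based sampler scaled to samples per million spans.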
+func determineSampler(samplingRate int) tracesdk.Sampler {
+ sampler := tracesdk.NeverSample()
+ if samplingRate == 0 {
+ return sampler
+ }
+ return tracesdk.TraceIDRatioBased(float64(samplingRate) / float64(maxSamplingRatePerMillion))
+}
+
+// As the tracing service instance ID must be unique, it should
+// never use the empty default string value; the resource is only
+// set if the ID is a non-empty string.
+func determineResourceWithIDKey(serviceInstanceID string) *resource.Resource {
+ if serviceInstanceID != "" {
+ return resource.NewSchemaless(
+ (semconv.ServiceInstanceIDKey.String(serviceInstanceID)),
+ )
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/doc.go b/vendor/go.etcd.io/etcd/server/v3/embed/doc.go
new file mode 100644
index 0000000..3449855
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package embed provides bindings for embedding an etcd server in a program.
+
+Launch an embedded etcd server using the configuration defaults:
+
+ import (
+ "log"
+ "time"
+
+ "go.etcd.io/etcd/server/v3/embed"
+ )
+
+ func main() {
+ cfg := embed.NewConfig()
+ cfg.Dir = "default.etcd"
+ e, err := embed.StartEtcd(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer e.Close()
+ select {
+ case <-e.Server.ReadyNotify():
+ log.Printf("Server is ready!")
+ case <-time.After(60 * time.Second):
+ e.Server.Stop() // trigger a shutdown
+ log.Printf("Server took too long to start!")
+ }
+ log.Fatal(<-e.Err())
+ }
+*/
+package embed
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go
new file mode 100644
index 0000000..95c0d6d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go
@@ -0,0 +1,952 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ defaultLog "log"
+ "math"
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/soheilhy/cmux"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/client/v3/credentials"
+ "go.etcd.io/etcd/pkg/v3/debugutil"
+ runtimeutil "go.etcd.io/etcd/pkg/v3/runtime"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/features"
+ "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/verify"
+)
+
+const (
+ // internal fd usage includes disk usage and transport usage.
+ // To read/write snapshot, snap pkg needs 1. In the normal case, wal pkg needs
+ // at most 2 to read/lock/write WALs. One case where it needs 2 is to
+ // read all logs after some snapshot index, which are located at the end of
+ // the second-to-last WAL file and the head of the last one. For purging, it needs to
+ // read the directory, so it needs 1. For the fd monitor, it needs 1.
+ // For transport, rafthttp builds two long-polling connections and at most
+ // four temporary connections with each member. There are at most 9 members
+ // in a cluster, so it should reserve 96.
+ // For safety, we set the total reserved number to 150.
+ reservedInternalFDNum = 150
+)
+
+// Etcd contains a running etcd server and its listeners.
+type Etcd struct {
+ Peers []*peerListener
+ Clients []net.Listener
+ // a map of contexts for the servers that serve client requests.
+ sctxs map[string]*serveCtx
+ metricsListeners []net.Listener
+
+ tracingExporterShutdown func()
+
+ Server *etcdserver.EtcdServer
+
+ cfg Config
+
+ // closeOnce is to ensure `stopc` is closed only once, no matter
+ // how many times the Close() method is called.
+ closeOnce sync.Once
+ // stopc is used to notify the sub goroutines not to send
+ // any errors to `errc`.
+ stopc chan struct{}
+ // errc is used to receive error from sub goroutines (including
+ // client handler, peer handler and metrics handler). It's closed
+ // after all these sub goroutines exit (checked via `wg`). Writers
+ // should avoid writing after `stopc` is closed by selecting on
+ // reading from `stopc`.
+ errc chan error
+
+ // wg is used to track the lifecycle of all sub goroutines which
+ // need to send error back to the `errc`.
+ wg sync.WaitGroup
+}
+
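+// peerListener wraps a peer net.Listener together with the functions used to
+// serve it and to close it.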
+type peerListener struct {
+ net.Listener
+ serve func() error
+ close func(context.Context) error
+}
+
+// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
+// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
+// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
+func StartEtcd(inCfg *Config) (e *Etcd, err error) {
+ if err = inCfg.Validate(); err != nil {
+ return nil, err
+ }
+ serving := false
+ e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
+ cfg := &e.cfg
+ defer func() {
+ if e == nil || err == nil {
+ return
+ }
+ if !serving {
+ // errored before starting gRPC server for serveCtx.serversC
+ for _, sctx := range e.sctxs {
+ sctx.close()
+ }
+ }
+ e.Close()
+ e = nil
+ }()
+
+ if !cfg.SocketOpts.Empty() {
+ cfg.logger.Info(
+ "configuring socket options",
+ zap.Bool("reuse-address", cfg.SocketOpts.ReuseAddress),
+ zap.Bool("reuse-port", cfg.SocketOpts.ReusePort),
+ )
+ }
+ e.cfg.logger.Info(
+ "configuring peer listeners",
+ zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()),
+ )
+ if e.Peers, err = configurePeerListeners(cfg); err != nil {
+ return e, err
+ }
+
+ e.cfg.logger.Info(
+ "configuring client listeners",
+ zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()),
+ )
+ if e.sctxs, err = configureClientListeners(cfg); err != nil {
+ return e, err
+ }
+
+ for _, sctx := range e.sctxs {
+ e.Clients = append(e.Clients, sctx.l)
+ }
+
+ var (
+ urlsmap types.URLsMap
+ token string
+ )
+ memberInitialized := true
+ if !isMemberInitialized(cfg) {
+ memberInitialized = false
+ urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
+ if err != nil {
+ return e, fmt.Errorf("error setting up initial cluster: %w", err)
+ }
+ }
+
+ // AutoCompactionRetention defaults to "0" if not set.
+ if len(cfg.AutoCompactionRetention) == 0 {
+ cfg.AutoCompactionRetention = "0"
+ }
+ autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
+ if err != nil {
+ return e, err
+ }
+
+ backendFreelistType := parseBackendFreelistType(cfg.BackendFreelistType)
+
+ srvcfg := config.ServerConfig{
+ Name: cfg.Name,
+ ClientURLs: cfg.AdvertiseClientUrls,
+ PeerURLs: cfg.AdvertisePeerUrls,
+ DataDir: cfg.Dir,
+ DedicatedWALDir: cfg.WalDir,
+ SnapshotCount: cfg.SnapshotCount,
+ SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
+ MaxSnapFiles: cfg.MaxSnapFiles,
+ MaxWALFiles: cfg.MaxWalFiles,
+ InitialPeerURLsMap: urlsmap,
+ InitialClusterToken: token,
+ DiscoveryURL: cfg.Durl,
+ DiscoveryProxy: cfg.Dproxy,
+ DiscoveryCfg: cfg.DiscoveryCfg,
+ NewCluster: cfg.IsNewCluster(),
+ PeerTLSInfo: cfg.PeerTLSInfo,
+ TickMs: cfg.TickMs,
+ ElectionTicks: cfg.ElectionTicks(),
+ InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
+ AutoCompactionRetention: autoCompactionRetention,
+ AutoCompactionMode: cfg.AutoCompactionMode,
+ QuotaBackendBytes: cfg.QuotaBackendBytes,
+ BackendBatchLimit: cfg.BackendBatchLimit,
+ BackendFreelistType: backendFreelistType,
+ BackendBatchInterval: cfg.BackendBatchInterval,
+ MaxTxnOps: cfg.MaxTxnOps,
+ MaxRequestBytes: cfg.MaxRequestBytes,
+ MaxConcurrentStreams: cfg.MaxConcurrentStreams,
+ SocketOpts: cfg.SocketOpts,
+ StrictReconfigCheck: cfg.StrictReconfigCheck,
+ ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
+ AuthToken: cfg.AuthToken,
+ BcryptCost: cfg.BcryptCost,
+ TokenTTL: cfg.AuthTokenTTL,
+ CORS: cfg.CORS,
+ HostWhitelist: cfg.HostWhitelist,
+ CorruptCheckTime: cfg.CorruptCheckTime,
+ CompactHashCheckTime: cfg.CompactHashCheckTime,
+ PreVote: cfg.PreVote,
+ Logger: cfg.logger,
+ ForceNewCluster: cfg.ForceNewCluster,
+ EnableGRPCGateway: cfg.EnableGRPCGateway,
+ EnableDistributedTracing: cfg.EnableDistributedTracing,
+ UnsafeNoFsync: cfg.UnsafeNoFsync,
+ CompactionBatchLimit: cfg.CompactionBatchLimit,
+ CompactionSleepInterval: cfg.CompactionSleepInterval,
+ WatchProgressNotifyInterval: cfg.WatchProgressNotifyInterval,
+ DowngradeCheckTime: cfg.DowngradeCheckTime,
+ WarningApplyDuration: cfg.WarningApplyDuration,
+ WarningUnaryRequestDuration: cfg.WarningUnaryRequestDuration,
+ MemoryMlock: cfg.MemoryMlock,
+ BootstrapDefragThresholdMegabytes: cfg.BootstrapDefragThresholdMegabytes,
+ MaxLearners: cfg.MaxLearners,
+ V2Deprecation: cfg.V2DeprecationEffective(),
+ ExperimentalLocalAddress: cfg.InferLocalAddr(),
+ ServerFeatureGate: cfg.ServerFeatureGate,
+ Metrics: cfg.Metrics,
+ }
+
+ if srvcfg.EnableDistributedTracing {
+ tctx := context.Background()
+ tracingExporter, terr := newTracingExporter(tctx, cfg)
+ if terr != nil {
+ return e, terr
+ }
+ e.tracingExporterShutdown = func() {
+ tracingExporter.Close(tctx)
+ }
+ srvcfg.TracerOptions = tracingExporter.opts
+
+ e.cfg.logger.Info(
+ "distributed tracing setup enabled",
+ )
+ }
+
+ srvcfg.PeerTLSInfo.LocalAddr = srvcfg.ExperimentalLocalAddress
+
+ print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
+
+ if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
+ return e, err
+ }
+
+ // buffer channel so goroutines on closed connections won't wait forever
+ e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
+
+ // newly started member ("memberInitialized==false")
+ // does not need corruption check
+ if memberInitialized && srvcfg.ServerFeatureGate.Enabled(features.InitialCorruptCheck) {
+ if err = e.Server.CorruptionChecker().InitialCheck(); err != nil {
+ // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
+ // (nothing to close since rafthttp transports have not been started)
+
+ e.cfg.logger.Error("checkInitialHashKV failed", zap.Error(err))
+ e.Server.Cleanup()
+ e.Server = nil
+ return e, err
+ }
+ }
+ e.Server.Start()
+
+ e.servePeers()
+
+ e.serveClients()
+
+ if err = e.serveMetrics(); err != nil {
+ return e, err
+ }
+
+ e.cfg.logger.Info(
+ "now serving peer/client/metrics",
+ zap.String("local-member-id", e.Server.MemberID().String()),
+ zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerURLs()),
+ zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()),
+ zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()),
+ zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
+ )
+ serving = true
+ return e, nil
+}
+
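+// print logs the effective embed and server configuration when the etcd server starts.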
+func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized bool) {
+ cors := make([]string, 0, len(ec.CORS))
+ for v := range ec.CORS {
+ cors = append(cors, v)
+ }
+ sort.Strings(cors)
+
+ hss := make([]string, 0, len(ec.HostWhitelist))
+ for v := range ec.HostWhitelist {
+ hss = append(hss, v)
+ }
+ sort.Strings(hss)
+
+ quota := ec.QuotaBackendBytes
+ if quota == 0 {
+ quota = storage.DefaultQuotaBytes
+ }
+
+ lg.Info(
+ "starting an etcd server",
+ zap.String("etcd-version", version.Version),
+ zap.String("git-sha", version.GitSHA),
+ zap.String("go-version", runtime.Version()),
+ zap.String("go-os", runtime.GOOS),
+ zap.String("go-arch", runtime.GOARCH),
+ zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)),
+ zap.Int("max-cpu-available", runtime.NumCPU()),
+ zap.Bool("member-initialized", memberInitialized),
+ zap.String("name", sc.Name),
+ zap.String("data-dir", sc.DataDir),
+ zap.String("wal-dir", ec.WalDir),
+ zap.String("wal-dir-dedicated", sc.DedicatedWALDir),
+ zap.String("member-dir", sc.MemberDir()),
+ zap.Bool("force-new-cluster", sc.ForceNewCluster),
+ zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)),
+ zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
+ zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
+ zap.Uint64("snapshot-count", sc.SnapshotCount),
+ zap.Uint("max-wals", sc.MaxWALFiles),
+ zap.Uint("max-snapshots", sc.MaxSnapFiles),
+ zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
+ zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerURLs()),
+ zap.Strings("listen-peer-urls", ec.getListenPeerURLs()),
+ zap.Strings("advertise-client-urls", ec.getAdvertiseClientURLs()),
+ zap.Strings("listen-client-urls", ec.getListenClientURLs()),
+ zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
+ zap.String("experimental-local-address", sc.ExperimentalLocalAddress),
+ zap.Strings("cors", cors),
+ zap.Strings("host-whitelist", hss),
+ zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
+ zap.String("initial-cluster-state", ec.ClusterState),
+ zap.String("initial-cluster-token", sc.InitialClusterToken),
+ zap.Int64("quota-backend-bytes", quota),
+ zap.Uint("max-request-bytes", sc.MaxRequestBytes),
+ zap.Uint32("max-concurrent-streams", sc.MaxConcurrentStreams),
+
+ zap.Bool("pre-vote", sc.PreVote),
+ zap.String(ServerFeatureGateFlagName, sc.ServerFeatureGate.String()),
+ zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
+ zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
+ zap.Duration("compact-check-time-interval", sc.CompactHashCheckTime),
+ zap.String("auto-compaction-mode", sc.AutoCompactionMode),
+ zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
+ zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
+ zap.String("discovery-url", sc.DiscoveryURL),
+ zap.String("discovery-proxy", sc.DiscoveryProxy),
+
+ zap.String("discovery-token", sc.DiscoveryCfg.Token),
+ zap.String("discovery-endpoints", strings.Join(sc.DiscoveryCfg.Endpoints, ",")),
+ zap.String("discovery-dial-timeout", sc.DiscoveryCfg.DialTimeout.String()),
+ zap.String("discovery-request-timeout", sc.DiscoveryCfg.RequestTimeout.String()),
+ zap.String("discovery-keepalive-time", sc.DiscoveryCfg.KeepAliveTime.String()),
+ zap.String("discovery-keepalive-timeout", sc.DiscoveryCfg.KeepAliveTimeout.String()),
+ zap.Bool("discovery-insecure-transport", sc.DiscoveryCfg.Secure.InsecureTransport),
+ zap.Bool("discovery-insecure-skip-tls-verify", sc.DiscoveryCfg.Secure.InsecureSkipVerify),
+ zap.String("discovery-cert", sc.DiscoveryCfg.Secure.Cert),
+ zap.String("discovery-key", sc.DiscoveryCfg.Secure.Key),
+ zap.String("discovery-cacert", sc.DiscoveryCfg.Secure.Cacert),
+ zap.String("discovery-user", sc.DiscoveryCfg.Auth.Username),
+
+ zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()),
+ zap.Int("max-learners", sc.MaxLearners),
+
+ zap.String("v2-deprecation", string(ec.V2Deprecation)),
+ )
+}
+
+// Config returns the current configuration.
+func (e *Etcd) Config() Config {
+ return e.cfg
+}
+
+// Close gracefully shuts down all servers/listeners.
+// Client requests will be terminated with request timeout.
+// After the timeout, any remaining requests are forcibly closed.
+//
+// The rough workflow to shut down etcd:
+// 1. close the `stopc` channel, so that all error handlers (child
+// goroutines) won't send back any errors anymore;
+// 2. stop the http and grpc servers gracefully, within request timeout;
+// 3. close all client and metrics listeners, so that etcd server
+// stops receiving any new connection;
+// 4. call the cancel function to close the gateway context, so that
+// all gateway connections are closed.
+// 5. stop etcd server gracefully, and ensure the main raft loop
+// goroutine is stopped;
+// 6. stop all peer listeners, so that the server stops receiving peer connections
+// and messages (wait up to 1 second);
+// 7. wait for all child goroutines (i.e. client handlers, peer handlers
+// and metrics handlers) to exit;
+// 8. close the `errc` channel to release the resource. Note that it's only
+// safe to close the `errc` after step 7 above is done, otherwise the
+// child goroutines may send errors back to already closed `errc` channel.
+func (e *Etcd) Close() {
+ fields := []zap.Field{
+ zap.String("name", e.cfg.Name),
+ zap.String("data-dir", e.cfg.Dir),
+ zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()),
+ }
+ lg := e.GetLogger()
+ lg.Info("closing etcd server", fields...)
+ defer func() {
+ lg.Info("closed etcd server", fields...)
+ verify.MustVerifyIfEnabled(verify.Config{
+ Logger: lg,
+ DataDir: e.cfg.Dir,
+ ExactIndex: false,
+ })
+ lg.Sync()
+ }()
+
+ e.closeOnce.Do(func() {
+ close(e.stopc)
+ })
+
+ // close client requests with request timeout
+ timeout := 2 * time.Second
+ if e.Server != nil {
+ timeout = e.Server.Cfg.ReqTimeout()
+ }
+ for _, sctx := range e.sctxs {
+ for ss := range sctx.serversC {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ stopServers(ctx, ss)
+ cancel()
+ }
+ }
+
+ for _, sctx := range e.sctxs {
+ sctx.cancel()
+ }
+
+ for i := range e.Clients {
+ if e.Clients[i] != nil {
+ e.Clients[i].Close()
+ }
+ }
+
+ for i := range e.metricsListeners {
+ e.metricsListeners[i].Close()
+ }
+
+ // shutdown tracing exporter
+ if e.tracingExporterShutdown != nil {
+ e.tracingExporterShutdown()
+ }
+
+ // close rafthttp transports
+ if e.Server != nil {
+ e.Server.Stop()
+ }
+
+ // close all idle connections in peer handler (wait up to 1 second)
+ for i := range e.Peers {
+ if e.Peers[i] != nil && e.Peers[i].close != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ e.Peers[i].close(ctx)
+ cancel()
+ }
+ }
+ if e.errc != nil {
+ e.wg.Wait()
+ close(e.errc)
+ }
+}
+
+func stopServers(ctx context.Context, ss *servers) {
+ // first, close the http.Server
+ if ss.http != nil {
+ ss.http.Shutdown(ctx)
+ }
+ if ss.grpc == nil {
+ return
+ }
+ // do not grpc.Server.GracefulStop when grpc runs under http server
+ // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
+ // and https://github.com/etcd-io/etcd/issues/8916
+ if ss.secure && ss.http != nil {
+ ss.grpc.Stop()
+ return
+ }
+
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ // close listeners to stop accepting new connections,
+ // will block on any existing transports
+ ss.grpc.GracefulStop()
+ }()
+
+ // wait until all pending RPCs are finished
+ select {
+ case <-ch:
+ case <-ctx.Done():
+ // took too long, manually close open transports
+ // e.g. watch streams
+ ss.grpc.Stop()
+
+ // concurrent GracefulStop should be interrupted
+ <-ch
+ }
+}
+
+// Err returns the channel used to report errors during etcd run/shutdown.
+// Since etcd 3.5 the channel is closed once etcd has shut down.
+func (e *Etcd) Err() <-chan error {
+ return e.errc
+}
+
+func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
+ if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ if err = cfg.PeerSelfCert(); err != nil {
+ cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
+ }
+ updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Info(
+ "starting with peer TLS",
+ zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)),
+ zap.Strings("cipher-suites", cfg.CipherSuites),
+ )
+ }
+
+ peers = make([]*peerListener, len(cfg.ListenPeerUrls))
+ defer func() {
+ if err == nil {
+ return
+ }
+ for i := range peers {
+ if peers[i] != nil && peers[i].close != nil {
+ cfg.logger.Warn(
+ "closing peer listener",
+ zap.String("address", cfg.ListenPeerUrls[i].String()),
+ zap.Error(err),
+ )
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ peers[i].close(ctx)
+ cancel()
+ }
+ }
+ }()
+
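+ // create one listener per peer URL, applying peer TLS, socket options, and the
+ // rafthttp connection read/write timeouts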
+ for i, u := range cfg.ListenPeerUrls {
+ if u.Scheme == "http" {
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
+ }
+ if cfg.PeerTLSInfo.ClientCertAuth {
+ cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String()))
+ }
+ }
+ peers[i] = &peerListener{close: func(context.Context) error { return nil }}
+ peers[i].Listener, err = transport.NewListenerWithOpts(u.Host, u.Scheme,
+ transport.WithTLSInfo(&cfg.PeerTLSInfo),
+ transport.WithSocketOpts(&cfg.SocketOpts),
+ transport.WithTimeout(rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout),
+ )
+ if err != nil {
+ cfg.logger.Error("creating peer listener failed", zap.Error(err))
+ return nil, err
+ }
+ // once serving starts, this close func is overwritten with one that calls 'http.Server.Shutdown'
+ peers[i].close = func(context.Context) error {
+ return peers[i].Listener.Close()
+ }
+ }
+ return peers, nil
+}
+
+// configure peer handlers after rafthttp.Transport has started
+func (e *Etcd) servePeers() {
+ ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
+
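+ // wrap each peer listener in a cmux-backed http.Server running the peer handler;
+ // p.serve starts it and p.close shuts it down gracefully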
+ for _, p := range e.Peers {
+ u := p.Listener.Addr().String()
+ m := cmux.New(p.Listener)
+ srv := &http.Server{
+ Handler: ph,
+ ReadTimeout: 5 * time.Minute,
+ ErrorLog: defaultLog.New(io.Discard, "", 0), // do not log user error
+ }
+ go srv.Serve(m.Match(cmux.Any()))
+ p.serve = func() error {
+ e.cfg.logger.Info(
+ "cmux::serve",
+ zap.String("address", u),
+ )
+ return m.Serve()
+ }
+ p.close = func(ctx context.Context) error {
+ // gracefully shutdown http.Server
+ // close open listeners, idle connections
+ // until context cancel or time-out
+ e.cfg.logger.Info(
+ "stopping serving peer traffic",
+ zap.String("address", u),
+ )
+ srv.Shutdown(ctx)
+ e.cfg.logger.Info(
+ "stopped serving peer traffic",
+ zap.String("address", u),
+ )
+ m.Close()
+ return nil
+ }
+ }
+
+ // start each peer server in its own goroutine
+ for _, pl := range e.Peers {
+ l := pl
+ e.startHandler(func() error {
+ u := l.Addr().String()
+ e.cfg.logger.Info(
+ "serving peer traffic",
+ zap.String("address", u),
+ )
+ return l.serve()
+ })
+ }
+}
+
+func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
+ if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ if err = cfg.ClientSelfCert(); err != nil {
+ cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
+ }
+ updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ if cfg.EnablePprof {
+ cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
+ }
+
+ sctxs = make(map[string]*serveCtx)
+ for _, u := range append(cfg.ListenClientUrls, cfg.ListenClientHttpUrls...) {
+ if u.Scheme == "http" || u.Scheme == "unix" {
+ if !cfg.ClientTLSInfo.Empty() {
+ cfg.logger.Warn("scheme is http or unix while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
+ }
+ if cfg.ClientTLSInfo.ClientCertAuth {
+ cfg.logger.Warn("scheme is http or unix while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
+ }
+ }
+ if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
+ return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
+ }
+ }
+
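+ // group client listen URLs by resolved address so a single serveCtx (and listener) is shared per address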
+ for _, u := range cfg.ListenClientUrls {
+ addr, secure, network := resolveURL(u)
+ sctx := sctxs[addr]
+ if sctx == nil {
+ sctx = newServeCtx(cfg.logger)
+ sctxs[addr] = sctx
+ }
+ sctx.secure = sctx.secure || secure
+ sctx.insecure = sctx.insecure || !secure
+ sctx.scheme = u.Scheme
+ sctx.addr = addr
+ sctx.network = network
+ }
+ for _, u := range cfg.ListenClientHttpUrls {
+ addr, secure, network := resolveURL(u)
+
+ sctx := sctxs[addr]
+ if sctx == nil {
+ sctx = newServeCtx(cfg.logger)
+ sctxs[addr] = sctx
+ } else if !sctx.httpOnly {
+ return nil, fmt.Errorf("cannot bind both --listen-client-urls and --listen-client-http-urls on the same url %s", u.String())
+ }
+ sctx.secure = sctx.secure || secure
+ sctx.insecure = sctx.insecure || !secure
+ sctx.scheme = u.Scheme
+ sctx.addr = addr
+ sctx.network = network
+ sctx.httpOnly = true
+ }
+
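+ // create the listener for each serve context, enforce the process file-descriptor
+ // limit, and register user, pprof, and trace handlers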
+ for _, sctx := range sctxs {
+ if sctx.l, err = transport.NewListenerWithOpts(sctx.addr, sctx.scheme,
+ transport.WithSocketOpts(&cfg.SocketOpts),
+ transport.WithSkipTLSInfoCheck(true),
+ ); err != nil {
+ return nil, err
+ }
+ // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
+ // hosts that disable ipv6. So, use the address given by the user.
+
+ if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
+ if fdLimit <= reservedInternalFDNum {
+ cfg.logger.Fatal(
+ "file descriptor limit of etcd process is too low; please set higher",
+ zap.Uint64("limit", fdLimit),
+ zap.Int("recommended-limit", reservedInternalFDNum),
+ )
+ }
+ sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
+ }
+
+ defer func(sctx *serveCtx) {
+ if err == nil || sctx.l == nil {
+ return
+ }
+ sctx.l.Close()
+ cfg.logger.Warn(
+ "closing peer listener",
+ zap.String("address", sctx.addr),
+ zap.Error(err),
+ )
+ }(sctx)
+ for k := range cfg.UserHandlers {
+ sctx.userHandlers[k] = cfg.UserHandlers[k]
+ }
+ sctx.serviceRegister = cfg.ServiceRegister
+ if cfg.EnablePprof || cfg.LogLevel == "debug" {
+ sctx.registerPprof()
+ }
+ if cfg.LogLevel == "debug" {
+ sctx.registerTrace()
+ }
+ }
+ return sctxs, nil
+}
+
+func resolveURL(u url.URL) (addr string, secure bool, network string) {
+ addr = u.Host
+ network = "tcp"
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ addr = u.Host + u.Path
+ network = "unix"
+ }
+ secure = u.Scheme == "https" || u.Scheme == "unixs"
+ return addr, secure, network
+}
+
+func (e *Etcd) serveClients() {
+ if !e.cfg.ClientTLSInfo.Empty() {
+ e.cfg.logger.Info(
+ "starting with client TLS",
+ zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)),
+ zap.Strings("cipher-suites", e.cfg.CipherSuites),
+ )
+ }
+
+ // Start a client server goroutine for each listen address
+ mux := http.NewServeMux()
+ etcdhttp.HandleDebug(mux)
+ etcdhttp.HandleVersion(mux, e.Server)
+ etcdhttp.HandleMetrics(mux)
+ etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server)
+
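+ // translate the configured gRPC keepalive settings into server options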
+ var gopts []grpc.ServerOption
+ if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: e.cfg.GRPCKeepAliveMinTime,
+ PermitWithoutStream: false,
+ }))
+ }
+ if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
+ e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: e.cfg.GRPCKeepAliveInterval,
+ Timeout: e.cfg.GRPCKeepAliveTimeout,
+ }))
+ }
+ gopts = append(gopts, e.cfg.GRPCAdditionalServerOptions...)
+
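+ // if any listen URL is http-only, gRPC and HTTP traffic are split across dedicated listeners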
+ splitHTTP := false
+ for _, sctx := range e.sctxs {
+ if sctx.httpOnly {
+ splitHTTP = true
+ }
+ }
+
+ // start each client server in its own goroutine
+ for _, sctx := range e.sctxs {
+ s := sctx
+ e.startHandler(func() error {
+ return s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, e.grpcGatewayDial(splitHTTP), splitHTTP, gopts...)
+ })
+ }
+}
+
+func (e *Etcd) grpcGatewayDial(splitHTTP bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
+ if !e.cfg.EnableGRPCGateway {
+ return nil
+ }
+ sctx := e.pickGRPCGatewayServeContext(splitHTTP)
+ addr := sctx.addr
+ if network := sctx.network; network == "unix" {
+ // explicitly define unix network for gRPC socket support
+ addr = fmt.Sprintf("%s:%s", network, addr)
+ }
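+ // the gateway dials this process's own gRPC endpoint, so allow arbitrarily large responses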
+ opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))}
+ if sctx.secure {
+ tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig()
+ if tlsErr != nil {
+ return func(ctx context.Context) (*grpc.ClientConn, error) {
+ return nil, tlsErr
+ }
+ }
+ dtls := tlscfg.Clone()
+ // trust local server
+ dtls.InsecureSkipVerify = true
+ opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(dtls)))
+ } else {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ return func(ctx context.Context) (*grpc.ClientConn, error) {
+ conn, err := grpc.DialContext(ctx, addr, opts...)
+ if err != nil {
+ sctx.lg.Error("grpc gateway failed to dial", zap.String("addr", addr), zap.Error(err))
+ return nil, err
+ }
+ return conn, err
+ }
+}
+
+func (e *Etcd) pickGRPCGatewayServeContext(splitHTTP bool) *serveCtx {
+ for _, sctx := range e.sctxs {
+ if !splitHTTP || !sctx.httpOnly {
+ return sctx
+ }
+ }
+ panic("Expect at least one context able to serve grpc")
+}
+
+var ErrMissingClientTLSInfoForMetricsURL = errors.New("client TLS key/cert (--cert-file, --key-file) must be provided for metrics secure url")
+
+func (e *Etcd) createMetricsListener(murl url.URL) (net.Listener, error) {
+ tlsInfo := &e.cfg.ClientTLSInfo
+ switch murl.Scheme {
+ case "http":
+ tlsInfo = nil
+ case "https", "unixs":
+ if e.cfg.ClientTLSInfo.Empty() {
+ return nil, ErrMissingClientTLSInfoForMetricsURL
+ }
+ }
+ return transport.NewListenerWithOpts(murl.Host, murl.Scheme,
+ transport.WithTLSInfo(tlsInfo),
+ transport.WithSocketOpts(&e.cfg.SocketOpts),
+ )
+}
+
+func (e *Etcd) serveMetrics() (err error) {
+ if len(e.cfg.ListenMetricsUrls) > 0 {
+ metricsMux := http.NewServeMux()
+ etcdhttp.HandleMetrics(metricsMux)
+ etcdhttp.HandleHealth(e.cfg.logger, metricsMux, e.Server)
+
+ for _, murl := range e.cfg.ListenMetricsUrls {
+ u := murl
+ ml, err := e.createMetricsListener(murl)
+ if err != nil {
+ return err
+ }
+ e.metricsListeners = append(e.metricsListeners, ml)
+
+ e.startHandler(func() error {
+ e.cfg.logger.Info(
+ "serving metrics",
+ zap.String("address", u.String()),
+ )
+ return http.Serve(ml, metricsMux)
+ })
+ }
+ }
+ return nil
+}
+
+func (e *Etcd) startHandler(handler func() error) {
+ // start each handler in a separate goroutine
+ e.wg.Add(1)
+ go func() {
+ defer e.wg.Done()
+ e.errHandler(handler())
+ }()
+}
+
+func (e *Etcd) errHandler(err error) {
+ if err != nil {
+ e.GetLogger().Error("setting up serving from embedded etcd failed.", zap.Error(err))
+ }
+ select {
+ case <-e.stopc:
+ return
+ default:
+ }
+ select {
+ case <-e.stopc:
+ case e.errc <- err:
+ }
+}
+
+// GetLogger returns the logger.
+func (e *Etcd) GetLogger() *zap.Logger {
+ e.cfg.loggerMu.RLock()
+ l := e.cfg.logger
+ e.cfg.loggerMu.RUnlock()
+ return l
+}
+
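+// parseCompactionRetention interprets retention either as a bare non-negative integer
+// (revisions in "revision" mode, hours in "periodic" mode) or as a duration string such as "5m".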
+func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
+ h, err := strconv.Atoi(retention)
+ if err == nil && h >= 0 {
+ switch mode {
+ case CompactorModeRevision:
+ ret = time.Duration(int64(h))
+ case CompactorModePeriodic:
+ ret = time.Duration(int64(h)) * time.Hour
+ case "":
+ return 0, errors.New("--auto-compaction-mode is undefined")
+ }
+ } else {
+ // periodic compaction
+ ret, err = time.ParseDuration(retention)
+ if err != nil {
+ return 0, fmt.Errorf("error parsing CompactionRetention: %w", err)
+ }
+ }
+ return ret, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/serve.go b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go
new file mode 100644
index 0000000..e50529d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go
@@ -0,0 +1,558 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ defaultLog "log"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+
+ gw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/soheilhy/cmux"
+ "github.com/tmc/grpc-websocket-proxy/wsproxy"
+ "go.uber.org/zap"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/pkg/v3/debugutil"
+ "go.etcd.io/etcd/pkg/v3/httputil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ v3electiongw "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ v3lockgw "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
+)
+
+type serveCtx struct {
+ lg *zap.Logger
+ l net.Listener
+
+ scheme string
+ addr string
+ network string
+ secure bool
+ insecure bool
+ httpOnly bool
+
+ // ctx is used to control the grpc gateway. Terminate the grpc gateway
+ // by calling `cancel` when shutting down etcd.
+ ctx context.Context
+ cancel context.CancelFunc
+
+ userHandlers map[string]http.Handler
+ serviceRegister func(*grpc.Server)
+
+ // serversC is used to receive the http and grpc server objects (created
+ // in `serve`), both of which will be closed when shutting down the etcd.
+ // Close it when `serve` returns or when etcd fails to bootstrap.
+ serversC chan *servers
+ // closeOnce is to ensure `serversC` is closed only once.
+ closeOnce sync.Once
+
+ // wg is used to track the lifecycle of all sub goroutines created by `serve`.
+ wg sync.WaitGroup
+}
+
+func (sctx *serveCtx) startHandler(errHandler func(error), handler func() error) {
+ // start each handler in a separate goroutine
+ sctx.wg.Add(1)
+ go func() {
+ defer sctx.wg.Done()
+ err := handler()
+ if errHandler != nil {
+ errHandler(err)
+ }
+ }()
+}
+
+type servers struct {
+ secure bool
+ grpc *grpc.Server
+ http *http.Server
+}
+
+func newServeCtx(lg *zap.Logger) *serveCtx {
+ ctx, cancel := context.WithCancel(context.Background())
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &serveCtx{
+ lg: lg,
+ ctx: ctx,
+ cancel: cancel,
+ userHandlers: make(map[string]http.Handler),
+ serversC: make(chan *servers, 2), // capacity 2 in case both sctx.insecure and sctx.secure are true
+ }
+}
+
+// serve accepts incoming connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+func (sctx *serveCtx) serve(
+ s *etcdserver.EtcdServer,
+ tlsinfo *transport.TLSInfo,
+ handler http.Handler,
+ errHandler func(error),
+ grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error),
+ splitHTTP bool,
+ gopts ...grpc.ServerOption,
+) (err error) {
+ logger := defaultLog.New(io.Discard, "etcdhttp", 0)
+
+ // Make sure serversC is closed even if we prematurely exit the function.
+ defer sctx.close()
+
+ select {
+ case <-s.StoppingNotify():
+ return errors.New("server is stopping")
+ case <-s.ReadyNotify():
+ }
+
+ sctx.lg.Info("ready to serve client requests")
+
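+ // a cmux multiplexes gRPC (HTTP/2) and plain HTTP traffic over the same listener,
+ // unless the traffic has been split onto dedicated URLs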
+ m := cmux.New(sctx.l)
+ var server func() error
+ onlyGRPC := splitHTTP && !sctx.httpOnly
+ onlyHTTP := splitHTTP && sctx.httpOnly
+ grpcEnabled := !onlyHTTP
+ httpEnabled := !onlyGRPC
+
+ v3c := v3client.New(s)
+ servElection := v3election.NewElectionServer(v3c)
+ servLock := v3lock.NewLockServer(v3c)
+
+ var gwmux *gw.ServeMux
+ if s.Cfg.EnableGRPCGateway {
+ // GRPC gateway connects to grpc server via connection provided by grpc dial.
+ gwmux, err = sctx.registerGateway(grpcDialForRestGatewayBackends)
+ if err != nil {
+ sctx.lg.Error("registerGateway failed", zap.Error(err))
+ return err
+ }
+ }
+ var traffic string
+ switch {
+ case onlyGRPC:
+ traffic = "grpc"
+ case onlyHTTP:
+ traffic = "http"
+ default:
+ traffic = "grpc+http"
+ }
+
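+ // serve plaintext traffic (h2c gRPC and/or HTTP) for listen URLs without TLS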
+ if sctx.insecure {
+ var gs *grpc.Server
+ var srv *http.Server
+ if httpEnabled {
+ httpmux := sctx.createMux(gwmux, handler)
+ srv = &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ ErrorLog: logger, // do not log user error
+ }
+ if err = configureHTTPServer(srv, s.Cfg); err != nil {
+ sctx.lg.Error("Configure http server failed", zap.Error(err))
+ return err
+ }
+ }
+ if grpcEnabled {
+ gs = v3rpc.Server(s, nil, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ defer func(gs *grpc.Server) {
+ if err != nil {
+ sctx.lg.Warn("stopping insecure grpc server due to error", zap.Error(err))
+ gs.Stop()
+ sctx.lg.Warn("stopped insecure grpc server due to error", zap.Error(err))
+ }
+ }(gs)
+ }
+ if onlyGRPC {
+ server = func() error {
+ return gs.Serve(sctx.l)
+ }
+ } else {
+ server = m.Serve
+
+ httpl := m.Match(cmux.HTTP1())
+ sctx.startHandler(errHandler, func() error {
+ return srv.Serve(httpl)
+ })
+
+ if grpcEnabled {
+ grpcl := m.Match(cmux.HTTP2())
+ sctx.startHandler(errHandler, func() error {
+ return gs.Serve(grpcl)
+ })
+ }
+ }
+
+ sctx.serversC <- &servers{grpc: gs, http: srv}
+ sctx.lg.Info(
+ "serving client traffic insecurely; this is strongly discouraged!",
+ zap.String("traffic", traffic),
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ }
+
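+ // serve TLS traffic for listen URLs using https:// or unixs://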
+ if sctx.secure {
+ var gs *grpc.Server
+ var srv *http.Server
+
+ tlscfg, tlsErr := tlsinfo.ServerConfig()
+ if tlsErr != nil {
+ return tlsErr
+ }
+
+ if grpcEnabled {
+ gs = v3rpc.Server(s, tlscfg, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ defer func(gs *grpc.Server) {
+ if err != nil {
+ sctx.lg.Warn("stopping secure grpc server due to error", zap.Error(err))
+ gs.Stop()
+ sctx.lg.Warn("stopped secure grpc server due to error", zap.Error(err))
+ }
+ }(gs)
+ }
+ if httpEnabled {
+ if grpcEnabled {
+ handler = grpcHandlerFunc(gs, handler)
+ }
+ httpmux := sctx.createMux(gwmux, handler)
+
+ srv = &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ TLSConfig: tlscfg,
+ ErrorLog: logger, // do not log user error
+ }
+ if err = configureHTTPServer(srv, s.Cfg); err != nil {
+ sctx.lg.Error("Configure https server failed", zap.Error(err))
+ return err
+ }
+ }
+
+ if onlyGRPC {
+ server = func() error { return gs.Serve(sctx.l) }
+ } else {
+ server = m.Serve
+
+ tlsl, tlsErr := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
+ if tlsErr != nil {
+ return tlsErr
+ }
+ sctx.startHandler(errHandler, func() error {
+ return srv.Serve(tlsl)
+ })
+ }
+
+ sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
+ sctx.lg.Info(
+ "serving client traffic securely",
+ zap.String("traffic", traffic),
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ }
+
+ err = server()
+ sctx.close()
+ sctx.wg.Wait()
+ return err
+}
+
+func configureHTTPServer(srv *http.Server, cfg config.ServerConfig) error {
+ // todo (ahrtr): should we support configuring other parameters in the future as well?
+ return http2.ConfigureServer(srv, &http2.Server{
+ MaxConcurrentStreams: cfg.MaxConcurrentStreams,
+ })
+}
+
+// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
+// connections or otherHandler otherwise. Given in gRPC docs.
+func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
+ if otherHandler == nil {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ grpcServer.ServeHTTP(w, r)
+ })
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+ grpcServer.ServeHTTP(w, r)
+ } else {
+ otherHandler.ServeHTTP(w, r)
+ }
+ })
+}
+
+type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
+
+func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.ClientConn, error)) (*gw.ServeMux, error) {
+ ctx := sctx.ctx
+
+ conn, err := dial(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Refer to https://grpc-ecosystem.github.io/grpc-gateway/docs/mapping/customizing_your_gateway/
+ gwmux := gw.NewServeMux(
+ gw.WithMarshalerOption(gw.MIMEWildcard,
+ &gw.HTTPBodyMarshaler{
+ Marshaler: &gw.JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ UseProtoNames: true,
+ EmitUnpopulated: false,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ },
+ ),
+ )
+
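+ // register every v3 gateway handler (KV, Watch, Lease, Cluster, Maintenance, Auth,
+ // Lock, Election) over the shared client connection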
+ handlers := []registerHandlerFunc{
+ etcdservergw.RegisterKVHandler,
+ etcdservergw.RegisterWatchHandler,
+ etcdservergw.RegisterLeaseHandler,
+ etcdservergw.RegisterClusterHandler,
+ etcdservergw.RegisterMaintenanceHandler,
+ etcdservergw.RegisterAuthHandler,
+ v3lockgw.RegisterLockHandler,
+ v3electiongw.RegisterElectionHandler,
+ }
+ for _, h := range handlers {
+ if err := h(ctx, gwmux, conn); err != nil {
+ return nil, err
+ }
+ }
+ sctx.startHandler(nil, func() error {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ sctx.lg.Warn(
+ "failed to close connection",
+ zap.String("address", sctx.l.Addr().String()),
+ zap.Error(cerr),
+ )
+ }
+ return nil
+ })
+
+ return gwmux, nil
+}
+
+type wsProxyZapLogger struct {
+ *zap.Logger
+}
+
+func (w wsProxyZapLogger) Warnln(i ...any) {
+ w.Warn(fmt.Sprint(i...))
+}
+
+func (w wsProxyZapLogger) Debugln(i ...any) {
+ w.Debug(fmt.Sprint(i...))
+}
+
+func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
+ httpmux := http.NewServeMux()
+ for path, h := range sctx.userHandlers {
+ httpmux.Handle(path, h)
+ }
+
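+ // expose the grpc-gateway under /v3/, proxying websocket upgrades so streaming
+ // endpoints (e.g. Watch) can be used over HTTP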
+ if gwmux != nil {
+ httpmux.Handle(
+ "/v3/",
+ wsproxy.WebsocketProxy(
+ gwmux,
+ wsproxy.WithRequestMutator(
+ // Default to the POST method for streams
+ func(_ *http.Request, outgoing *http.Request) *http.Request {
+ outgoing.Method = "POST"
+ return outgoing
+ },
+ ),
+ wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
+ wsproxy.WithLogger(wsProxyZapLogger{sctx.lg}),
+ ),
+ )
+ }
+ if handler != nil {
+ httpmux.Handle("/", handler)
+ }
+ return httpmux
+}
+
+// createAccessController wraps the HTTP multiplexer to:
+// - mutate gRPC gateway request paths
+// - check the hostname whitelist
+// Client HTTP requests go through here first.
+func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &accessController{lg: lg, s: s, mux: mux}
+}
+
+type accessController struct {
+ lg *zap.Logger
+ s *etcdserver.EtcdServer
+ mux *http.ServeMux
+}
+
+func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if req == nil {
+ http.Error(rw, "Request is nil", http.StatusBadRequest)
+ return
+ }
+ // redirect for backward compatibilities
+ if req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") {
+ req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1)
+ }
+
+ if req.TLS == nil { // check origin if client connection is not secure
+ host := httputil.GetHostname(req)
+ if !ac.s.AccessController.IsHostWhitelisted(host) {
+ ac.lg.Warn(
+ "rejecting HTTP request to prevent DNS rebinding attacks",
+ zap.String("host", host),
+ )
+ http.Error(rw, errCVE20185702(host), http.StatusMisdirectedRequest)
+ return
+ }
+ } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway &&
+ ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") {
+ for _, chains := range req.TLS.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ if len(chains[0].Subject.CommonName) != 0 {
+ http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", http.StatusBadRequest)
+ return
+ }
+ }
+ }
+
+ // Write CORS header.
+ if ac.s.AccessController.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == http.MethodOptions {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ac.mux.ServeHTTP(rw, req)
+}
+
+// addCORSHeader adds the correct cors headers given an origin
+func addCORSHeader(w http.ResponseWriter, origin string) {
+ w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ w.Header().Add("Access-Control-Allow-Origin", origin)
+ w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
+}
+
+// https://github.com/transmission/transmission/pull/468
+func errCVE20185702(host string) string {
+ return fmt.Sprintf(`
+etcd received your request, but the Host header was unrecognized.
+
+To fix this, choose one of the following options:
+- Enable TLS, then any HTTPS request will be allowed.
+- Add the hostname you want to use to the whitelist in settings.
+ - e.g. etcd --host-whitelist %q
+
+This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702).
+`, host)
+}
+
+// WrapCORS wraps existing handler with CORS.
+// TODO: deprecate this after the v2 proxy is deprecated
+func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler {
+ return &corsHandler{
+ ac: &etcdserver.AccessController{CORS: cors},
+ h: h,
+ }
+}
+
+type corsHandler struct {
+ ac *etcdserver.AccessController
+ h http.Handler
+}
+
+func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if ch.ac.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == http.MethodOptions {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ch.h.ServeHTTP(rw, req)
+}
+
+func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
+ if sctx.userHandlers[s] != nil {
+ sctx.lg.Warn("path is already registered by user handler", zap.String("path", s))
+ return
+ }
+ sctx.userHandlers[s] = h
+}
+
+func (sctx *serveCtx) registerPprof() {
+ for p, h := range debugutil.PProfHandlers() {
+ sctx.registerUserHandler(p, h)
+ }
+}
+
+func (sctx *serveCtx) registerTrace() {
+ reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
+ sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
+ evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
+ sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
+}
+
+func (sctx *serveCtx) close() {
+ sctx.closeOnce.Do(func() {
+ close(sctx.serversC)
+ })
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/util.go b/vendor/go.etcd.io/etcd/server/v3/embed/util.go
new file mode 100644
index 0000000..32efbe6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/util.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "path/filepath"
+
+ "go.etcd.io/etcd/server/v3/storage/wal"
+)
+
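+// isMemberInitialized reports whether a WAL directory already exists for this member,
+// i.e. whether it has been bootstrapped before.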
+func isMemberInitialized(cfg *Config) bool {
+ walDir := cfg.WalDir
+ if walDir == "" {
+ walDir = filepath.Join(cfg.Dir, "member", "wal")
+ }
+ return wal.Exist(walDir)
+}