[VOL-5486] Upgrade library versions

Change-Id: I8b4e88699e03f44ee13e467867f45ae3f0a63c4b
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/go.etcd.io/raft/v3/.gitignore b/vendor/go.etcd.io/raft/v3/.gitignore
new file mode 100644
index 0000000..485dee6
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/vendor/go.etcd.io/raft/v3/.go-version b/vendor/go.etcd.io/raft/v3/.go-version
new file mode 100644
index 0000000..d8c40e5
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/.go-version
@@ -0,0 +1 @@
+1.23.6
diff --git a/vendor/go.etcd.io/raft/v3/.golangci.yaml b/vendor/go.etcd.io/raft/v3/.golangci.yaml
new file mode 100644
index 0000000..d169aa4
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/.golangci.yaml
@@ -0,0 +1,41 @@
+run:
+  timeout: 30m
+  skip-files:
+    - "^zz_generated.*"
+
+issues:
+  max-same-issues: 0
+  # Excluding configuration per-path, per-linter, per-text and per-source
+  exclude-rules:
+    # exclude the ineffassign linter for generated conversion files
+    - path: conversion\.go
+      linters:
+        - ineffassign
+
+linters:
+  disable-all: true
+  enable: # please keep this alphabetized
+  # Don't use soon-to-be-deprecated[1] linters that lead to false positives
+  # https://github.com/golangci/golangci-lint/issues/1841
+  # - deadcode
+  # - structcheck
+  # - varcheck
+    - goimports
+    - ineffassign
+    - revive
+    - staticcheck
+    - stylecheck
+    - unused
+    - unconvert # Remove unnecessary type conversions
+
+linters-settings: # please keep this alphabetized
+  goimports:
+    local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages.
+  staticcheck:
+    checks:
+      - "all"
+      - "-SA1019" # TODO(fix) Using a deprecated function, variable, constant or field
+      - "-SA2002"  # TODO(fix) Called testing.T.FailNow or SkipNow in a goroutine, which isn’t allowed
+  stylecheck:
+    checks:
+      - "ST1019"  # Importing the same package multiple times.
diff --git a/vendor/go.etcd.io/raft/v3/LICENSE b/vendor/go.etcd.io/raft/v3/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.etcd.io/raft/v3/Makefile b/vendor/go.etcd.io/raft/v3/Makefile
new file mode 100644
index 0000000..3ae9d6a
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/Makefile
@@ -0,0 +1,28 @@
+GO_TEST_FLAGS?=
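+# GO_TEST_FLAGS is passed as extra arguments to ./scripts/test.sh by the `test`
+# target below; for example (illustrative): make test GO_TEST_FLAGS='-v -run TestName'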
+
+.PHONY: verify
+verify: verify-gofmt verify-dep verify-lint verify-mod-tidy verify-genproto
+
+.PHONY: verify-gofmt
+verify-gofmt:
+	PASSES="gofmt" ./scripts/test.sh
+
+.PHONY: verify-dep
+verify-dep:
+	PASSES="dep" ./scripts/test.sh
+
+.PHONY: verify-lint
+verify-lint:
+	golangci-lint run
+
+.PHONY: verify-mod-tidy
+verify-mod-tidy:
+	PASSES="mod_tidy" ./scripts/test.sh
+
+.PHONY: verify-genproto
+verify-genproto:
+	PASSES="genproto" ./scripts/test.sh
+
+.PHONY: test
+test:
+	PASSES="unit" ./scripts/test.sh $(GO_TEST_FLAGS)
diff --git a/vendor/go.etcd.io/raft/v3/OWNERS b/vendor/go.etcd.io/raft/v3/OWNERS
new file mode 100644
index 0000000..288f199
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - ahrtr           # Benjamin Wang <benjamin.ahrtr@gmail.com> <benjamin.wang@broadcom.com>
+  - serathius       # Marek Siarkowicz <siarkowicz@google.com> <marek.siarkowicz@gmail.com>
+  - ptabor          # Piotr Tabor <piotr.tabor@gmail.com>
+  - spzala          # Sahdev Zala <spzala@us.ibm.com>
+reviewers:
+  - pav-kv          # Pavel Kalinnikov <pavel@cockroachlabs.com>
+emeritus_approvers:
+  - tbg             # Tobias Grieger <tobias.b.grieger@gmail.com>
diff --git a/vendor/go.etcd.io/raft/v3/README.md b/vendor/go.etcd.io/raft/v3/README.md
new file mode 100644
index 0000000..6527fd9
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/README.md
@@ -0,0 +1,201 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, Hyperledger and more.
+
+Most Raft implementations have a monolithic design, including storage handling, message serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transport layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader for a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader for a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce synchronized disk I/O
+- Writing to the leader's disk in parallel
+- Internal proposal redirection from followers to leader
+- Automatic stepping down when the leader loses quorum
+- Protection against unbounded log growth when quorum is lost
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+  storage := raft.NewMemoryStorage()
+  c := &raft.Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  // Set peer list to all nodes in the cluster.
+  // Note that other nodes in the cluster, apart from this node, need to be started separately.
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single node cluster, like so:
+```go
+  // Create storage and config as shown above.
+  // Set peer list to itself, so this node can become the leader of this single-node cluster.
+  peers := []raft.Peer{{ID: 0x01}}
+  n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
+```go
+  // Create storage and config as shown above.
+  n := raft.StartNode(c, nil)
+```
+
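+For reference, the `ProposeConfChange` call on an existing member could look like the sketch below (the `existingNode` handle and the chosen ID are illustrative; the field names follow `raftpb.ConfChange`):
+
+```go
+  // On an existing cluster member, propose adding the new node (here 0x02)
+  // before starting it with an empty peer list.
+  cc := raftpb.ConfChange{
+    Type:   raftpb.ConfChangeAddNode,
+    NodeID: 0x02,
+  }
+  existingNode.ProposeConfChange(ctx, cc)
+```
+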
+To restart a node from previous state:
+```go
+  storage := raft.NewMemoryStorage()
+
+  // Recover the in-memory storage from persistent snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &raft.Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // Restart raft without peer information.
+  // Peer information is already included in the storage.
+  n := raft.RestartNode(c)
+```
+
+After creating a Node, the user has a few responsibilities:
+
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
+
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
+
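+For orientation, a custom Storage implementation answers a small set of read-side queries over whatever it has persisted. A sketch of the interface shape (paraphrased here; consult the package go docs for the authoritative definition and method comments):
+
+```go
+	// Sketch of the Storage interface shape (paraphrased, not verbatim).
+	type Storage interface {
+		InitialState() (raftpb.HardState, raftpb.ConfState, error)
+		Entries(lo, hi, maxSize uint64) ([]raftpb.Entry, error)
+		Term(i uint64) (uint64, error)
+		LastIndex() (uint64, error)
+		FirstIndex() (uint64, error)
+		Snapshot() (raftpb.Snapshot, error)
+	}
+```
+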
+Third, after receiving a message from another node, pass it to Node.Step:
+
+```go
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+```
+
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+```
+
+To propose a change to the state machine, serialize the application data into a byte slice and call:
+
+```go
+	n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout.
+
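+Re-proposal is the application's responsibility; the library itself does not track outstanding proposals. One possible pattern, sketched below, is to tag each proposal with a unique ID inside `data` and retry until it is observed among the committed entries (the `proposalTimeout` value and `waitCommitted` helper are hypothetical application code, not part of this package):
+
+```go
+	// Sketch: retry a proposal until the apply loop reports it committed.
+	// waitCommitted is assumed to watch the committed entries for the unique
+	// ID embedded in data and report whether it appeared before the deadline.
+	for {
+		ctx, cancel := context.WithTimeout(context.Background(), proposalTimeout)
+		err := n.Propose(ctx, data)
+		cancel()
+		if err == nil && waitCommitted(data, proposalTimeout) {
+			break
+		}
+		// Timed out or dropped (e.g. leadership changed); propose again.
+	}
+```
+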
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+	n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. It must be applied to the node with:
+
+```go
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
+
+## Go docs
+
+More detailed development documentation can be found in go docs: https://pkg.go.dev/go.etcd.io/raft/v3.
\ No newline at end of file
diff --git a/vendor/go.etcd.io/raft/v3/bootstrap.go b/vendor/go.etcd.io/raft/v3/bootstrap.go
new file mode 100644
index 0000000..2a61aa2
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/bootstrap.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// Bootstrap initializes the RawNode for first use by appending configuration
+// changes for the supplied peers. This method returns an error if the Storage
+// is nonempty.
+//
+// It is recommended that instead of calling this method, applications bootstrap
+// their state manually by setting up a Storage that has a first index > 1 and
+// which stores the desired ConfState as its InitialState.
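+//
+// As an illustrative sketch only (not prescriptive), one way to obtain such a
+// Storage is to apply a synthetic snapshot to a MemoryStorage before starting
+// the node, so that its first index is greater than 1 and its InitialState
+// carries the desired ConfState:
+//
+//	storage := NewMemoryStorage()
+//	_ = storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{
+//		Index:     1,
+//		Term:      1,
+//		ConfState: pb.ConfState{Voters: []uint64{1, 2, 3}},
+//	}})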
+func (rn *RawNode) Bootstrap(peers []Peer) error {
+	if len(peers) == 0 {
+		return errors.New("must provide at least one peer to Bootstrap")
+	}
+	lastIndex, err := rn.raft.raftLog.storage.LastIndex()
+	if err != nil {
+		return err
+	}
+
+	if lastIndex != 0 {
+		return errors.New("can't bootstrap a nonempty Storage")
+	}
+
+	// We've faked out initial entries above, but nothing has been
+	// persisted. Start with an empty HardState (thus the first Ready will
+	// emit a HardState update for the app to persist).
+	rn.prevHardSt = emptyState
+
+	// TODO(tbg): remove StartNode and give the application the right tools to
+	// bootstrap the initial membership in a cleaner way.
+	rn.raft.becomeFollower(1, None)
+	ents := make([]pb.Entry, len(peers))
+	for i, peer := range peers {
+		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+		data, err := cc.Marshal()
+		if err != nil {
+			return err
+		}
+
+		ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+	}
+	rn.raft.raftLog.append(ents...)
+
+	// Now apply them, mainly so that the application can call Campaign
+	// immediately after StartNode in tests. Note that these nodes will
+	// be added to raft twice: here and when the application's Ready
+	// loop calls ApplyConfChange. The calls to addNode must come after
+	// all calls to raftLog.append so progress.next is set after these
+	// bootstrapping entries (it is an error if we try to append these
+	// entries since they have already been committed).
+	// We do not set raftLog.applied so the application will be able
+	// to observe all conf changes via Ready.CommittedEntries.
+	//
+	// TODO(bdarnell): These entries are still unstable; do we need to preserve
+	// the invariant that committed < unstable?
+	rn.raft.raftLog.committed = uint64(len(ents))
+	for _, peer := range peers {
+		rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2())
+	}
+	return nil
+}
diff --git a/vendor/go.etcd.io/raft/v3/confchange/confchange.go b/vendor/go.etcd.io/raft/v3/confchange/confchange.go
new file mode 100644
index 0000000..a73d560
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/confchange/confchange.go
@@ -0,0 +1,419 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package confchange
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"go.etcd.io/raft/v3/quorum"
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+// Changer facilitates configuration changes. It exposes methods to handle
+// simple and joint consensus while performing the proper validation that allows
+// refusing invalid configuration changes before they affect the active
+// configuration.
+type Changer struct {
+	Tracker   tracker.ProgressTracker
+	LastIndex uint64
+}
+
+// EnterJoint verifies that the outgoing (=right) majority config of the joint
+// config is empty and initializes it with a copy of the incoming (=left)
+// majority config. That is, it transitions from
+//
+//	(1 2 3)&&()
+//
+// to
+//
+//	(1 2 3)&&(1 2 3).
+//
+// The supplied changes are then applied to the incoming majority config,
+// resulting in a joint configuration that in terms of the Raft thesis[1]
+// (Section 4.3) corresponds to `C_{new,old}`.
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
+	cfg, trk, err := c.checkAndCopy()
+	if err != nil {
+		return c.err(err)
+	}
+	if joint(cfg) {
+		err := errors.New("config is already joint")
+		return c.err(err)
+	}
+	if len(incoming(cfg.Voters)) == 0 {
+		// We allow adding nodes to an empty config for convenience (testing and
+		// bootstrap), but you can't enter a joint state.
+		err := errors.New("can't make a zero-voter config joint")
+		return c.err(err)
+	}
+	// Clear the outgoing config.
+	*outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{}
+	// Copy incoming to outgoing.
+	for id := range incoming(cfg.Voters) {
+		outgoing(cfg.Voters)[id] = struct{}{}
+	}
+
+	if err := c.apply(&cfg, trk, ccs...); err != nil {
+		return c.err(err)
+	}
+	cfg.AutoLeave = autoLeave
+	return checkAndReturn(cfg, trk)
+}
+
+// LeaveJoint transitions out of a joint configuration. It is an error to call
+// this method if the configuration is not joint, i.e. if the outgoing majority
+// config Voters[1] is empty.
+//
+// The outgoing majority config of the joint configuration will be removed,
+// that is, the incoming config is promoted as the sole decision maker. In the
+// notation of the Raft thesis[1] (Section 4.3), this method transitions from
+// `C_{new,old}` into `C_new`.
+//
+// At the same time, any staged learners (LearnersNext) the addition of which
+// was held back by an overlapping voter in the former outgoing config will be
+// inserted into Learners.
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) {
+	cfg, trk, err := c.checkAndCopy()
+	if err != nil {
+		return c.err(err)
+	}
+	if !joint(cfg) {
+		err := errors.New("can't leave a non-joint config")
+		return c.err(err)
+	}
+	for id := range cfg.LearnersNext {
+		nilAwareAdd(&cfg.Learners, id)
+		trk[id].IsLearner = true
+	}
+	cfg.LearnersNext = nil
+
+	for id := range outgoing(cfg.Voters) {
+		_, isVoter := incoming(cfg.Voters)[id]
+		_, isLearner := cfg.Learners[id]
+
+		if !isVoter && !isLearner {
+			delete(trk, id)
+		}
+	}
+	*outgoingPtr(&cfg.Voters) = nil
+	cfg.AutoLeave = false
+
+	return checkAndReturn(cfg, trk)
+}
+
+// Simple carries out a series of configuration changes that (in aggregate)
+// mutates the incoming majority config Voters[0] by at most one. This method
+// will return an error if that is not the case, if the resulting quorum is
+// zero, or if the configuration is in a joint state (i.e. if there is an
+// outgoing configuration).
+func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
+	cfg, trk, err := c.checkAndCopy()
+	if err != nil {
+		return c.err(err)
+	}
+	if joint(cfg) {
+		err := errors.New("can't apply simple config change in joint config")
+		return c.err(err)
+	}
+	if err := c.apply(&cfg, trk, ccs...); err != nil {
+		return c.err(err)
+	}
+	if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 {
+		return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config")
+	}
+
+	return checkAndReturn(cfg, trk)
+}
+
+// apply a change to the configuration. By convention, changes to voters are
+// always made to the incoming majority config Voters[0]. Voters[1] is either
+// empty or preserves the outgoing majority configuration while in a joint state.
+func (c Changer) apply(cfg *tracker.Config, trk tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error {
+	for _, cc := range ccs {
+		if cc.NodeID == 0 {
+			// etcd replaces the NodeID with zero if it decides (downstream of
+			// raft) to not apply a change, so we have to have explicit code
+			// here to ignore these.
+			continue
+		}
+		switch cc.Type {
+		case pb.ConfChangeAddNode:
+			c.makeVoter(cfg, trk, cc.NodeID)
+		case pb.ConfChangeAddLearnerNode:
+			c.makeLearner(cfg, trk, cc.NodeID)
+		case pb.ConfChangeRemoveNode:
+			c.remove(cfg, trk, cc.NodeID)
+		case pb.ConfChangeUpdateNode:
+		default:
+			return fmt.Errorf("unexpected conf type %d", cc.Type)
+		}
+	}
+	if len(incoming(cfg.Voters)) == 0 {
+		return errors.New("removed all voters")
+	}
+	return nil
+}
+
+// makeVoter adds or promotes the given ID to be a voter in the incoming
+// majority config.
+func (c Changer) makeVoter(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) {
+	pr := trk[id]
+	if pr == nil {
+		c.initProgress(cfg, trk, id, false /* isLearner */)
+		return
+	}
+
+	pr.IsLearner = false
+	nilAwareDelete(&cfg.Learners, id)
+	nilAwareDelete(&cfg.LearnersNext, id)
+	incoming(cfg.Voters)[id] = struct{}{}
+}
+
+// makeLearner makes the given ID a learner or stages it to be a learner once
+// an active joint configuration is exited.
+//
+// The former happens when the peer is not a part of the outgoing config, in
+// which case we either add a new learner or demote a voter in the incoming
+// config.
+//
+// The latter case occurs when the configuration is joint and the peer is a
+// voter in the outgoing config. In that case, we do not want to add the peer
+// as a learner because then we'd have to track a peer as a voter and learner
+// simultaneously. Instead, we add the learner to LearnersNext, so that it will
+// be added to Learners the moment the outgoing config is removed by
+// LeaveJoint().
+func (c Changer) makeLearner(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) {
+	pr := trk[id]
+	if pr == nil {
+		c.initProgress(cfg, trk, id, true /* isLearner */)
+		return
+	}
+	if pr.IsLearner {
+		return
+	}
+	// Remove any existing voter in the incoming config...
+	c.remove(cfg, trk, id)
+	// ... but save the Progress.
+	trk[id] = pr
+	// Use LearnersNext if we can't add the learner to Learners directly, i.e.
+	// if the peer is still tracked as a voter in the outgoing config. It will
+	// be turned into a learner in LeaveJoint().
+	//
+	// Otherwise, add a regular learner right away.
+	if _, onRight := outgoing(cfg.Voters)[id]; onRight {
+		nilAwareAdd(&cfg.LearnersNext, id)
+	} else {
+		pr.IsLearner = true
+		nilAwareAdd(&cfg.Learners, id)
+	}
+}
+
+// remove this peer as a voter or learner from the incoming config.
+func (c Changer) remove(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) {
+	if _, ok := trk[id]; !ok {
+		return
+	}
+
+	delete(incoming(cfg.Voters), id)
+	nilAwareDelete(&cfg.Learners, id)
+	nilAwareDelete(&cfg.LearnersNext, id)
+
+	// If the peer is still a voter in the outgoing config, keep the Progress.
+	if _, onRight := outgoing(cfg.Voters)[id]; !onRight {
+		delete(trk, id)
+	}
+}
+
+// initProgress initializes a new progress for the given node or learner.
+func (c Changer) initProgress(cfg *tracker.Config, trk tracker.ProgressMap, id uint64, isLearner bool) {
+	if !isLearner {
+		incoming(cfg.Voters)[id] = struct{}{}
+	} else {
+		nilAwareAdd(&cfg.Learners, id)
+	}
+	trk[id] = &tracker.Progress{
+		// Initializing the Progress with the last index means that the follower
+		// can be probed (with the last index).
+		//
+		// TODO(tbg): seems awfully optimistic. Using the first index would be
+		// better. The general expectation here is that the follower has no log
+		// at all (and will thus likely need a snapshot), though the app may
+		// have applied a snapshot out of band before adding the replica (thus
+		// making the first index the better choice).
+		Match:     0,
+		Next:      max(c.LastIndex, 1), // invariant: Match < Next
+		Inflights: tracker.NewInflights(c.Tracker.MaxInflight, c.Tracker.MaxInflightBytes),
+		IsLearner: isLearner,
+		// When a node is first added, we should mark it as recently active.
+		// Otherwise, CheckQuorum may cause us to step down if it is invoked
+		// before the added node has had a chance to communicate with us.
+		RecentActive: true,
+	}
+}
+
+// checkInvariants makes sure that the config and progress are compatible with
+// each other. This is used to check both what the Changer is initialized with,
+// as well as what it returns.
+func checkInvariants(cfg tracker.Config, trk tracker.ProgressMap) error {
+	// NB: intentionally allow the empty config. In production we'll never see a
+	// non-empty config (we prevent it from being created) but we will need to
+	// be able to *create* an initial config, for example during bootstrap (or
+	// during tests). Instead of having to hand-code this, we allow
+	// transitioning from an empty config into any other legal and non-empty
+	// config.
+	for _, ids := range []map[uint64]struct{}{
+		cfg.Voters.IDs(),
+		cfg.Learners,
+		cfg.LearnersNext,
+	} {
+		for id := range ids {
+			if _, ok := trk[id]; !ok {
+				return fmt.Errorf("no progress for %d", id)
+			}
+		}
+	}
+
+	// Any staged learner was staged because it could not be directly added due
+	// to a conflicting voter in the outgoing config.
+	for id := range cfg.LearnersNext {
+		if _, ok := outgoing(cfg.Voters)[id]; !ok {
+			return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id)
+		}
+		if trk[id].IsLearner {
+			return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id)
+		}
+	}
+	// Conversely, Learners and Voters don't intersect at all.
+	for id := range cfg.Learners {
+		if _, ok := outgoing(cfg.Voters)[id]; ok {
+			return fmt.Errorf("%d is in Learners and Voters[1]", id)
+		}
+		if _, ok := incoming(cfg.Voters)[id]; ok {
+			return fmt.Errorf("%d is in Learners and Voters[0]", id)
+		}
+		if !trk[id].IsLearner {
+			return fmt.Errorf("%d is in Learners, but is not marked as learner", id)
+		}
+	}
+
+	if !joint(cfg) {
+		// We enforce that empty maps are nil instead of zero.
+		if outgoing(cfg.Voters) != nil {
+			return fmt.Errorf("cfg.Voters[1] must be nil when not joint")
+		}
+		if cfg.LearnersNext != nil {
+			return fmt.Errorf("cfg.LearnersNext must be nil when not joint")
+		}
+		if cfg.AutoLeave {
+			return fmt.Errorf("AutoLeave must be false when not joint")
+		}
+	}
+
+	return nil
+}
+
+// checkAndCopy copies the tracker's config and progress map (deeply enough for
+// the purposes of the Changer) and returns those copies. It returns an error
+// if checkInvariants does.
+func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) {
+	cfg := c.Tracker.Config.Clone()
+	trk := tracker.ProgressMap{}
+
+	for id, pr := range c.Tracker.Progress {
+		// A shallow copy is enough because we only mutate the Learner field.
+		ppr := *pr
+		trk[id] = &ppr
+	}
+	return checkAndReturn(cfg, trk)
+}
+
+// checkAndReturn calls checkInvariants on the input and returns either the
+// resulting error or the input.
+func checkAndReturn(cfg tracker.Config, trk tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) {
+	if err := checkInvariants(cfg, trk); err != nil {
+		return tracker.Config{}, tracker.ProgressMap{}, err
+	}
+	return cfg, trk, nil
+}
+
+// err returns zero values and an error.
+func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) {
+	return tracker.Config{}, nil, err
+}
+
+// nilAwareAdd populates a map entry, creating the map if necessary.
+func nilAwareAdd(m *map[uint64]struct{}, id uint64) {
+	if *m == nil {
+		*m = map[uint64]struct{}{}
+	}
+	(*m)[id] = struct{}{}
+}
+
+// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after.
+func nilAwareDelete(m *map[uint64]struct{}, id uint64) {
+	if *m == nil {
+		return
+	}
+	delete(*m, id)
+	if len(*m) == 0 {
+		*m = nil
+	}
+}
+
+// symdiff returns the count of the symmetric difference between the sets of
+// uint64s, i.e. len( (l - r) \union (r - l)).
+func symdiff(l, r map[uint64]struct{}) int {
+	var n int
+	pairs := [][2]quorum.MajorityConfig{
+		{l, r}, // count elems in l but not in r
+		{r, l}, // count elems in r but not in l
+	}
+	for _, p := range pairs {
+		for id := range p[0] {
+			if _, ok := p[1][id]; !ok {
+				n++
+			}
+		}
+	}
+	return n
+}
+
+func joint(cfg tracker.Config) bool {
+	return len(outgoing(cfg.Voters)) > 0
+}
+
+func incoming(voters quorum.JointConfig) quorum.MajorityConfig      { return voters[0] }
+func outgoing(voters quorum.JointConfig) quorum.MajorityConfig      { return voters[1] }
+func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] }
+
+// Describe prints the type and NodeID of the configuration changes as a
+// space-delimited string.
+func Describe(ccs ...pb.ConfChangeSingle) string {
+	var buf strings.Builder
+	for _, cc := range ccs {
+		if buf.Len() > 0 {
+			buf.WriteByte(' ')
+		}
+		fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID)
+	}
+	return buf.String()
+}
diff --git a/vendor/go.etcd.io/raft/v3/confchange/restore.go b/vendor/go.etcd.io/raft/v3/confchange/restore.go
new file mode 100644
index 0000000..68ef029
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/confchange/restore.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package confchange
+
+import (
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+// toConfChangeSingle translates a conf state into 1) a slice of operations creating
+// first the config that will become the outgoing one, and then the incoming one, and
+// 2) another slice that, when applied to the config resulting from 1), represents the
+// ConfState.
+func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) {
+	// Example to follow along this code:
+	// voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4)
+	//
+	// This means that before entering the joint config, the configuration
+	// had voters (1 2 4 6) and perhaps some learners that are already gone.
+	// The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6)
+	// are no longer voters; however 4 is poised to become a learner upon leaving
+	// the joint state.
+	// We can't tell whether 5 was a learner before entering the joint config,
+	// but it doesn't matter (we'll pretend that it wasn't).
+	//
+	// The code below will construct
+	// outgoing = add 1; add 2; add 4; add 6
+	// incoming = remove 1; remove 2; remove 4; remove 6
+	//            add 1;    add 2;    add 3;
+	//            add-learner 5;
+	//            add-learner 4;
+	//
+	// So, when starting with an empty config, after applying 'outgoing' we have
+	//
+	//   quorum=(1 2 4 6)
+	//
+	// From which we enter a joint state via 'incoming'
+	//
+	//   quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4)
+	//
+	// as desired.
+
+	for _, id := range cs.VotersOutgoing {
+		// If there are outgoing voters, first add them one by one so that the
+		// (non-joint) config has them all.
+		out = append(out, pb.ConfChangeSingle{
+			Type:   pb.ConfChangeAddNode,
+			NodeID: id,
+		})
+
+	}
+
+	// We're done constructing the outgoing slice, now on to the incoming one
+	// (which will apply on top of the config created by the outgoing slice).
+
+	// First, we'll remove all of the outgoing voters.
+	for _, id := range cs.VotersOutgoing {
+		in = append(in, pb.ConfChangeSingle{
+			Type:   pb.ConfChangeRemoveNode,
+			NodeID: id,
+		})
+	}
+	// Then we'll add the incoming voters and learners.
+	for _, id := range cs.Voters {
+		in = append(in, pb.ConfChangeSingle{
+			Type:   pb.ConfChangeAddNode,
+			NodeID: id,
+		})
+	}
+	for _, id := range cs.Learners {
+		in = append(in, pb.ConfChangeSingle{
+			Type:   pb.ConfChangeAddLearnerNode,
+			NodeID: id,
+		})
+	}
+	// Same for LearnersNext; these are nodes we want to be learners but which
+	// are currently voters in the outgoing config.
+	for _, id := range cs.LearnersNext {
+		in = append(in, pb.ConfChangeSingle{
+			Type:   pb.ConfChangeAddLearnerNode,
+			NodeID: id,
+		})
+	}
+	return out, in
+}
+
+func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) {
+	for _, op := range ops {
+		cfg, trk, err := op(chg)
+		if err != nil {
+			return tracker.Config{}, nil, err
+		}
+		chg.Tracker.Config = cfg
+		chg.Tracker.Progress = trk
+	}
+	return chg.Tracker.Config, chg.Tracker.Progress, nil
+}
+
+// Restore takes a Changer (which must represent an empty configuration), and
+// runs a sequence of changes enacting the configuration described in the
+// ConfState.
+//
+// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure
+// the Changer only needs a ProgressMap (not a whole Tracker) at which point
+// this can just take LastIndex and MaxInflight directly instead and cook up
+// the results from that alone.
+func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) {
+	outgoing, incoming := toConfChangeSingle(cs)
+
+	var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error)
+
+	if len(outgoing) == 0 {
+		// No outgoing config, so just apply the incoming changes one by one.
+		for _, cc := range incoming {
+			cc := cc // loop-local copy
+			ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
+				return chg.Simple(cc)
+			})
+		}
+	} else {
+		// The ConfState describes a joint configuration.
+		//
+		// First, apply all of the changes of the outgoing config one by one, so
+		// that it temporarily becomes the incoming active config. For example,
+		// if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&().
+		for _, cc := range outgoing {
+			cc := cc // loop-local copy
+			ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
+				return chg.Simple(cc)
+			})
+		}
+		// Now enter the joint state, which rotates the above additions into the
+		// outgoing config, and adds the incoming config in. Continuing the
+		// example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations
+		// would be removing 2,3,4 and then adding in 1,2,3 while transitioning
+		// into a joint state.
+		ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
+			return chg.EnterJoint(cs.AutoLeave, incoming...)
+		})
+	}
+
+	return chain(chg, ops...)
+}
diff --git a/vendor/go.etcd.io/raft/v3/design.md b/vendor/go.etcd.io/raft/v3/design.md
new file mode 100644
index 0000000..7bc0531
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of every follower, and sends `replication message` to the follower based on its progress.
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest entry in the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, `snapshot`.
+
+```
+                            +--------------------------------------------------------+          
+                            |                  send snapshot                         |          
+                            |                                                        |          
+                  +---------+----------+                                  +----------v---------+
+              +--->       probe        |                                  |      snapshot      |
+              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
+              |   +---------+----------+                                  +--------------------+
+              |             |            1. snapshot success                                    
+              |             |               (next=snapshot.index + 1)                           
+              |             |            2. snapshot failure                                    
+              |             |               (no change)                                         
+              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
+              |             |               (match=m.index,next=match+1)                        
+receives msgAppResp(rej=true)                                                                   
+(next=match+1)|             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |             |   receives msgAppResp(rej=false&&index>match)                     
+              |             |   (match=m.index,next=match+1)                                    
+              |             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |   +---------v----------+                                                        
+              |   |     replicate      |                                                        
+              +---+  max inflight = n  |                                                        
+                  +--------------------+                                                        
+```
+
+When the progress of a follower is in `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message` slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in `replicate` state, the leader sends `replication message` and then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replication of log entries to the follower.
+
+When the progress of a follower is in `snapshot` state, the leader stops sending any `replication message`.
+
+A newly elected leader sets the progress of every follower to the `probe` state, with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress falls back to `probe` when the follower replies with a rejection `msgAppResp` or when the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in the `msgAppResp`. (We might end up sending some duplicate entries when `next` is reset too aggressively; see the open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied.
+
+### Flow Control
+
+1. limit the max size of each message. The max should be configurable.
+This lowers the cost of the probing state, since the size per message is bounded, and lowers the penalty when `next` has been aggressively reset too low.
+
+2. limit the number of in-flight messages to fewer than N when in `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which can cause message drops and trigger a bunch of unnecessary resends (see the sketch below).
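+
+Below is a minimal, illustrative sketch of the second point: a bounded "at most N in flight" window of the kind a leader could keep per follower. It is not the library's actual implementation (the `tracker` package has its own, more complete `Inflights` type); it only illustrates the bookkeeping.
+
+```go
+package main
+
+import "fmt"
+
+// inflights is a toy sliding window bounding the number of unacknowledged
+// replication messages sent to a single follower.
+type inflights struct {
+	max     int
+	indexes []uint64 // last log index carried by each in-flight message
+}
+
+func (in *inflights) full() bool { return len(in.indexes) >= in.max }
+
+// add records that a message carrying entries up to lastIndex was sent.
+func (in *inflights) add(lastIndex uint64) {
+	if in.full() {
+		panic("cannot add to a full inflights window")
+	}
+	in.indexes = append(in.indexes, lastIndex)
+}
+
+// freeLE releases every in-flight message acknowledged up to and including to.
+func (in *inflights) freeLE(to uint64) {
+	i := 0
+	for i < len(in.indexes) && in.indexes[i] <= to {
+		i++
+	}
+	in.indexes = in.indexes[i:]
+}
+
+func main() {
+	in := &inflights{max: 3}
+	in.add(5)
+	in.add(8)
+	in.add(12)
+	fmt.Println(in.full()) // true: stop sending until an ack frees the window
+	in.freeLE(8)           // the follower acknowledged up to index 8
+	fmt.Println(in.full()) // false: room for more replication messages
+}
+```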
diff --git a/vendor/go.etcd.io/raft/v3/doc.go b/vendor/go.etcd.io/raft/v3/doc.go
new file mode 100644
index 0000000..06253f4
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/doc.go
@@ -0,0 +1,399 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
+
+# Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+	storage := raft.NewMemoryStorage()
+	c := &Config{
+	  ID:              0x01,
+	  ElectionTick:    10,
+	  HeartbeatTick:   1,
+	  Storage:         storage,
+	  MaxSizePerMsg:   4096,
+	  MaxInflightMsgs: 256,
+	}
+	n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+	storage := raft.NewMemoryStorage()
+
+	// recover the in-memory storage from persistent
+	// snapshot, state and entries.
+	storage.ApplySnapshot(snapshot)
+	storage.SetHardState(state)
+	storage.Append(entries)
+
+	c := &Config{
+	  ID:              0x01,
+	  ElectionTick:    10,
+	  HeartbeatTick:   1,
+	  Storage:         storage,
+	  MaxSizePerMsg:   4096,
+	  MaxInflightMsgs: 256,
+	}
+
+	// restart raft without peer information.
+	// peer information is already included in the storage.
+	n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialize the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node). A brief sketch of this step appears after
+step 4 below.
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
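+
+As a rough sketch of step 3, given a committed entry 'entry' with Type
+EntryConfChange (stateMachineAcceptsConfChange and saveConfState below are
+application-defined placeholders, not part of this package), applying or
+cancelling the configuration change might look like:
+
+	var cc raftpb.ConfChange
+	cc.Unmarshal(entry.Data)
+	if !stateMachineAcceptsConfChange(cc) {
+	  // Cancel the change by zeroing NodeID; ApplyConfChange must still be called.
+	  cc.NodeID = 0
+	}
+	confState := n.ApplyConfChange(cc)
+	saveConfState(confState) // record the returned ConfState for future snapshots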
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
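+
+For example (the interval here is illustrative, not prescribed by this
+package), a wall-clock ticker can drive the abstract tick:
+
+	// With ElectionTick: 10 and HeartbeatTick: 1 as configured above, a 100ms
+	// tick yields a ~1s election timeout and a 100ms heartbeat interval.
+	// s.Ticker in the loop below could simply be ticker.C.
+	ticker := time.NewTicker(100 * time.Millisecond)
+	defer ticker.Stop()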
+
+The total state machine handling loop will look something like this:
+
+	for {
+	  select {
+	  case <-s.Ticker:
+	    n.Tick()
+	  case rd := <-s.Node.Ready():
+	    saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+	    send(rd.Messages)
+	    if !raft.IsEmptySnap(rd.Snapshot) {
+	      processSnapshot(rd.Snapshot)
+	    }
+	    for _, entry := range rd.CommittedEntries {
+	      process(entry)
+	      if entry.Type == raftpb.EntryConfChange {
+	        var cc raftpb.ConfChange
+	        cc.Unmarshal(entry.Data)
+	        s.Node.ApplyConfChange(cc)
+	      }
+	    }
+	    s.Node.Advance()
+	  case <-s.done:
+	    return
+	  }
+	}
+
+To propose changes to the state machine from your node take your application
+data, serialize it into a byte slice and call:
+
+	n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
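+
+One possible retry shape (purely illustrative; proposalTimeout and
+waitCommitted are application-defined placeholders, since raft itself does
+not track individual proposals):
+
+	for {
+	  if err := n.Propose(ctx, data); err != nil {
+	    return err // e.g. raft.ErrStopped
+	  }
+	  if waitCommitted(data, proposalTimeout) {
+	    return nil
+	  }
+	  // Not observed in committed entries in time; propose again.
+	}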
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+	n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, some committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+# Usage with Asynchronous Storage Writes
+
+The library can be configured with an alternate interface for local storage
+writes that can provide better performance in the presence of high proposal
+concurrency by minimizing interference between proposals. This feature is called
+AsynchronousStorageWrites, and can be enabled using the flag on the Config
+struct with the same name.
+
+When Asynchronous Storage Writes is enabled, the responsibility of code using
+the library is different from what was presented above. Users still read from
+the Node.Ready() channel. However, they process the updates it contains in a
+different manner. Users no longer consult the HardState, Entries, and Snapshot
+fields (steps 1 and 3 above). They also no longer call Node.Advance() to
+indicate that they have processed all entries in the Ready (step 4 above).
+Instead, all local storage operations are also communicated through messages
+present in the Ready.Message slice.
+
+The local storage messages come in two flavors. The first flavor is log append
+messages, which target a LocalAppendThread and carry Entries, HardState, and a
+Snapshot. The second flavor is entry application messages, which target a
+LocalApplyThread and carry CommittedEntries. Messages to the same target must be
+reliably processed in order. Messages to different targets can be processed in
+any order.
+
+Each local storage message carries a slice of response messages that must be
+delivered after the corresponding storage write has been completed. These
+responses may target the same node or may target other nodes.
+
+With Asynchronous Storage Writes enabled, the total state machine handling loop
+will look something like this:
+
+	for {
+	  select {
+	  case <-s.Ticker:
+	    n.Tick()
+	  case rd := <-s.Node.Ready():
+	    for _, m := range rd.Messages {
+	      switch m.To {
+	      case raft.LocalAppendThread:
+	        toAppend <- m
+	      case raft.LocalApplyThread:
+	        toApply <- m
+	      default:
+	        sendOverNetwork(m)
+	      }
+	    }
+	  case <-s.done:
+	    return
+	  }
+	}
+
+Usage of Asynchronous Storage Writes will typically also contain a pair of
+storage handler threads, one for log writes (append) and one for entry
+application to the local state machine (apply). Those will look something like:
+
+	// append thread
+	go func() {
+	  for {
+	    select {
+	    case m := <-toAppend:
+	      saveToStorage(m.State, m.Entries, m.Snapshot)
+	      send(m.Responses)
+	    case <-s.done:
+	      return
+	    }
+	  }
+	}()
+
+	// apply thread
+	go func() {
+	  for {
+	    select {
+	    case m := <-toApply:
+	      for _, entry := range m.CommittedEntries {
+	        process(entry)
+	        if entry.Type == raftpb.EntryConfChange {
+	          var cc raftpb.ConfChange
+	          cc.Unmarshal(entry.Data)
+	          s.Node.ApplyConfChange(cc)
+	        }
+	      }
+	      send(m.Responses)
+	    case <-s.done:
+	      return
+	    }
+	  }
+	}()
+
+# Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+# MessageType
+
+Package raft sends and receives messages in the Protocol Buffer format (defined
+in raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+	'MsgHup' is used for election. If a node is a follower or candidate, the
+	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+	candidate has not received any heartbeat before the election timeout, it
+	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+	start a new election.
+
+	'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+	the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+	send periodic 'MsgHeartbeat' messages to its followers.
+
+	'MsgProp' proposes to append data to its log entries. This is a special
+	type to redirect proposals to leader. Therefore, send method overwrites
+	raftpb.Message's term with its HardState's term to avoid attaching its
+	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+	method, the leader first calls the 'appendEntry' method to append entries
+	to its log, and then calls 'bcastAppend' method to send those entries to
+	its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+	follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
+	method. It is stored with sender's ID and later forwarded to leader by
+	rafthttp package.
+
+	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+	back to follower, because it indicates that there is a valid leader sending
+	'MsgApp' messages. Candidate and follower respond to this message in
+	'MsgAppResp' type.
+
+	'MsgAppResp' is response to log replication request('MsgApp'). When
+	'MsgApp' is passed to candidate or follower's Step method, it responds by
+	calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
+	mailbox.
+
+	'MsgVote' requests votes for election. When a node is a follower or
+	candidate and 'MsgHup' is passed to its Step method, then the node calls
+	'campaign' method to campaign itself to become a leader. Once 'campaign'
+	method is called, the node becomes candidate and sends 'MsgVote' to peers
+	in cluster to request votes. When passed to leader or candidate's Step
+	method and the message's Term is lower than leader's or candidate's,
+	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+	If leader or candidate receives 'MsgVote' with higher term, it will revert
+	back to follower. When 'MsgVote' is passed to follower, it votes for the
+	sender only when sender's last term is greater than MsgVote's term or
+	sender's last term is equal to MsgVote's term but sender's last committed
+	index is greater than or equal to follower's.
+
+	'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
+	passed to candidate, the candidate calculates how many votes it has won. If
+	it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
+	If candidate receives majority of votes of denials, it reverts back to
+	follower.
+
+	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+	protocol. When Config.PreVote is true, a pre-election is carried out first
+	(using the same rules as a regular election), and no node increases its term
+	number unless the pre-election indicates that the campaigning node would win.
+	This minimizes disruption when a partitioned node rejoins the cluster.
+
+	'MsgSnap' requests the installation of a snapshot. When a node has just
+	become a leader or the leader receives 'MsgProp' message, it calls
+	'bcastAppend' method, which then calls 'sendAppend' method to each
+	follower. In 'sendAppend', if a leader fails to get term or entries,
+	the leader requests snapshot by sending 'MsgSnap' type message.
+
+	'MsgSnapStatus' tells the result of a snapshot install. When a follower
+	rejects 'MsgSnap', it indicates that the snapshot request carried by
+	'MsgSnap' failed, usually because network issues prevented the network
+	layer from delivering the snapshot to the follower. The leader then
+	considers the follower's progress to be probe. When 'MsgSnap' is not
+	rejected, it indicates that the snapshot succeeded, and the leader sets
+	the follower's progress to probe and resumes its log replication.
+
+	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
+	to candidate and message's term is higher than candidate's, the candidate
+	reverts back to follower and updates its committed index from the one in
+	this heartbeat. And it sends the message to its mailbox. When
+	'MsgHeartbeat' is passed to follower's Step method and message's term is
+	higher than follower's, the follower updates its leaderID with the ID
+	from the message.
+
+	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+	is passed to leader's Step method, the leader knows which follower
+	responded. And only when the leader's last committed index is greater than
+	the follower's Match index does the leader run the 'sendAppend' method.
+
+	'MsgUnreachable' tells that a request (message) wasn't delivered. When
+	'MsgUnreachable' is passed to leader's Step method, the leader discovers
+	that the follower that sent this 'MsgUnreachable' is not reachable, often
+	indicating 'MsgApp' is lost. When follower's progress state is replicate,
+	the leader sets it back to probe.
+
+	'MsgStorageAppend' is a message from a node to its local append storage
+	thread to write entries, hard state, and/or a snapshot to stable storage.
+	The message will carry one or more responses, one of which will be a
+	'MsgStorageAppendResp' back to itself. The responses can also contain
+	'MsgAppResp', 'MsgVoteResp', and 'MsgPreVoteResp' messages. Used with
+	AsynchronousStorageWrites.
+
+	'MsgStorageApply' is a message from a node to its local apply storage
+	thread to apply committed entries. The message will carry one response,
+	which will be a 'MsgStorageApplyResp' back to itself. Used with
+	AsynchronousStorageWrites.
+*/
+package raft
diff --git a/vendor/go.etcd.io/raft/v3/log.go b/vendor/go.etcd.io/raft/v3/log.go
new file mode 100644
index 0000000..bd7c2fe
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/log.go
@@ -0,0 +1,574 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+type raftLog struct {
+	// storage contains all stable entries since the last snapshot.
+	storage Storage
+
+	// unstable contains all unstable entries and snapshot.
+	// they will be saved into storage.
+	unstable unstable
+
+	// committed is the highest log position that is known to be in
+	// stable storage on a quorum of nodes.
+	committed uint64
+	// applying is the highest log position that the application has
+	// been instructed to apply to its state machine. Some of these
+	// entries may be in the process of applying and have not yet
+	// reached applied.
+	// Use: The field is incremented when accepting a Ready struct.
+	// Invariant: applied <= applying && applying <= committed
+	applying uint64
+	// applied is the highest log position that the application has
+	// successfully applied to its state machine.
+	// Use: The field is incremented when advancing after the committed
+	// entries in a Ready struct have been applied (either synchronously
+	// or asynchronously).
+	// Invariant: applied <= committed
+	applied uint64
+
+	logger Logger
+
+	// maxApplyingEntsSize limits the outstanding byte size of the messages
+	// returned from calls to nextCommittedEnts that have not been acknowledged
+	// by a call to appliedTo.
+	maxApplyingEntsSize entryEncodingSize
+	// applyingEntsSize is the current outstanding byte size of the messages
+	// returned from calls to nextCommittedEnts that have not been acknowledged
+	// by a call to appliedTo.
+	applyingEntsSize entryEncodingSize
+	// applyingEntsPaused is true when entry application has been paused until
+	// enough progress is acknowledged.
+	applyingEntsPaused bool
+}
+
+// newLog returns log using the given storage and default options. It
+// recovers the log to the state that it just commits and applies the
+// latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+	return newLogWithSize(storage, logger, noLimit)
+}
+
+// newLogWithSize returns a log using the given storage and max
+// message size.
+func newLogWithSize(storage Storage, logger Logger, maxApplyingEntsSize entryEncodingSize) *raftLog {
+	firstIndex, err := storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	lastIndex, err := storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return &raftLog{
+		storage: storage,
+		unstable: unstable{
+			offset:           lastIndex + 1,
+			offsetInProgress: lastIndex + 1,
+			logger:           logger,
+		},
+		maxApplyingEntsSize: maxApplyingEntsSize,
+
+		// Initialize our committed and applied pointers to the time of the last compaction.
+		committed: firstIndex - 1,
+		applying:  firstIndex - 1,
+		applied:   firstIndex - 1,
+
+		logger: logger,
+	}
+}
+
+func (l *raftLog) String() string {
+	return fmt.Sprintf("committed=%d, applied=%d, applying=%d, unstable.offset=%d, unstable.offsetInProgress=%d, len(unstable.Entries)=%d",
+		l.committed, l.applied, l.applying, l.unstable.offset, l.unstable.offsetInProgress, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(a logSlice, committed uint64) (lastnewi uint64, ok bool) {
+	if !l.matchTerm(a.prev) {
+		return 0, false
+	}
+	// TODO(pav-kv): propagate logSlice down the stack. It will be used all the
+	// way down in unstable, for safety checks, and for useful bookkeeping.
+
+	lastnewi = a.prev.index + uint64(len(a.entries))
+	ci := l.findConflict(a.entries)
+	switch {
+	case ci == 0:
+	case ci <= l.committed:
+		l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+	default:
+		offset := a.prev.index + 1
+		if ci-offset > uint64(len(a.entries)) {
+			l.logger.Panicf("index, %d, is out of range [%d]", ci-offset, len(a.entries))
+		}
+		l.append(a.entries[ci-offset:]...)
+	}
+	l.commitTo(min(committed, lastnewi))
+	return lastnewi, true
+}
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+	if len(ents) == 0 {
+		return l.lastIndex()
+	}
+	if after := ents[0].Index - 1; after < l.committed {
+		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+	}
+	l.unstable.truncateAndAppend(ents)
+	return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the first pair of conflicting entries between the existing
+// entries and the given entries, if there are any.
+// If there is no conflicting entries, and the existing entries contains
+// all the given entries, zero will be returned.
+// If there is no conflicting entries, but the given entries contains new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+	for i := range ents {
+		if id := pbEntryID(&ents[i]); !l.matchTerm(id) {
+			if id.index <= l.lastIndex() {
+				// TODO(pav-kv): can simply print %+v of the id. This will change the
+				// log format though.
+				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+					id.index, l.zeroTermOnOutOfBounds(l.term(id.index)), id.term)
+			}
+			return id.index
+		}
+	}
+	return 0
+}
+
+// findConflictByTerm returns a best guess on where this log ends matching
+// another log, given that the only information known about the other log is the
+// (index, term) of its single entry.
+//
+// Specifically, the first returned value is the max guessIndex <= index, such
+// that term(guessIndex) <= term or term(guessIndex) is not known (because this
+// index is compacted or not yet stored).
+//
+// The second returned value is the term(guessIndex), or 0 if it is unknown.
+//
+// This function is used by a follower and leader to resolve log conflicts after
+// an unsuccessful append to a follower, and ultimately restore the steady flow
+// of appends.
+func (l *raftLog) findConflictByTerm(index uint64, term uint64) (uint64, uint64) {
+	for ; index > 0; index-- {
+		// If there is an error (likely ErrCompacted or ErrUnavailable), we don't
+		// know whether it's a match or not, so assume a possible match and return
+		// the index, with 0 term indicating an unknown term.
+		if ourTerm, err := l.term(index); err != nil {
+			return index, 0
+		} else if ourTerm <= term {
+			return index, ourTerm
+		}
+	}
+	return 0, 0
+}
+
+// nextUnstableEnts returns all entries that are available to be written to the
+// local stable log and are not already in-progress.
+func (l *raftLog) nextUnstableEnts() []pb.Entry {
+	return l.unstable.nextEntries()
+}
+
+// hasNextUnstableEnts returns if there are any entries that are available to be
+// written to the local stable log and are not already in-progress.
+func (l *raftLog) hasNextUnstableEnts() bool {
+	return len(l.nextUnstableEnts()) > 0
+}
+
+// hasNextOrInProgressUnstableEnts returns if there are any entries that are
+// available to be written to the local stable log or in the process of being
+// written to the local stable log.
+func (l *raftLog) hasNextOrInProgressUnstableEnts() bool {
+	return len(l.unstable.entries) > 0
+}
+
+// nextCommittedEnts returns all the available entries for execution.
+// Entries can be committed even when the local raft instance has not durably
+// appended them to the local raft log yet. If allowUnstable is true, committed
+// entries from the unstable log may be returned; otherwise, only entries known
+// to reside locally on stable storage will be returned.
+func (l *raftLog) nextCommittedEnts(allowUnstable bool) (ents []pb.Entry) {
+	if l.applyingEntsPaused {
+		// Entry application outstanding size limit reached.
+		return nil
+	}
+	if l.hasNextOrInProgressSnapshot() {
+		// See comment in hasNextCommittedEnts.
+		return nil
+	}
+	lo, hi := l.applying+1, l.maxAppliableIndex(allowUnstable)+1 // [lo, hi)
+	if lo >= hi {
+		// Nothing to apply.
+		return nil
+	}
+	maxSize := l.maxApplyingEntsSize - l.applyingEntsSize
+	if maxSize <= 0 {
+		l.logger.Panicf("applying entry size (%d-%d)=%d not positive",
+			l.maxApplyingEntsSize, l.applyingEntsSize, maxSize)
+	}
+	ents, err := l.slice(lo, hi, maxSize)
+	if err != nil {
+		l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+	}
+	return ents
+}
+
+// hasNextCommittedEnts returns if there is any available entries for execution.
+// This is a fast check without heavy raftLog.slice() in nextCommittedEnts().
+func (l *raftLog) hasNextCommittedEnts(allowUnstable bool) bool {
+	if l.applyingEntsPaused {
+		// Entry application outstanding size limit reached.
+		return false
+	}
+	if l.hasNextOrInProgressSnapshot() {
+		// If we have a snapshot to apply, don't also return any committed
+		// entries. Doing so raises questions about what should be applied
+		// first.
+		return false
+	}
+	lo, hi := l.applying+1, l.maxAppliableIndex(allowUnstable)+1 // [lo, hi)
+	return lo < hi
+}
+
+// maxAppliableIndex returns the maximum committed index that can be applied.
+// If allowUnstable is true, committed entries from the unstable log can be
+// applied; otherwise, only entries known to reside locally on stable storage
+// can be applied.
+func (l *raftLog) maxAppliableIndex(allowUnstable bool) uint64 {
+	hi := l.committed
+	if !allowUnstable {
+		hi = min(hi, l.unstable.offset-1)
+	}
+	return hi
+}
+
+// nextUnstableSnapshot returns the snapshot, if present, that is available to
+// be applied to the local storage and is not already in-progress.
+func (l *raftLog) nextUnstableSnapshot() *pb.Snapshot {
+	return l.unstable.nextSnapshot()
+}
+
+// hasNextUnstableSnapshot returns if there is a snapshot that is available to
+// be applied to the local storage and is not already in-progress.
+func (l *raftLog) hasNextUnstableSnapshot() bool {
+	return l.unstable.nextSnapshot() != nil
+}
+
+// hasNextOrInProgressSnapshot returns if there is pending snapshot waiting for
+// applying or in the process of being applied.
+func (l *raftLog) hasNextOrInProgressSnapshot() bool {
+	return l.unstable.snapshot != nil
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+	if l.unstable.snapshot != nil {
+		return *l.unstable.snapshot, nil
+	}
+	return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+	if i, ok := l.unstable.maybeFirstIndex(); ok {
+		return i
+	}
+	index, err := l.storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+	if i, ok := l.unstable.maybeLastIndex(); ok {
+		return i
+	}
+	i, err := l.storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+	// never decrease commit
+	if l.committed < tocommit {
+		if l.lastIndex() < tocommit {
+			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+		}
+		l.committed = tocommit
+	}
+}
+
+func (l *raftLog) appliedTo(i uint64, size entryEncodingSize) {
+	if l.committed < i || i < l.applied {
+		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+	}
+	l.applied = i
+	l.applying = max(l.applying, i)
+	if l.applyingEntsSize > size {
+		l.applyingEntsSize -= size
+	} else {
+		// Defense against underflow.
+		l.applyingEntsSize = 0
+	}
+	l.applyingEntsPaused = l.applyingEntsSize >= l.maxApplyingEntsSize
+}
+
+func (l *raftLog) acceptApplying(i uint64, size entryEncodingSize, allowUnstable bool) {
+	if l.committed < i {
+		l.logger.Panicf("applying(%d) is out of range [prevApplying(%d), committed(%d)]", i, l.applying, l.committed)
+	}
+	l.applying = i
+	l.applyingEntsSize += size
+	// Determine whether to pause entry application until some progress is
+	// acknowledged. We pause in two cases:
+	// 1. the outstanding entry size equals or exceeds the maximum size.
+	// 2. the outstanding entry size does not equal or exceed the maximum size,
+	//    but we determine that the next entry in the log will push us over the
+	//    limit. We determine this by comparing the last entry returned from
+	//    raftLog.nextCommittedEnts to the maximum entry that the method was
+	//    allowed to return had there been no size limit. If these indexes are
+	//    not equal, then the returned entries slice must have been truncated to
+	//    adhere to the memory limit.
+	l.applyingEntsPaused = l.applyingEntsSize >= l.maxApplyingEntsSize ||
+		i < l.maxAppliableIndex(allowUnstable)
+}
+
+func (l *raftLog) stableTo(id entryID) { l.unstable.stableTo(id) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+// acceptUnstable indicates that the application has started persisting the
+// unstable entries in storage, and that the current unstable entries are thus
+// to be marked as being in-progress, to avoid returning them with future calls
+// to Ready().
+func (l *raftLog) acceptUnstable() { l.unstable.acceptInProgress() }
+
+// lastEntryID returns the ID of the last entry in the log.
+func (l *raftLog) lastEntryID() entryID {
+	index := l.lastIndex()
+	t, err := l.term(index)
+	if err != nil {
+		l.logger.Panicf("unexpected error when getting the last term at %d: %v", index, err)
+	}
+	return entryID{term: t, index: index}
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+	// Check the unstable log first, even before computing the valid term range,
+	// which may need to access stable Storage. If we find the entry's term in
+	// the unstable log, we know it was in the valid range.
+	if t, ok := l.unstable.maybeTerm(i); ok {
+		return t, nil
+	}
+
+	// The valid term range is [firstIndex-1, lastIndex]. Even though the entry at
+	// firstIndex-1 is compacted away, its term is available for matching purposes
+	// when doing log appends.
+	if i+1 < l.firstIndex() {
+		return 0, ErrCompacted
+	}
+	if i > l.lastIndex() {
+		return 0, ErrUnavailable
+	}
+
+	t, err := l.storage.Term(i)
+	if err == nil {
+		return t, nil
+	}
+	if err == ErrCompacted || err == ErrUnavailable {
+		return 0, err
+	}
+	panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i uint64, maxSize entryEncodingSize) ([]pb.Entry, error) {
+	if i > l.lastIndex() {
+		return nil, nil
+	}
+	return l.slice(i, l.lastIndex()+1, maxSize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+	ents, err := l.entries(l.firstIndex(), noLimit)
+	if err == nil {
+		return ents
+	}
+	if err == ErrCompacted { // try again if there was a racing compaction
+		return l.allEntries()
+	}
+	// TODO (xiangli): handle error?
+	panic(err)
+}
+
+// isUpToDate determines if a log with the given last entry is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+//
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(their entryID) bool {
+	our := l.lastEntryID()
+	return their.term > our.term || their.term == our.term && their.index >= our.index
+}
+
+func (l *raftLog) matchTerm(id entryID) bool {
+	t, err := l.term(id.index)
+	if err != nil {
+		return false
+	}
+	return t == id.term
+}
+
+func (l *raftLog) maybeCommit(at entryID) bool {
+	// NB: term should never be 0 on a commit because the leader campaigned at
+	// least at term 1. But if it is 0 for some reason, we don't consider this a
+	// term match.
+	if at.term != 0 && at.index > l.committed && l.matchTerm(at) {
+		l.commitTo(at.index)
+		return true
+	}
+	return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+	l.committed = s.Metadata.Index
+	l.unstable.restore(s)
+}
+
+// scan visits all log entries in the [lo, hi) range, returning them via the
+// given callback. The callback can be invoked multiple times, with consecutive
+// sub-ranges of the requested range. Returns up to pageSize bytes worth of
+// entries at a time. May return more if a single entry size exceeds the limit.
+//
+// The entries in [lo, hi) must exist, otherwise scan() eventually returns an
+// error (possibly after passing some entries through the callback).
+//
+// If the callback returns an error, scan terminates and returns this error
+// immediately. This can be used to stop the scan early ("break" the loop).
+func (l *raftLog) scan(lo, hi uint64, pageSize entryEncodingSize, v func([]pb.Entry) error) error {
+	for lo < hi {
+		ents, err := l.slice(lo, hi, pageSize)
+		if err != nil {
+			return err
+		} else if len(ents) == 0 {
+			return fmt.Errorf("got 0 entries in [%d, %d)", lo, hi)
+		}
+		if err := v(ents); err != nil {
+			return err
+		}
+		lo += uint64(len(ents))
+	}
+	return nil
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi uint64, maxSize entryEncodingSize) ([]pb.Entry, error) {
+	if err := l.mustCheckOutOfBounds(lo, hi); err != nil {
+		return nil, err
+	}
+	if lo == hi {
+		return nil, nil
+	}
+	if lo >= l.unstable.offset {
+		ents := limitSize(l.unstable.slice(lo, hi), maxSize)
+		// NB: use the full slice expression to protect the unstable slice from
+		// appends to the returned ents slice.
+		return ents[:len(ents):len(ents)], nil
+	}
+
+	cut := min(hi, l.unstable.offset)
+	ents, err := l.storage.Entries(lo, cut, uint64(maxSize))
+	if err == ErrCompacted {
+		return nil, err
+	} else if err == ErrUnavailable {
+		l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, cut)
+	} else if err != nil {
+		panic(err) // TODO(pavelkalinnikov): handle errors uniformly
+	}
+	if hi <= l.unstable.offset {
+		return ents, nil
+	}
+
+	// Fast path to check if ents has reached the size limitation. Either the
+	// returned slice is shorter than requested (which means the next entry would
+	// bring it over the limit), or a single entry reaches the limit.
+	if uint64(len(ents)) < cut-lo {
+		return ents, nil
+	}
+	// Slow path computes the actual total size, so that unstable entries are cut
+	// optimally before being copied to ents slice.
+	size := entsSize(ents)
+	if size >= maxSize {
+		return ents, nil
+	}
+
+	unstable := limitSize(l.unstable.slice(l.unstable.offset, hi), maxSize-size)
+	// Total size of unstable may exceed maxSize-size only if len(unstable) == 1.
+	// If this happens, ignore this extra entry.
+	if len(unstable) == 1 && size+entsSize(unstable) > maxSize {
+		return ents, nil
+	}
+	// Otherwise, total size of unstable does not exceed maxSize-size, so total
+	// size of ents+unstable does not exceed maxSize. Simply concatenate them.
+	return extend(ents, unstable), nil
+}
+
+// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+	if lo > hi {
+		l.logger.Panicf("invalid slice %d > %d", lo, hi)
+	}
+	fi := l.firstIndex()
+	if lo < fi {
+		return ErrCompacted
+	}
+
+	length := l.lastIndex() + 1 - fi
+	if hi > fi+length {
+		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+	}
+	return nil
+}
+
+func (l *raftLog) zeroTermOnOutOfBounds(t uint64, err error) uint64 {
+	if err == nil {
+		return t
+	}
+	if err == ErrCompacted || err == ErrUnavailable {
+		return 0
+	}
+	l.logger.Panicf("unexpected error (%v)", err)
+	return 0
+}
diff --git a/vendor/go.etcd.io/raft/v3/log_unstable.go b/vendor/go.etcd.io/raft/v3/log_unstable.go
new file mode 100644
index 0000000..2629aae
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/log_unstable.go
@@ -0,0 +1,245 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "go.etcd.io/raft/v3/raftpb"
+
+// unstable contains "unstable" log entries and snapshot state that has
+// not yet been written to Storage. The type serves two roles. First, it
+// holds on to new log entries and an optional snapshot until they are
+// handed to a Ready struct for persistence. Second, it continues to
+// hold on to this state after it has been handed off to provide raftLog
+// with a view of the in-progress log entries and snapshot until their
+// writes have been stabilized and are guaranteed to be reflected in
+// queries of Storage. After this point, the corresponding log entries
+// and/or snapshot can be cleared from unstable.
+//
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+	// the incoming unstable snapshot, if any.
+	snapshot *pb.Snapshot
+	// all entries that have not yet been written to storage.
+	entries []pb.Entry
+	// entries[i] has raft log position i+offset.
+	offset uint64
+
+	// if true, snapshot is being written to storage.
+	snapshotInProgress bool
+	// entries[:offsetInProgress-offset] are being written to storage.
+	// Like offset, offsetInProgress is exclusive, meaning that it
+	// contains the index following the largest in-progress entry.
+	// Invariant: offset <= offsetInProgress
+	offsetInProgress uint64
+
+	logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries
+// if it has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index + 1, true
+	}
+	return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+	if l := len(u.entries); l != 0 {
+		return u.offset + uint64(l) - 1, true
+	}
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index, true
+	}
+	return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+	if i < u.offset {
+		if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+			return u.snapshot.Metadata.Term, true
+		}
+		return 0, false
+	}
+
+	last, ok := u.maybeLastIndex()
+	if !ok {
+		return 0, false
+	}
+	if i > last {
+		return 0, false
+	}
+
+	return u.entries[i-u.offset].Term, true
+}
+
+// nextEntries returns the unstable entries that are not already in the process
+// of being written to storage.
+func (u *unstable) nextEntries() []pb.Entry {
+	inProgress := int(u.offsetInProgress - u.offset)
+	if len(u.entries) == inProgress {
+		return nil
+	}
+	return u.entries[inProgress:]
+}
+
+// nextSnapshot returns the unstable snapshot, if one exists that is not already
+// in the process of being written to storage.
+func (u *unstable) nextSnapshot() *pb.Snapshot {
+	if u.snapshot == nil || u.snapshotInProgress {
+		return nil
+	}
+	return u.snapshot
+}
+
+// acceptInProgress marks all entries and the snapshot, if any, in the unstable
+// as having begun the process of being written to storage. The entries/snapshot
+// will no longer be returned from nextEntries/nextSnapshot. However, new
+// entries/snapshots added after a call to acceptInProgress will be returned
+// from those methods, until the next call to acceptInProgress.
+func (u *unstable) acceptInProgress() {
+	if len(u.entries) > 0 {
+		// NOTE: +1 because offsetInProgress is exclusive, like offset.
+		u.offsetInProgress = u.entries[len(u.entries)-1].Index + 1
+	}
+	if u.snapshot != nil {
+		u.snapshotInProgress = true
+	}
+}
+
+// stableTo marks entries up to the entry with the specified (index, term) as
+// being successfully written to stable storage.
+//
+// The method should only be called when the caller can attest that the entries
+// can not be overwritten by an in-progress log append. See the related comment
+// in newStorageAppendRespMsg.
+func (u *unstable) stableTo(id entryID) {
+	gt, ok := u.maybeTerm(id.index)
+	if !ok {
+		// Unstable entry missing. Ignore.
+		u.logger.Infof("entry at index %d missing from unstable log; ignoring", id.index)
+		return
+	}
+	if id.index < u.offset {
+		// Index matched unstable snapshot, not unstable entry. Ignore.
+		u.logger.Infof("entry at index %d matched unstable snapshot; ignoring", id.index)
+		return
+	}
+	if gt != id.term {
+		// Term mismatch between unstable entry and specified entry. Ignore.
+		// This is possible if part or all of the unstable log was replaced
+		// between that time that a set of entries started to be written to
+		// stable storage and when they finished.
+		u.logger.Infof("entry at (index,term)=(%d,%d) mismatched with "+
+			"entry at (%d,%d) in unstable log; ignoring", id.index, id.term, id.index, gt)
+		return
+	}
+	num := int(id.index + 1 - u.offset)
+	u.entries = u.entries[num:]
+	u.offset = id.index + 1
+	u.offsetInProgress = max(u.offsetInProgress, u.offset)
+	u.shrinkEntriesArray()
+}
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+	// We replace the array if we're using less than half of the space in
+	// it. This number is fairly arbitrary, chosen as an attempt to balance
+	// memory usage vs number of allocations. It could probably be improved
+	// with some focused tuning.
+	const lenMultiple = 2
+	if len(u.entries) == 0 {
+		u.entries = nil
+	} else if len(u.entries)*lenMultiple < cap(u.entries) {
+		newEntries := make([]pb.Entry, len(u.entries))
+		copy(newEntries, u.entries)
+		u.entries = newEntries
+	}
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+		u.snapshot = nil
+		u.snapshotInProgress = false
+	}
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+	u.offset = s.Metadata.Index + 1
+	u.offsetInProgress = u.offset
+	u.entries = nil
+	u.snapshot = &s
+	u.snapshotInProgress = false
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+	fromIndex := ents[0].Index
+	switch {
+	case fromIndex == u.offset+uint64(len(u.entries)):
+		// fromIndex is the next index in the u.entries, so append directly.
+		u.entries = append(u.entries, ents...)
+	case fromIndex <= u.offset:
+		u.logger.Infof("replace the unstable entries from index %d", fromIndex)
+		// The log is being truncated to before our current offset
+		// portion, so set the offset and replace the entries.
+		u.entries = ents
+		u.offset = fromIndex
+		u.offsetInProgress = u.offset
+	default:
+		// Truncate to fromIndex (exclusive), and append the new entries.
+		u.logger.Infof("truncate the unstable entries before index %d", fromIndex)
+		keep := u.slice(u.offset, fromIndex) // NB: appending to this slice is safe,
+		u.entries = append(keep, ents...)    // and will reallocate/copy it
+		// Only in-progress entries before fromIndex are still considered to be
+		// in-progress.
+		u.offsetInProgress = min(u.offsetInProgress, fromIndex)
+	}
+}
+
+// slice returns the entries from the unstable log with indexes in the range
+// [lo, hi). The entire range must be stored in the unstable log or the method
+// will panic. The returned slice can be appended to, but the entries in it must
+// not be changed because they are still shared with unstable.
+//
+// TODO(pavelkalinnikov): this, and similar []pb.Entry slices, may bubble up all
+// the way to the application code through Ready struct. Protect other slices
+// similarly, and document how the client can use them.
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+	u.mustCheckOutOfBounds(lo, hi)
+	// NB: use the full slice expression to limit what the caller can do with the
+	// returned slice. For example, an append will reallocate and copy this slice
+	// instead of corrupting the neighbouring u.entries.
+	return u.entries[lo-u.offset : hi-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+	if lo > hi {
+		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+	}
+	upper := u.offset + uint64(len(u.entries))
+	if lo < u.offset || hi > upper {
+		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+	}
+}
diff --git a/vendor/go.etcd.io/raft/v3/logger.go b/vendor/go.etcd.io/raft/v3/logger.go
new file mode 100644
index 0000000..e3cb00c
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/logger.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"sync"
+)
+
+type Logger interface {
+	Debug(v ...interface{})
+	Debugf(format string, v ...interface{})
+
+	Error(v ...interface{})
+	Errorf(format string, v ...interface{})
+
+	Info(v ...interface{})
+	Infof(format string, v ...interface{})
+
+	Warning(v ...interface{})
+	Warningf(format string, v ...interface{})
+
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) {
+	raftLoggerMu.Lock()
+	raftLogger = l
+	raftLoggerMu.Unlock()
+}
+
+func ResetDefaultLogger() {
+	SetLogger(defaultLogger)
+}
+
+func getLogger() Logger {
+	raftLoggerMu.Lock()
+	defer raftLoggerMu.Unlock()
+	return raftLogger
+}
+
+var (
+	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+	discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)}
+	raftLoggerMu  sync.Mutex
+	raftLogger    = Logger(defaultLogger)
+)
+
+const (
+	calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+	*log.Logger
+	debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+	l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+	}
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+	}
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+	l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+	l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+	return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/go.etcd.io/raft/v3/node.go b/vendor/go.etcd.io/raft/v3/node.go
new file mode 100644
index 0000000..e2a261c
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/node.go
@@ -0,0 +1,610 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"context"
+	"errors"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+	SnapshotFinish  SnapshotStatus = 1
+	SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+	emptyState = pb.HardState{}
+
+	// ErrStopped is returned by methods on Nodes that have been stopped.
+	ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
+	RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+	return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+	// The current volatile state of a Node.
+	// SoftState will be nil if there is no update.
+	// It is not required to consume or store SoftState.
+	*SoftState
+
+	// The current state of a Node to be saved to stable storage BEFORE
+	// Messages are sent.
+	//
+	// HardState will be equal to empty state if there is no update.
+	//
+	// If async storage writes are enabled, this field does not need to be acted
+	// on immediately. It will be reflected in a MsgStorageAppend message in the
+	// Messages slice.
+	pb.HardState
+
+	// ReadStates can be used for node to serve linearizable read requests locally
+	// when its applied index is greater than the index in ReadState.
+	// Note that the readState will be returned when raft receives msgReadIndex.
+	// The returned state is only valid for the request that requested to read.
+	ReadStates []ReadState
+
+	// Entries specifies entries to be saved to stable storage BEFORE
+	// Messages are sent.
+	//
+	// If async storage writes are enabled, this field does not need to be acted
+	// on immediately. It will be reflected in a MsgStorageAppend message in the
+	// Messages slice.
+	Entries []pb.Entry
+
+	// Snapshot specifies the snapshot to be saved to stable storage.
+	//
+	// If async storage writes are enabled, this field does not need to be acted
+	// on immediately. It will be reflected in a MsgStorageAppend message in the
+	// Messages slice.
+	Snapshot pb.Snapshot
+
+	// CommittedEntries specifies entries to be committed to a
+	// store/state-machine. These have previously been appended to stable
+	// storage.
+	//
+	// If async storage writes are enabled, this field does not need to be acted
+	// on immediately. It will be reflected in a MsgStorageApply message in the
+	// Messages slice.
+	CommittedEntries []pb.Entry
+
+	// Messages specifies outbound messages.
+	//
+	// If async storage writes are not enabled, these messages must be sent
+	// AFTER Entries are appended to stable storage.
+	//
+	// If async storage writes are enabled, these messages can be sent
+	// immediately as the messages that have the completion of the async writes
+	// as a precondition are attached to the individual MsgStorage{Append,Apply}
+	// messages instead.
+	//
+	// If it contains a MsgSnap message, the application MUST report back to raft
+	// when the snapshot has been received or has failed by calling ReportSnapshot.
+	Messages []pb.Message
+
+	// MustSync indicates whether the HardState and Entries must be durably
+	// written to disk or if a non-durable write is permissible.
+	MustSync bool
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+	return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+	return sp.Metadata.Index == 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+	// Tick increments the internal logical clock for the Node by a single tick. Election
+	// timeouts and heartbeat timeouts are in units of ticks.
+	Tick()
+	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+	Campaign(ctx context.Context) error
+	// Propose proposes that data be appended to the log. Note that proposals can be lost without
+	// notice, therefore it is the user's job to ensure proposal retries.
+	Propose(ctx context.Context, data []byte) error
+	// ProposeConfChange proposes a configuration change. Like any proposal, the
+	// configuration change may be dropped with or without an error being
+	// returned. In particular, configuration changes are dropped unless the
+	// leader has certainty that there is no prior unapplied configuration
+	// change in its log.
+	//
+	// The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
+	// message. The latter allows arbitrary configuration changes via joint
+	// consensus, notably including replacing a voter. Passing a ConfChangeV2
+	// message is only allowed if all Nodes participating in the cluster run a
+	// version of this library aware of the V2 API. See pb.ConfChangeV2 for
+	// usage details and semantics.
+	ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error
+
+	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+	Step(ctx context.Context, msg pb.Message) error
+
+	// Ready returns a channel that returns the current point-in-time state.
+	// Users of the Node must call Advance after retrieving the state returned by Ready (unless
+	// async storage writes are enabled, in which case Advance must never be called).
+	//
+	// NOTE: No committed entries from the next Ready may be applied until all committed entries
+	// and snapshots from the previous one have finished being applied.
+	Ready() <-chan Ready
+
+	// Advance notifies the Node that the application has saved progress up to the last Ready.
+	// It prepares the node to return the next available Ready.
+	//
+	// The application should generally call Advance after it applies the entries in the last Ready.
+	//
+	// However, as an optimization, the application may call Advance while it is applying the
+	// commands. For example, when the last Ready contains a snapshot, the application might take
+	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+	// progress, it can call Advance before finishing applying the last Ready.
+	//
+	// NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from the
+	// local append and apply threads take its place.
+	Advance()
+	// ApplyConfChange applies a config change (previously passed to
+	// ProposeConfChange) to the node. This must be called whenever a config
+	// change is observed in Ready.CommittedEntries, except when the app decides
+	// to reject the configuration change (i.e. treats it as a noop instead), in
+	// which case it must not be called.
+	//
+	// Returns an opaque non-nil ConfState protobuf which must be recorded in
+	// snapshots.
+	ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState
+
+	// TransferLeadership attempts to transfer leadership to the given transferee.
+	TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+	// ForgetLeader forgets a follower's current leader, changing it to None. It
+	// remains a leaderless follower in the current term, without campaigning.
+	//
+	// This is useful with PreVote+CheckQuorum, where followers will normally not
+	// grant pre-votes if they've heard from the leader in the past election
+	// timeout interval. Leaderless followers can grant pre-votes immediately, so
+	// if a quorum of followers have strong reason to believe the leader is dead
+	// (for example via a side-channel or external failure detector) and forget it
+	// then they can elect a new leader immediately, without waiting out the
+	// election timeout. They will also revert to normal followers if they hear
+	// from the leader again, or transition to candidates on an election timeout.
+	//
+	// For example, consider a three-node cluster where 1 is the leader and 2+3
+	// have just received a heartbeat from it. If 2 and 3 believe the leader has
+	// now died (maybe they know that an orchestration system shut down 1's VM),
+	// we can instruct 2 to forget the leader and 3 to campaign. 2 will then be
+	// able to grant 3's pre-vote and elect 3 as leader immediately (normally 2
+	// would reject the vote until an election timeout passes because it has heard
+	// from the leader recently). However, 3 cannot campaign unilaterally; a
+	// quorum has to agree that the leader is dead, which avoids disrupting the
+	// leader if individual nodes are wrong about it being dead.
+	//
+	// This does nothing with ReadOnlyLeaseBased, since it would allow a new
+	// leader to be elected without the old leader knowing.
+	ForgetLeader(ctx context.Context) error
+
+	// ReadIndex requests a read state. The read state will be set in the Ready.
+	// A read state has a read index. Once the application advances further than the read
+	// index, any linearizable read requests issued before the read request can be
+	// processed safely. The read state will have the same rctx attached.
+	// Note that the request can be lost without notice, so it is the user's job
+	// to ensure read index retries.
+	ReadIndex(ctx context.Context, rctx []byte) error
+
+	// Status returns the current status of the raft state machine.
+	Status() Status
+	// ReportUnreachable reports that the given node is not reachable for the last send.
+	ReportUnreachable(id uint64)
+	// ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
+	// who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
+	// Calling ReportSnapshot with SnapshotFinish is a no-op. However, any failure in applying a
+	// snapshot (e.g., while streaming it from leader to follower) should be reported to the
+	// leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any
+	// raft log probes until the follower can apply the snapshot and advance its state. If the
+	// follower can't do that, e.g., due to a crash, it could end up in limbo, never getting any
+	// updates from the leader. Therefore, it is crucial that the application ensures that any
+	// failure in snapshot sending is caught and reported back to the leader, so that it can
+	// resume raft log probing for the follower.
+	ReportSnapshot(id uint64, status SnapshotStatus)
+	// Stop performs any necessary termination of the Node.
+	Stop()
+}
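+
+// addVoterExample is a minimal illustrative sketch (not used by this package)
+// of the configuration-change flow documented on ProposeConfChange and
+// ApplyConfChange: propose a ConfChangeV2 that adds voter 2 and, once the
+// corresponding entry shows up in Ready.CommittedEntries, feed it back via
+// ApplyConfChange. The node ID and the Ready plumbing are assumptions of the
+// example; error handling is elided.
+func addVoterExample(ctx context.Context, n Node) error {
+	cc := pb.ConfChangeV2{Changes: []pb.ConfChangeSingle{
+		{Type: pb.ConfChangeAddNode, NodeID: 2},
+	}}
+	return n.ProposeConfChange(ctx, cc)
+	// Later, while applying a committed entry ent of type EntryConfChangeV2:
+	//
+	//	var cc pb.ConfChangeV2
+	//	_ = cc.Unmarshal(ent.Data)
+	//	confState := n.ApplyConfChange(cc)
+	//	_ = confState // must be recorded in snapshots
+}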
+
+type Peer struct {
+	ID      uint64
+	Context []byte
+}
+
+func setupNode(c *Config, peers []Peer) *node {
+	if len(peers) == 0 {
+		panic("no peers given; use RestartNode instead")
+	}
+	rn, err := NewRawNode(c)
+	if err != nil {
+		panic(err)
+	}
+	err = rn.Bootstrap(peers)
+	if err != nil {
+		c.Logger.Warningf("error occurred during starting a new node: %v", err)
+	}
+
+	n := newNode(rn)
+	return &n
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+//
+// Peers must not be zero length; call RestartNode in that case.
+func StartNode(c *Config, peers []Peer) Node {
+	n := setupNode(c, peers)
+	go n.run()
+	return n
+}
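+
+// startNodeExample is a minimal illustrative sketch of bootstrapping a
+// single-voter node with the in-memory Storage shipped with this package. The
+// tick counts and size limits are assumptions chosen for the example, not
+// requirements; a real application uses durable storage, drives Tick from a
+// timer, and runs the Ready loop described on the Ready type.
+func startNodeExample() Node {
+	storage := NewMemoryStorage()
+	c := &Config{
+		ID:              1,
+		ElectionTick:    10,
+		HeartbeatTick:   1,
+		Storage:         storage,
+		MaxSizePerMsg:   1024 * 1024,
+		MaxInflightMsgs: 256,
+	}
+	return StartNode(c, []Peer{{ID: 1}})
+}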
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+	rn, err := NewRawNode(c)
+	if err != nil {
+		panic(err)
+	}
+	n := newNode(rn)
+	go n.run()
+	return &n
+}
+
+type msgWithResult struct {
+	m      pb.Message
+	result chan error
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+	propc      chan msgWithResult
+	recvc      chan pb.Message
+	confc      chan pb.ConfChangeV2
+	confstatec chan pb.ConfState
+	readyc     chan Ready
+	advancec   chan struct{}
+	tickc      chan struct{}
+	done       chan struct{}
+	stop       chan struct{}
+	status     chan chan Status
+
+	rn *RawNode
+}
+
+func newNode(rn *RawNode) node {
+	return node{
+		propc:      make(chan msgWithResult),
+		recvc:      make(chan pb.Message),
+		confc:      make(chan pb.ConfChangeV2),
+		confstatec: make(chan pb.ConfState),
+		readyc:     make(chan Ready),
+		advancec:   make(chan struct{}),
+		// make tickc a buffered chan, so the raft node can buffer some ticks when the
+		// node is busy processing raft messages. The raft node will resume processing
+		// buffered ticks when it becomes idle.
+		tickc:  make(chan struct{}, 128),
+		done:   make(chan struct{}),
+		stop:   make(chan struct{}),
+		status: make(chan chan Status),
+		rn:     rn,
+	}
+}
+
+func (n *node) Stop() {
+	select {
+	case n.stop <- struct{}{}:
+		// Not already stopped, so trigger it
+	case <-n.done:
+		// Node has already been stopped - no need to do anything
+		return
+	}
+	// Block until the stop has been acknowledged by run()
+	<-n.done
+}
+
+func (n *node) run() {
+	var propc chan msgWithResult
+	var readyc chan Ready
+	var advancec chan struct{}
+	var rd Ready
+
+	r := n.rn.raft
+
+	lead := None
+
+	for {
+		if advancec == nil && n.rn.HasReady() {
+			// Populate a Ready. Note that this Ready is not guaranteed to
+			// actually be handled. We will arm readyc, but there's no guarantee
+			// that we will actually send on it. It's possible that we will
+			// service another channel instead, loop around, and then populate
+			// the Ready again. We could instead force the previous Ready to be
+			// handled first, but it's generally good to emit larger Readys plus
+			// it simplifies testing (by emitting less frequently and more
+			// predictably).
+			rd = n.rn.readyWithoutAccept()
+			readyc = n.readyc
+		}
+
+		if lead != r.lead {
+			if r.hasLeader() {
+				if lead == None {
+					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+				} else {
+					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+				}
+				propc = n.propc
+			} else {
+				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+				propc = nil
+			}
+			lead = r.lead
+		}
+
+		select {
+		// TODO: maybe buffer the config propose if there exists one (the way
+		// described in raft dissertation)
+		// Currently it is dropped in Step silently.
+		case pm := <-propc:
+			m := pm.m
+			m.From = r.id
+			err := r.Step(m)
+			if pm.result != nil {
+				pm.result <- err
+				close(pm.result)
+			}
+		case m := <-n.recvc:
+			if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && r.trk.Progress[m.From] == nil {
+				// Filter out response message from unknown From.
+				break
+			}
+			r.Step(m)
+		case cc := <-n.confc:
+			_, okBefore := r.trk.Progress[r.id]
+			cs := r.applyConfChange(cc)
+			// If the node was removed, block incoming proposals. Note that we
+			// only do this if the node was in the config before. Nodes may be
+			// a member of the group without knowing this (when they're catching
+			// up on the log and don't have the latest config) and we don't want
+			// to block the proposal channel in that case.
+			//
+			// NB: propc is reset when the leader changes, which, if we learn
+			// about it, sort of implies that we got readded, maybe? This isn't
+			// very sound and likely has bugs.
+			if _, okAfter := r.trk.Progress[r.id]; okBefore && !okAfter {
+				var found bool
+				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
+					for _, id := range sl {
+						if id == r.id {
+							found = true
+							break
+						}
+					}
+					if found {
+						break
+					}
+				}
+				if !found {
+					propc = nil
+				}
+			}
+			select {
+			case n.confstatec <- cs:
+			case <-n.done:
+			}
+		case <-n.tickc:
+			n.rn.Tick()
+		case readyc <- rd:
+			n.rn.acceptReady(rd)
+			if !n.rn.asyncStorageWrites {
+				advancec = n.advancec
+			} else {
+				rd = Ready{}
+			}
+			readyc = nil
+		case <-advancec:
+			n.rn.Advance(rd)
+			rd = Ready{}
+			advancec = nil
+		case c := <-n.status:
+			c <- getStatus(r)
+		case <-n.stop:
+			close(n.done)
+			return
+		}
+	}
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+	select {
+	case n.tickc <- struct{}{}:
+	case <-n.done:
+	default:
+		n.rn.raft.logger.Warningf("%x A tick missed to fire. Node blocks too long!", n.rn.raft.id)
+	}
+}
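+
+// Applications typically convert wall-clock time into ticks with a ticker, as
+// in the following sketch (the 100ms interval and the done channel are
+// assumptions of the example, not requirements of this package):
+//
+//	ticker := time.NewTicker(100 * time.Millisecond)
+//	defer ticker.Stop()
+//	for {
+//		select {
+//		case <-ticker.C:
+//			n.Tick()
+//		case <-done:
+//			return
+//		}
+//	}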
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+	return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+	// Ignore unexpected local messages received over the network.
+	if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) {
+		// TODO: return an error?
+		return nil
+	}
+	return n.step(ctx, m)
+}
+
+func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
+	typ, data, err := pb.MarshalConfChange(c)
+	if err != nil {
+		return pb.Message{}, err
+	}
+	return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
+	msg, err := confChangeToMsg(cc)
+	if err != nil {
+		return err
+	}
+	return n.Step(ctx, msg)
+}
+
+func (n *node) step(ctx context.Context, m pb.Message) error {
+	return n.stepWithWaitOption(ctx, m, false)
+}
+
+func (n *node) stepWait(ctx context.Context, m pb.Message) error {
+	return n.stepWithWaitOption(ctx, m, true)
+}
+
+// stepWithWaitOption advances the state machine using the given message.
+// ctx.Err() will be returned, if any.
+func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
+	if m.Type != pb.MsgProp {
+		select {
+		case n.recvc <- m:
+			return nil
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-n.done:
+			return ErrStopped
+		}
+	}
+	ch := n.propc
+	pm := msgWithResult{m: m}
+	if wait {
+		pm.result = make(chan error, 1)
+	}
+	select {
+	case ch <- pm:
+		if !wait {
+			return nil
+		}
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-n.done:
+		return ErrStopped
+	}
+	select {
+	case err := <-pm.result:
+		if err != nil {
+			return err
+		}
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-n.done:
+		return ErrStopped
+	}
+	return nil
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+	select {
+	case n.advancec <- struct{}{}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
+	var cs pb.ConfState
+	select {
+	case n.confc <- cc.AsV2():
+	case <-n.done:
+	}
+	select {
+	case cs = <-n.confstatec:
+	case <-n.done:
+	}
+	return &cs
+}
+
+func (n *node) Status() Status {
+	c := make(chan Status)
+	select {
+	case n.status <- c:
+		return <-c
+	case <-n.done:
+		return Status{}
+	}
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+	case <-n.done:
+	}
+}
+
+func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
+	select {
+	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
+	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
+	case <-n.done:
+	case <-ctx.Done():
+	}
+}
+
+func (n *node) ForgetLeader(ctx context.Context) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgForgetLeader})
+}
+
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
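+
+// A sketch of the linearizable-read flow built on ReadIndex and
+// Ready.ReadStates, kept as a comment because the waiting logic lives in the
+// application (rctx, rd, appliedIndex and serveRead are illustrative names):
+//
+//	rctx := []byte("unique-request-id")
+//	if err := n.ReadIndex(ctx, rctx); err != nil {
+//		return err
+//	}
+//	// On a later Ready rd:
+//	for _, rs := range rd.ReadStates {
+//		if bytes.Equal(rs.RequestCtx, rctx) {
+//			// Wait until appliedIndex >= rs.Index, then serveRead(rctx).
+//		}
+//	}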
diff --git a/vendor/go.etcd.io/raft/v3/quorum/joint.go b/vendor/go.etcd.io/raft/v3/quorum/joint.go
new file mode 100644
index 0000000..e3741e0
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/joint.go
@@ -0,0 +1,75 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quorum
+
+// JointConfig is a configuration of two groups of (possibly overlapping)
+// majority configurations. Decisions require the support of both majorities.
+type JointConfig [2]MajorityConfig
+
+func (c JointConfig) String() string {
+	if len(c[1]) > 0 {
+		return c[0].String() + "&&" + c[1].String()
+	}
+	return c[0].String()
+}
+
+// IDs returns a newly initialized map representing the set of voters present
+// in the joint configuration.
+func (c JointConfig) IDs() map[uint64]struct{} {
+	m := map[uint64]struct{}{}
+	for _, cc := range c {
+		for id := range cc {
+			m[id] = struct{}{}
+		}
+	}
+	return m
+}
+
+// Describe returns a (multi-line) representation of the commit indexes for the
+// given lookuper.
+func (c JointConfig) Describe(l AckedIndexer) string {
+	return MajorityConfig(c.IDs()).Describe(l)
+}
+
+// CommittedIndex returns the largest committed index for the given joint
+// quorum. An index is jointly committed if it is committed in both constituent
+// majorities.
+func (c JointConfig) CommittedIndex(l AckedIndexer) Index {
+	idx0 := c[0].CommittedIndex(l)
+	idx1 := c[1].CommittedIndex(l)
+	if idx0 < idx1 {
+		return idx0
+	}
+	return idx1
+}
+
+// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
+// a result indicating whether the vote is pending, lost, or won. A joint quorum
+// requires both majority quorums to vote in favor.
+func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult {
+	r1 := c[0].VoteResult(votes)
+	r2 := c[1].VoteResult(votes)
+
+	if r1 == r2 {
+		// If they agree, return the agreed state.
+		return r1
+	}
+	if r1 == VoteLost || r2 == VoteLost {
+		// If either config has lost, loss is the only possible outcome.
+		return VoteLost
+	}
+	// One side won, the other is pending, so the overall outcome is pending.
+	return VotePending
+}
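+
+// jointQuorumExample is a small illustrative sketch (not used by this package)
+// of the two rules above: the joint committed index is the minimum of the two
+// majorities' committed indexes, and a joint vote is won only if both
+// majorities win it. The concrete IDs and indexes are assumptions of the
+// example.
+func jointQuorumExample() (Index, VoteResult) {
+	c := JointConfig{
+		MajorityConfig{1: {}, 2: {}, 3: {}},
+		MajorityConfig{3: {}, 4: {}, 5: {}},
+	}
+	// The first majority commits 7 (median of 5, 7, 9); the second commits 4
+	// (median of 4, 4, 7); the joint committed index is therefore 4.
+	idx := c.CommittedIndex(mapAckIndexer{1: 5, 2: 9, 3: 7, 4: 4, 5: 4})
+	// The first majority has won (1, 2 and 3 voted yes), but the second has
+	// one yes (3), one no (4) and one outstanding voter (5), so it is still
+	// pending, and the joint result is VotePending.
+	res := c.VoteResult(map[uint64]bool{1: true, 2: true, 3: true, 4: false})
+	return idx, res // 4, VotePending
+}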
diff --git a/vendor/go.etcd.io/raft/v3/quorum/majority.go b/vendor/go.etcd.io/raft/v3/quorum/majority.go
new file mode 100644
index 0000000..85b296a
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/majority.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quorum
+
+import (
+	"cmp"
+	"fmt"
+	"math"
+	"slices"
+	"strings"
+)
+
+// MajorityConfig is a set of IDs that uses majority quorums to make decisions.
+type MajorityConfig map[uint64]struct{}
+
+func (c MajorityConfig) String() string {
+	sl := make([]uint64, 0, len(c))
+	for id := range c {
+		sl = append(sl, id)
+	}
+	slices.Sort(sl)
+	var buf strings.Builder
+	buf.WriteByte('(')
+	for i := range sl {
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		fmt.Fprint(&buf, sl[i])
+	}
+	buf.WriteByte(')')
+	return buf.String()
+}
+
+// Describe returns a (multi-line) representation of the commit indexes for the
+// given lookuper.
+func (c MajorityConfig) Describe(l AckedIndexer) string {
+	if len(c) == 0 {
+		return "<empty majority quorum>"
+	}
+	type tup struct {
+		id  uint64
+		idx Index
+		ok  bool // idx found?
+		bar int  // length of bar displayed for this tup
+	}
+
+	// Below, populate .bar so that the i-th largest commit index has bar i (we
+	// plot this as sort of a progress bar). The actual code is a bit more
+	// complicated and also makes sure that equal index => equal bar.
+
+	n := len(c)
+	info := make([]tup, 0, n)
+	for id := range c {
+		idx, ok := l.AckedIndex(id)
+		info = append(info, tup{id: id, idx: idx, ok: ok})
+	}
+
+	// Sort by index
+	slices.SortFunc(info, func(a, b tup) int {
+		if n := cmp.Compare(a.idx, b.idx); n != 0 {
+			return n
+		}
+		return cmp.Compare(a.id, b.id)
+	})
+
+	// Populate .bar.
+	for i := range info {
+		if i > 0 && info[i-1].idx < info[i].idx {
+			info[i].bar = i
+		}
+	}
+
+	// Sort by ID.
+	slices.SortFunc(info, func(a, b tup) int {
+		return cmp.Compare(a.id, b.id)
+	})
+
+	var buf strings.Builder
+
+	// Print.
+	fmt.Fprint(&buf, strings.Repeat(" ", n)+"    idx\n")
+	for i := range info {
+		bar := info[i].bar
+		if !info[i].ok {
+			fmt.Fprint(&buf, "?"+strings.Repeat(" ", n))
+		} else {
+			fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar))
+		}
+		fmt.Fprintf(&buf, " %5d    (id=%d)\n", info[i].idx, info[i].id)
+	}
+	return buf.String()
+}
+
+// Slice returns the MajorityConfig as a sorted slice.
+func (c MajorityConfig) Slice() []uint64 {
+	var sl []uint64
+	for id := range c {
+		sl = append(sl, id)
+	}
+	slices.Sort(sl)
+	return sl
+}
+
+// CommittedIndex computes the committed index from those supplied via the
+// provided AckedIndexer (for the active config).
+func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index {
+	n := len(c)
+	if n == 0 {
+		// This plays well with joint quorums which, when one half is the zero
+		// MajorityConfig, should behave like the other half.
+		return math.MaxUint64
+	}
+
+	// Use an on-stack slice to collect the committed indexes when n <= 7
+	// (otherwise we alloc). The alternative is to stash a slice on
+	// MajorityConfig, but this impairs usability (as is, MajorityConfig is just
+	// a map, and that's nice). The assumption is that running with a
+	// replication factor of >7 is rare, and in cases in which it happens
+	// performance is a lesser concern (additionally the performance
+	// implications of an allocation here are far from drastic).
+	var stk [7]uint64
+	var srt []uint64
+	if len(stk) >= n {
+		srt = stk[:n]
+	} else {
+		srt = make([]uint64, n)
+	}
+
+	{
+		// Fill the slice with the indexes observed. Any unused slots will be
+		// left as zero; these correspond to voters that may report in, but
+		// haven't yet. We fill from the right (since the zeroes will end up on
+		// the left after sorting below anyway).
+		i := n - 1
+		for id := range c {
+			if idx, ok := l.AckedIndex(id); ok {
+				srt[i] = uint64(idx)
+				i--
+			}
+		}
+	}
+	slices.Sort(srt)
+
+	// The smallest index into the array for which the value is acked by a
+	// quorum. In other words, from the end of the slice, move n/2+1 to the
+	// left (accounting for zero-indexing).
+	pos := n - (n/2 + 1)
+	return Index(srt[pos])
+}
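+
+// A worked example of the computation above for three voters: with acked
+// indexes 5, 9 and 7 the sorted slice is [5 7 9] and pos = 3 - (3/2 + 1) = 1,
+// so the committed index is 7, the highest index acked by a majority
+// (voters 2 and 3):
+//
+//	MajorityConfig{1: {}, 2: {}, 3: {}}.CommittedIndex(
+//		mapAckIndexer{1: 5, 2: 9, 3: 7}) // == 7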
+
+// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
+// a result indicating whether the vote is pending (i.e. neither a quorum of
+// yes/no has been reached), won (a quorum of yes has been reached), or lost (a
+// quorum of no has been reached).
+func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult {
+	if len(c) == 0 {
+		// By convention, the elections on an empty config win. This comes in
+		// handy with joint quorums because it'll make a half-populated joint
+		// quorum behave like a majority quorum.
+		return VoteWon
+	}
+
+	var votedCnt int // count of "yes" votes.
+	var missing int
+	for id := range c {
+		v, ok := votes[id]
+		if !ok {
+			missing++
+			continue
+		}
+		if v {
+			votedCnt++
+		}
+	}
+
+	q := len(c)/2 + 1
+	if votedCnt >= q {
+		return VoteWon
+	}
+	if votedCnt+missing >= q {
+		return VotePending
+	}
+	return VoteLost
+}
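+
+// A worked example of the tally above for a three-voter config (q = 2): one
+// yes, one no and one outstanding voter is still pending, because the
+// outstanding vote could yet complete either quorum:
+//
+//	c := MajorityConfig{1: {}, 2: {}, 3: {}}
+//	c.VoteResult(map[uint64]bool{1: true, 2: false})   // VotePending
+//	c.VoteResult(map[uint64]bool{1: true, 3: true})    // VoteWon
+//	c.VoteResult(map[uint64]bool{2: false, 3: false})  // VoteLost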
diff --git a/vendor/go.etcd.io/raft/v3/quorum/quorum.go b/vendor/go.etcd.io/raft/v3/quorum/quorum.go
new file mode 100644
index 0000000..2899e46
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/quorum.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quorum
+
+import (
+	"math"
+	"strconv"
+)
+
+// Index is a Raft log position.
+type Index uint64
+
+func (i Index) String() string {
+	if i == math.MaxUint64 {
+		return "∞"
+	}
+	return strconv.FormatUint(uint64(i), 10)
+}
+
+// AckedIndexer allows looking up a commit index for a given ID of a voter
+// from a corresponding MajorityConfig.
+type AckedIndexer interface {
+	AckedIndex(voterID uint64) (idx Index, found bool)
+}
+
+type mapAckIndexer map[uint64]Index
+
+func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) {
+	idx, ok := m[id]
+	return idx, ok
+}
+
+// VoteResult indicates the outcome of a vote.
+//
+//go:generate stringer -type=VoteResult
+type VoteResult uint8
+
+const (
+	// VotePending indicates that the decision of the vote depends on future
+	// votes, i.e. neither "yes" nor "no" has reached quorum yet.
+	VotePending VoteResult = 1 + iota
+	// VoteLost indicates that the quorum has voted "no".
+	VoteLost
+	// VoteWon indicates that the quorum has voted "yes".
+	VoteWon
+)
diff --git a/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go b/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go
new file mode 100644
index 0000000..9eca8fd
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=VoteResult"; DO NOT EDIT.
+
+package quorum
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[VotePending-1]
+	_ = x[VoteLost-2]
+	_ = x[VoteWon-3]
+}
+
+const _VoteResult_name = "VotePendingVoteLostVoteWon"
+
+var _VoteResult_index = [...]uint8{0, 11, 19, 26}
+
+func (i VoteResult) String() string {
+	i -= 1
+	if i >= VoteResult(len(_VoteResult_index)-1) {
+		return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")"
+	}
+	return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]]
+}
diff --git a/vendor/go.etcd.io/raft/v3/raft.go b/vendor/go.etcd.io/raft/v3/raft.go
new file mode 100644
index 0000000..94c2363
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raft.go
@@ -0,0 +1,2158 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"math"
+	"math/big"
+	"slices"
+	"strings"
+	"sync"
+
+	"go.etcd.io/raft/v3/confchange"
+	"go.etcd.io/raft/v3/quorum"
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+const (
+	// None is a placeholder node ID used when there is no leader.
+	None uint64 = 0
+	// LocalAppendThread is a reference to a local thread that saves unstable
+	// log entries and snapshots to stable storage. The identifier is used as a
+	// target for MsgStorageAppend messages when AsyncStorageWrites is enabled.
+	LocalAppendThread uint64 = math.MaxUint64
+	// LocalApplyThread is a reference to a local thread that applies committed
+	// log entries to the local state machine. The identifier is used as a
+	// target for MsgStorageApply messages when AsyncStorageWrites is enabled.
+	LocalApplyThread uint64 = math.MaxUint64 - 1
+)
+
+// Possible values for StateType.
+const (
+	StateFollower StateType = iota
+	StateCandidate
+	StateLeader
+	StatePreCandidate
+	numStates
+)
+
+type ReadOnlyOption int
+
+const (
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	ReadOnlySafe ReadOnlyOption = iota
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	ReadOnlyLeaseBased
+)
+
+// Possible values for CampaignType
+const (
+	// campaignPreElection represents the first phase of a normal election when
+	// Config.PreVote is true.
+	campaignPreElection CampaignType = "CampaignPreElection"
+	// campaignElection represents a normal (time-based) election (the second phase
+	// of the election when Config.PreVote is true).
+	campaignElection CampaignType = "CampaignElection"
+	// campaignTransfer represents the type of leader transfer
+	campaignTransfer CampaignType = "CampaignTransfer"
+)
+
+const noLimit = math.MaxUint64
+
+// ErrProposalDropped is returned when the proposal is ignored by some cases,
+// so that the proposer can be notified and fail fast.
+var ErrProposalDropped = errors.New("raft proposal dropped")
+
+// lockedRand provides synchronized random number generation for use by
+// multiple raft groups. Only the methods needed by the code are exposed
+// (e.g. Intn).
+type lockedRand struct {
+	mu sync.Mutex
+}
+
+func (r *lockedRand) Intn(n int) int {
+	r.mu.Lock()
+	v, _ := rand.Int(rand.Reader, big.NewInt(int64(n)))
+	r.mu.Unlock()
+	return int(v.Int64())
+}
+
+var globalRand = &lockedRand{}
+
+// CampaignType represents the type of campaigning. We use a string type
+// rather than uint64 because it is simpler to compare and to fill in raft
+// entries.
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+	"StateFollower",
+	"StateCandidate",
+	"StateLeader",
+	"StatePreCandidate",
+}
+
+func (st StateType) String() string {
+	return stmap[st]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+	// ID is the identity of the local raft. ID cannot be 0.
+	ID uint64
+
+	// ElectionTick is the number of Node.Tick invocations that must pass between
+	// elections. That is, if a follower does not receive any message from the
+	// leader of the current term before ElectionTick has elapsed, it will become
+	// a candidate and start an election. ElectionTick must be greater than
+	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+	// unnecessary leader switching.
+	ElectionTick int
+	// HeartbeatTick is the number of Node.Tick invocations that must pass between
+	// heartbeats. That is, a leader sends heartbeat messages to maintain its
+	// leadership every HeartbeatTick ticks.
+	HeartbeatTick int
+
+	// Storage is the storage for raft. raft generates entries and states to be
+	// stored in Storage. raft reads the persisted entries and states out of
+	// Storage when it needs them, and reads the previous state and configuration
+	// out of Storage when restarting.
+	Storage Storage
+	// Applied is the last applied index. It should only be set when restarting
+	// raft. raft will not return entries to the application at or below
+	// Applied. If Applied is unset when restarting, raft might return previously
+	// applied entries. This is a very application-dependent configuration.
+	Applied uint64
+
+	// AsyncStorageWrites configures the raft node to write to its local storage
+	// (raft log and state machine) using a request/response message passing
+	// interface instead of the default Ready/Advance function call interface.
+	// Local storage messages can be pipelined and processed asynchronously
+	// (with respect to Ready iteration), facilitating reduced interference
+	// between Raft proposals and increased batching of log appends and state
+	// machine application. As a result, use of asynchronous storage writes can
+	// reduce end-to-end commit latency and increase maximum throughput.
+	//
+	// When true, the Ready.Message slice will include MsgStorageAppend and
+	// MsgStorageApply messages. The messages will target a LocalAppendThread
+	// and a LocalApplyThread, respectively. Messages to the same target must be
+	// reliably processed in order. In other words, they can't be dropped (like
+	// messages over the network) and those targeted at the same thread can't be
+	// reordered. Messages to different targets can be processed in any order.
+	//
+	// MsgStorageAppend carries Raft log entries to append, election votes /
+	// term changes / updated commit indexes to persist, and snapshots to apply.
+	// All writes performed in service of a MsgStorageAppend must be durable
+	// before response messages are delivered. However, if the MsgStorageAppend
+	// carries no response messages, durability is not required. The message
+	// assumes the role of the Entries, HardState, and Snapshot fields in Ready.
+	//
+	// MsgStorageApply carries committed entries to apply. Writes performed in
+	// service of a MsgStorageApply need not be durable before response messages
+	// are delivered. The message assumes the role of the CommittedEntries field
+	// in Ready.
+	//
+	// Local messages each carry one or more response messages which should be
+	// delivered after the corresponding storage write has been completed. These
+	// responses may target the same node or may target other nodes. The storage
+	// threads are not responsible for understanding the response messages, only
+	// for delivering them to the correct target after performing the storage
+	// write.
+	AsyncStorageWrites bool
+
+	// MaxSizePerMsg limits the max byte size of each append message. A smaller
+	// value lowers the raft recovery cost (initial probing and message loss
+	// during normal operation). On the other hand, it might affect the
+	// throughput during normal replication. Note: math.MaxUint64 for unlimited,
+	// 0 for at most one entry per message.
+	MaxSizePerMsg uint64
+	// MaxCommittedSizePerReady limits the size of the committed entries that
+	// can be applied at the same time.
+	//
+	// Despite its name (preserved for compatibility), this quota applies across
+	// Ready structs to encompass all outstanding entries in unacknowledged
+	// MsgStorageApply messages when AsyncStorageWrites is enabled.
+	MaxCommittedSizePerReady uint64
+	// MaxUncommittedEntriesSize limits the aggregate byte size of the
+	// uncommitted entries that may be appended to a leader's log. Once this
+	// limit is exceeded, proposals will begin to return ErrProposalDropped
+	// errors. Note: 0 for no limit.
+	MaxUncommittedEntriesSize uint64
+	// MaxInflightMsgs limits the max number of in-flight append messages during
+	// the optimistic replication phase. The application transport layer usually
+	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+	// overflowing that sending buffer. TODO (xiangli): feedback to application to
+	// limit the proposal rate?
+	MaxInflightMsgs int
+	// MaxInflightBytes limits the number of in-flight bytes in append messages.
+	// Complements MaxInflightMsgs. Ignored if zero.
+	//
+	// This effectively bounds the bandwidth-delay product. Note that especially
+	// in high-latency deployments setting this too low can lead to a dramatic
+	// reduction in throughput. For example, with a peer that has a round-trip
+	// latency of 100ms to the leader and this setting is set to 1 MB, there is a
+	// throughput limit of 10 MB/s for this group. With RTT of 400ms, this drops
+	// to 2.5 MB/s. See Little's law to understand the math behind this.
+	MaxInflightBytes uint64
+
+	// CheckQuorum specifies if the leader should check quorum activity. Leader
+	// steps down when quorum is not active for an electionTimeout.
+	CheckQuorum bool
+
+	// PreVote enables the Pre-Vote algorithm described in raft thesis section
+	// 9.6. This prevents disruption when a node that has been partitioned away
+	// rejoins the cluster.
+	PreVote bool
+
+	// ReadOnlyOption specifies how the read only request is processed.
+	//
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	//
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
+	ReadOnlyOption ReadOnlyOption
+
+	// Logger is the logger used for the raft log. For MultiNode, which can host
+	// multiple raft groups, each raft group can have its own logger.
+	Logger Logger
+
+	// DisableProposalForwarding set to true means that followers will drop
+	// proposals, rather than forwarding them to the leader. One use case for
+	// this feature would be in a situation where the Raft leader is used to
+	// compute the data of a proposal, for example, adding a timestamp from a
+	// hybrid logical clock to data in a monotonically increasing way. Forwarding
+	// should be disabled to prevent a follower with an inaccurate hybrid
+	// logical clock from assigning the timestamp and then forwarding the data
+	// to the leader.
+	DisableProposalForwarding bool
+
+	// DisableConfChangeValidation turns off propose-time verification of
+	// configuration changes against the currently active configuration of the
+	// raft instance. These checks are generally sensible (cannot leave a joint
+	// config unless in a joint config, et cetera) but they have false positives
+	// because the active configuration may not be the most recent
+	// configuration. This is because configurations are activated during log
+	// application, and even the leader can trail log application by an
+	// unbounded number of entries.
+	// Symmetrically, the mechanism has false negatives - because the check may
+	// not run against the "actual" config that will be the predecessor of the
+	// newly proposed one, the check may pass but the new config may be invalid
+	// when it is being applied. In other words, the checks are best-effort.
+	//
+	// Users should *not* use this option unless they have a reliable mechanism
+	// (above raft) that serializes and verifies configuration changes. If an
+	// invalid configuration change enters the log and gets applied, a panic
+	// will result.
+	//
+	// This option may be removed once false positives are no longer possible.
+	// See: https://github.com/etcd-io/raft/issues/80
+	DisableConfChangeValidation bool
+
+	// StepDownOnRemoval makes the leader step down when it is removed from the
+	// group or demoted to a learner.
+	//
+	// This behavior will become unconditional in the future. See:
+	// https://github.com/etcd-io/raft/issues/83
+	StepDownOnRemoval bool
+
+	// raft state tracer
+	TraceLogger TraceLogger
+}
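+
+// The storage-thread side of the AsyncStorageWrites contract documented above
+// can be sketched as follows; appendc, applyc, storage, apply and deliver are
+// illustrative application-side names, and error handling is elided:
+//
+//	case m := <-appendc: // MsgStorageAppend targeting LocalAppendThread
+//		// Persist the entries, hard state and snapshot carried by the
+//		// message. The write must be durable before the attached
+//		// response messages are delivered.
+//		storage.Append(m.Entries)
+//		deliver(m.Responses)
+//	case m := <-applyc: // MsgStorageApply targeting LocalApplyThread
+//		apply(m.Entries) // durability is not required before responding
+//		deliver(m.Responses)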
+
+func (c *Config) validate() error {
+	if c.ID == None {
+		return errors.New("cannot use none as id")
+	}
+	if IsLocalMsgTarget(c.ID) {
+		return errors.New("cannot use local target as id")
+	}
+
+	if c.HeartbeatTick <= 0 {
+		return errors.New("heartbeat tick must be greater than 0")
+	}
+
+	if c.ElectionTick <= c.HeartbeatTick {
+		return errors.New("election tick must be greater than heartbeat tick")
+	}
+
+	if c.Storage == nil {
+		return errors.New("storage cannot be nil")
+	}
+
+	if c.MaxUncommittedEntriesSize == 0 {
+		c.MaxUncommittedEntriesSize = noLimit
+	}
+
+	// default MaxCommittedSizePerReady to MaxSizePerMsg because they were
+	// previously the same parameter.
+	if c.MaxCommittedSizePerReady == 0 {
+		c.MaxCommittedSizePerReady = c.MaxSizePerMsg
+	}
+
+	if c.MaxInflightMsgs <= 0 {
+		return errors.New("max inflight messages must be greater than 0")
+	}
+	if c.MaxInflightBytes == 0 {
+		c.MaxInflightBytes = noLimit
+	} else if c.MaxInflightBytes < c.MaxSizePerMsg {
+		return errors.New("max inflight bytes must be >= max message size")
+	}
+
+	if c.Logger == nil {
+		c.Logger = getLogger()
+	}
+
+	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+	}
+
+	return nil
+}
+
+type raft struct {
+	id uint64
+
+	Term uint64
+	Vote uint64
+
+	readStates []ReadState
+
+	// the log
+	raftLog *raftLog
+
+	maxMsgSize         entryEncodingSize
+	maxUncommittedSize entryPayloadSize
+
+	trk tracker.ProgressTracker
+
+	state StateType
+
+	// isLearner is true if the local raft node is a learner.
+	isLearner bool
+
+	// msgs contains the list of messages that should be sent out immediately to
+	// other nodes.
+	//
+	// Messages in this list must target other nodes.
+	msgs []pb.Message
+	// msgsAfterAppend contains the list of messages that should be sent after
+	// the accumulated unstable state (e.g. term, vote, []entry, and snapshot)
+	// has been persisted to durable storage. This includes waiting for any
+	// unstable state that is already in the process of being persisted (i.e.
+	// has already been handed out in a prior Ready struct) to complete.
+	//
+	// Messages in this list may target other nodes or may target this node.
+	//
+	// Messages in this list have the type MsgAppResp, MsgVoteResp, or
+	// MsgPreVoteResp. See the comment in raft.send for details.
+	msgsAfterAppend []pb.Message
+
+	// the leader id
+	lead uint64
+	// leadTransferee is the id of the leader transfer target when its value is not zero.
+	// It follows the procedure defined in raft thesis section 3.10.
+	leadTransferee uint64
+	// Only one conf change may be pending (in the log, but not yet
+	// applied) at a time. This is enforced via pendingConfIndex, which
+	// is set to a value >= the log index of the latest pending
+	// configuration change (if any). Config changes are only allowed to
+	// be proposed if the leader's applied index is greater than this
+	// value.
+	pendingConfIndex uint64
+	// disableConfChangeValidation is Config.DisableConfChangeValidation,
+	// see there for details.
+	disableConfChangeValidation bool
+	// an estimate of the size of the uncommitted tail of the Raft log. Used to
+	// prevent unbounded log growth. Only maintained by the leader. Reset on
+	// term changes.
+	uncommittedSize entryPayloadSize
+
+	readOnly *readOnly
+
+	// electionElapsed is the number of ticks since it reached the last
+	// electionTimeout when it is a leader or candidate, or the number of
+	// ticks since it reached the last electionTimeout or received a valid
+	// message from the current leader when it is a follower.
+	electionElapsed int
+
+	// number of ticks since it reached last heartbeatTimeout.
+	// only leader keeps heartbeatElapsed.
+	heartbeatElapsed int
+
+	checkQuorum bool
+	preVote     bool
+
+	heartbeatTimeout int
+	electionTimeout  int
+	// randomizedElectionTimeout is a random number between
+	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
+	// when raft changes its state to follower or candidate.
+	randomizedElectionTimeout int
+	disableProposalForwarding bool
+	stepDownOnRemoval         bool
+
+	tick func()
+	step stepFunc
+
+	logger Logger
+
+	// pendingReadIndexMessages is used to store messages of type MsgReadIndex
+	// that can't be answered yet because the new leader hasn't committed any
+	// entry in the current term. They will be handled as soon as the first
+	// entry in the current term is committed.
+	pendingReadIndexMessages []pb.Message
+
+	traceLogger TraceLogger
+}
+
+func newRaft(c *Config) *raft {
+	if err := c.validate(); err != nil {
+		panic(err.Error())
+	}
+	raftlog := newLogWithSize(c.Storage, c.Logger, entryEncodingSize(c.MaxCommittedSizePerReady))
+	hs, cs, err := c.Storage.InitialState()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+
+	r := &raft{
+		id:                          c.ID,
+		lead:                        None,
+		isLearner:                   false,
+		raftLog:                     raftlog,
+		maxMsgSize:                  entryEncodingSize(c.MaxSizePerMsg),
+		maxUncommittedSize:          entryPayloadSize(c.MaxUncommittedEntriesSize),
+		trk:                         tracker.MakeProgressTracker(c.MaxInflightMsgs, c.MaxInflightBytes),
+		electionTimeout:             c.ElectionTick,
+		heartbeatTimeout:            c.HeartbeatTick,
+		logger:                      c.Logger,
+		checkQuorum:                 c.CheckQuorum,
+		preVote:                     c.PreVote,
+		readOnly:                    newReadOnly(c.ReadOnlyOption),
+		disableProposalForwarding:   c.DisableProposalForwarding,
+		disableConfChangeValidation: c.DisableConfChangeValidation,
+		stepDownOnRemoval:           c.StepDownOnRemoval,
+		traceLogger:                 c.TraceLogger,
+	}
+
+	traceInitState(r)
+
+	lastID := r.raftLog.lastEntryID()
+	cfg, trk, err := confchange.Restore(confchange.Changer{
+		Tracker:   r.trk,
+		LastIndex: lastID.index,
+	}, cs)
+	if err != nil {
+		panic(err)
+	}
+	assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, trk))
+
+	if !IsEmptyHardState(hs) {
+		r.loadState(hs)
+	}
+	if c.Applied > 0 {
+		raftlog.appliedTo(c.Applied, 0 /* size */)
+	}
+	r.becomeFollower(r.Term, None)
+
+	var nodesStrs []string
+	for _, n := range r.trk.VoterNodes() {
+		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+	}
+
+	// TODO(pav-kv): it should be ok to simply print %+v for lastID.
+	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, lastID.index, lastID.term)
+	return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() SoftState { return SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+	return pb.HardState{
+		Term:   r.Term,
+		Vote:   r.Vote,
+		Commit: r.raftLog.committed,
+	}
+}
+
+// send schedules persisting state to a stable storage and AFTER that
+// sending the message (as part of next Ready message processing).
+func (r *raft) send(m pb.Message) {
+	if m.From == None {
+		m.From = r.id
+	}
+	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
+		if m.Term == 0 {
+			// All {pre-,}campaign messages need to have the term set when
+			// sending.
+			// - MsgVote: m.Term is the term the node is campaigning for,
+			//   non-zero as we increment the term when campaigning.
+			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+			//   granted, non-zero for the same reason MsgVote is
+			// - MsgPreVote: m.Term is the term the node will campaign,
+			//   non-zero as we use m.Term to indicate the next term we'll be
+			//   campaigning for
+			// - MsgPreVoteResp: m.Term is the term received in the original
+			//   MsgPreVote if the pre-vote was granted, non-zero for the
+			//   same reasons MsgPreVote is
+			r.logger.Panicf("term should be set when sending %s", m.Type)
+		}
+	} else {
+		if m.Term != 0 {
+			r.logger.Panicf("term should not be set when sending %s (was %d)", m.Type, m.Term)
+		}
+		// do not attach term to MsgProp, MsgReadIndex
+		// proposals are a way to forward to the leader and
+		// should be treated as local message.
+		// MsgReadIndex is also forwarded to leader.
+		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+			m.Term = r.Term
+		}
+	}
+	if m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVoteResp {
+		// If async storage writes are enabled, messages added to the msgs slice
+		// are allowed to be sent out before unstable state (e.g. log entry
+		// writes and election votes) have been durably synced to the local
+		// disk.
+		//
+		// For most message types, this is not an issue. However, response
+		// messages that relate to "voting" on either leader election or log
+		// appends require durability before they can be sent. It would be
+		// incorrect to publish a vote in an election before that vote has been
+		// synced to stable storage locally. Similarly, it would be incorrect to
+		// acknowledge a log append to the leader before that entry has been
+		// synced to stable storage locally.
+		//
+		// Per the Raft thesis, section 3.8 Persisted state and server restarts:
+		//
+		// > Raft servers must persist enough information to stable storage to
+		// > survive server restarts safely. In particular, each server persists
+		// > its current term and vote; this is necessary to prevent the server
+		// > from voting twice in the same term or replacing log entries from a
+		// > newer leader with those from a deposed leader. Each server also
+		// > persists new log entries before they are counted towards the entries’
+		// > commitment; this prevents committed entries from being lost or
+		// > “uncommitted” when servers restart
+		//
+		// To enforce this durability requirement, these response messages are
+		// queued to be sent out as soon as the current collection of unstable
+		// state (the state that the response message was predicated upon) has
+		// been durably persisted. This unstable state may have already been
+		// passed to a Ready struct whose persistence is in progress or may be
+		// waiting for the next Ready struct to begin being written to Storage.
+		// These messages must wait for all of this state to be durable before
+		// being published.
+		//
+		// Rejected responses (m.Reject == true) present an interesting case
+		// where the durability requirement is less unambiguous. A rejection may
+		// be predicated upon unstable state. For instance, a node may reject a
+		// vote for one peer because it has already begun syncing its vote for
+		// another peer. Or it may reject a vote from one peer because it has
+		// unstable log entries that indicate that the peer is behind on its
+		// log. In these cases, it is likely safe to send out the rejection
+		// response immediately without compromising safety in the presence of a
+		// server restart. However, because these rejections are rare and
+		// because the safety of such behavior has not been formally verified,
+		// we err on the side of safety and omit a `&& !m.Reject` condition
+		// above.
+		r.msgsAfterAppend = append(r.msgsAfterAppend, m)
+		traceSendMessage(r, &m)
+	} else {
+		if m.To == r.id {
+			r.logger.Panicf("message should not be self-addressed when sending %s", m.Type)
+		}
+		r.msgs = append(r.msgs, m)
+		traceSendMessage(r, &m)
+	}
+}
+
+// sendAppend sends an append RPC with new entries (if any) and the
+// current commit index to the given peer.
+func (r *raft) sendAppend(to uint64) {
+	r.maybeSendAppend(to, true)
+}
+
+// maybeSendAppend sends an append RPC with new entries to the given peer,
+// if necessary. Returns true if a message was sent. The sendIfEmpty
+// argument controls whether messages with no entries will be sent
+// ("empty" messages are useful to convey updated Commit indexes, but
+// are undesirable when we're sending multiple messages in a batch).
+//
+// TODO(pav-kv): make invocation of maybeSendAppend stateless. The Progress
+// struct contains all the state necessary for deciding whether to send a
+// message.
+func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool {
+	pr := r.trk.Progress[to]
+	if pr.IsPaused() {
+		return false
+	}
+
+	prevIndex := pr.Next - 1
+	prevTerm, err := r.raftLog.term(prevIndex)
+	if err != nil {
+		// The log probably got truncated at >= pr.Next, so we can't catch up the
+		// follower log anymore. Send a snapshot instead.
+		return r.maybeSendSnapshot(to, pr)
+	}
+
+	var ents []pb.Entry
+	// In a throttled StateReplicate only send empty MsgApp, to ensure progress.
+	// Otherwise, if we had a full Inflights and all inflight messages were in
+	// fact dropped, replication to that follower would stall. Instead, an empty
+	// MsgApp will eventually reach the follower (heartbeats responses prompt the
+	// leader to send an append), allowing it to be acked or rejected, both of
+	// which will clear out Inflights.
+	if pr.State != tracker.StateReplicate || !pr.Inflights.Full() {
+		ents, err = r.raftLog.entries(pr.Next, r.maxMsgSize)
+	}
+	if len(ents) == 0 && !sendIfEmpty {
+		return false
+	}
+	// TODO(pav-kv): move this check up to where err is returned.
+	if err != nil { // send a snapshot if we failed to get the entries
+		return r.maybeSendSnapshot(to, pr)
+	}
+
+	// Send the actual MsgApp otherwise, and update the progress accordingly.
+	r.send(pb.Message{
+		To:      to,
+		Type:    pb.MsgApp,
+		Index:   prevIndex,
+		LogTerm: prevTerm,
+		Entries: ents,
+		Commit:  r.raftLog.committed,
+	})
+	pr.SentEntries(len(ents), uint64(payloadsSize(ents)))
+	pr.SentCommit(r.raftLog.committed)
+	return true
+}
+
+// maybeSendSnapshot fetches a snapshot from Storage, and sends it to the given
+// node. Returns true iff the snapshot message has been emitted successfully.
+func (r *raft) maybeSendSnapshot(to uint64, pr *tracker.Progress) bool {
+	if !pr.RecentActive {
+		r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+		return false
+	}
+
+	snapshot, err := r.raftLog.snapshot()
+	if err != nil {
+		if err == ErrSnapshotTemporarilyUnavailable {
+			r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+			return false
+		}
+		panic(err) // TODO(bdarnell)
+	}
+	if IsEmptySnap(snapshot) {
+		panic("need non-empty snapshot")
+	}
+	sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+	r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+		r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+	pr.BecomeSnapshot(sindex)
+	r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+
+	r.send(pb.Message{To: to, Type: pb.MsgSnap, Snapshot: &snapshot})
+	return true
+}
+
+// sendHeartbeat sends a heartbeat RPC to the given peer.
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
+	pr := r.trk.Progress[to]
+	// Attach the commit as min(to.matched, r.committed).
+	// When the leader sends out a heartbeat message,
+	// the receiver (follower) might not be matched with the leader
+	// or it might not have all the committed entries.
+	// The leader MUST NOT forward the follower's commit to
+	// an unmatched index.
+	commit := min(pr.Match, r.raftLog.committed)
+	r.send(pb.Message{
+		To:      to,
+		Type:    pb.MsgHeartbeat,
+		Commit:  commit,
+		Context: ctx,
+	})
+	pr.SentCommit(commit)
+}
+
+// bcastAppend sends an append RPC with entries to all peers that are not
+// up-to-date according to the progress recorded in r.trk.
+func (r *raft) bcastAppend() {
+	r.trk.Visit(func(id uint64, _ *tracker.Progress) {
+		if id == r.id {
+			return
+		}
+		r.sendAppend(id)
+	})
+}
+
+// bcastHeartbeat sends a heartbeat RPC, without entries, to all peers.
+func (r *raft) bcastHeartbeat() {
+	lastCtx := r.readOnly.lastPendingRequestCtx()
+	if len(lastCtx) == 0 {
+		r.bcastHeartbeatWithCtx(nil)
+	} else {
+		r.bcastHeartbeatWithCtx([]byte(lastCtx))
+	}
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
+	r.trk.Visit(func(id uint64, _ *tracker.Progress) {
+		if id == r.id {
+			return
+		}
+		r.sendHeartbeat(id, ctx)
+	})
+}
+
+func (r *raft) appliedTo(index uint64, size entryEncodingSize) {
+	oldApplied := r.raftLog.applied
+	newApplied := max(index, oldApplied)
+	r.raftLog.appliedTo(newApplied, size)
+
+	if r.trk.Config.AutoLeave && newApplied >= r.pendingConfIndex && r.state == StateLeader {
+		// If the current (and most recent, at least for this leader's term)
+		// configuration should be auto-left, initiate that now. We use a
+		// nil Data which unmarshals into an empty ConfChangeV2 and has the
+		// benefit that appendEntry can never refuse it based on its size
+		// (which registers as zero).
+		m, err := confChangeToMsg(nil)
+		if err != nil {
+			panic(err)
+		}
+		// NB: this proposal can't be dropped due to size, but can be
+		// dropped if a leadership transfer is in progress. We'll keep
+		// checking this condition on each applied entry, so either the
+		// leadership transfer will succeed and the new leader will leave
+		// the joint configuration, or the leadership transfer will fail,
+		// and we will propose the config change on the next advance.
+		if err := r.Step(m); err != nil {
+			r.logger.Debugf("not initiating automatic transition out of joint configuration %s: %v", r.trk.Config, err)
+		} else {
+			r.logger.Infof("initiating automatic transition out of joint configuration %s", r.trk.Config)
+		}
+	}
+}
+
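+// appliedSnap marks the snapshot at the given index as applied: it makes the
+// snapshot stable in the log and advances the applied index accordingly.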
+func (r *raft) appliedSnap(snap *pb.Snapshot) {
+	index := snap.Metadata.Index
+	r.raftLog.stableSnapTo(index)
+	r.appliedTo(index, 0 /* size */)
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if the commit
+// index changed (in which case the caller should call r.bcastAppend). This can
+// only be called in StateLeader.
+func (r *raft) maybeCommit() bool {
+	defer traceCommit(r)
+
+	return r.raftLog.maybeCommit(entryID{term: r.Term, index: r.trk.Committed()})
+}
+
+func (r *raft) reset(term uint64) {
+	if r.Term != term {
+		r.Term = term
+		r.Vote = None
+	}
+	r.lead = None
+
+	r.electionElapsed = 0
+	r.heartbeatElapsed = 0
+	r.resetRandomizedElectionTimeout()
+
+	r.abortLeaderTransfer()
+
+	r.trk.ResetVotes()
+	r.trk.Visit(func(id uint64, pr *tracker.Progress) {
+		*pr = tracker.Progress{
+			Match:     0,
+			Next:      r.raftLog.lastIndex() + 1,
+			Inflights: tracker.NewInflights(r.trk.MaxInflight, r.trk.MaxInflightBytes),
+			IsLearner: pr.IsLearner,
+		}
+		if id == r.id {
+			pr.Match = r.raftLog.lastIndex()
+		}
+	})
+
+	r.pendingConfIndex = 0
+	r.uncommittedSize = 0
+	r.readOnly = newReadOnly(r.readOnly.option)
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
+	li := r.raftLog.lastIndex()
+	for i := range es {
+		es[i].Term = r.Term
+		es[i].Index = li + 1 + uint64(i)
+	}
+	// Track the size of this uncommitted proposal.
+	if !r.increaseUncommittedSize(es) {
+		r.logger.Warningf(
+			"%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal",
+			r.id,
+		)
+		// Drop the proposal.
+		return false
+	}
+
+	traceReplicate(r, es...)
+
+	// use latest "last" index after truncate/append
+	li = r.raftLog.append(es...)
+	// The leader needs to self-ack the entries just appended once they have
+	// been durably persisted (since it doesn't send an MsgApp to itself). This
+	// response message will be added to msgsAfterAppend and delivered back to
+	// this node after these entries have been written to stable storage. When
+	// handled, this is roughly equivalent to:
+	//
+	//  r.trk.Progress[r.id].MaybeUpdate(e.Index)
+	//  if r.maybeCommit() {
+	//  	r.bcastAppend()
+	//  }
+	r.send(pb.Message{To: r.id, Type: pb.MsgAppResp, Index: li})
+	return true
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+	r.electionElapsed++
+
+	if r.promotable() && r.pastElectionTimeout() {
+		r.electionElapsed = 0
+		if err := r.Step(pb.Message{From: r.id, Type: pb.MsgHup}); err != nil {
+			r.logger.Debugf("error occurred during election: %v", err)
+		}
+	}
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+	r.heartbeatElapsed++
+	r.electionElapsed++
+
+	if r.electionElapsed >= r.electionTimeout {
+		r.electionElapsed = 0
+		if r.checkQuorum {
+			if err := r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}); err != nil {
+				r.logger.Debugf("error occurred during checking quorum: %v", err)
+			}
+		}
+		// If the current leader cannot transfer leadership within one electionTimeout, it aborts the transfer and resumes normal leadership.
+		if r.state == StateLeader && r.leadTransferee != None {
+			r.abortLeaderTransfer()
+		}
+	}
+
+	if r.state != StateLeader {
+		return
+	}
+
+	if r.heartbeatElapsed >= r.heartbeatTimeout {
+		r.heartbeatElapsed = 0
+		if err := r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}); err != nil {
+			r.logger.Debugf("error occurred during checking sending heartbeat: %v", err)
+		}
+	}
+}
+
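+// becomeFollower transitions this node to StateFollower at the given term,
+// following lead (which may be None if the leader is unknown).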
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+	r.step = stepFollower
+	r.reset(term)
+	r.tick = r.tickElection
+	r.lead = lead
+	r.state = StateFollower
+	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+
+	traceBecomeFollower(r)
+}
+
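+// becomeCandidate transitions this node to StateCandidate, incrementing the
+// term and voting for itself.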
+func (r *raft) becomeCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> candidate]")
+	}
+	r.step = stepCandidate
+	r.reset(r.Term + 1)
+	r.tick = r.tickElection
+	r.Vote = r.id
+	r.state = StateCandidate
+	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+
+	traceBecomeCandidate(r)
+}
+
+func (r *raft) becomePreCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> pre-candidate]")
+	}
+	// Becoming a pre-candidate changes our step functions and state,
+	// but doesn't change anything else. In particular it does not increase
+	// r.Term or change r.Vote.
+	r.step = stepCandidate
+	r.trk.ResetVotes()
+	r.tick = r.tickElection
+	r.lead = None
+	r.state = StatePreCandidate
+	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
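+// becomeLeader transitions this node to StateLeader at the current term and
+// appends an empty entry at that term to the log.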
+func (r *raft) becomeLeader() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateFollower {
+		panic("invalid transition [follower -> leader]")
+	}
+	r.step = stepLeader
+	r.reset(r.Term)
+	r.tick = r.tickHeartbeat
+	r.lead = r.id
+	r.state = StateLeader
+	// Followers enter replicate mode when they've been successfully probed
+	// (perhaps after having received a snapshot as a result). The leader is
+	// trivially in this state. Note that r.reset() has initialized this
+	// progress with the last index already.
+	pr := r.trk.Progress[r.id]
+	pr.BecomeReplicate()
+	// The leader always has RecentActive == true; MsgCheckQuorum makes sure to
+	// preserve this.
+	pr.RecentActive = true
+
+	// Conservatively set the pendingConfIndex to the last index in the
+	// log. There may or may not be a pending config change, but it's
+	// safe to delay any future proposals until we commit all our
+	// pending log entries, and scanning the entire tail of the log
+	// could be expensive.
+	r.pendingConfIndex = r.raftLog.lastIndex()
+
+	traceBecomeLeader(r)
+	emptyEnt := pb.Entry{Data: nil}
+	if !r.appendEntry(emptyEnt) {
+		// This won't happen because we just called reset() above.
+		r.logger.Panic("empty entry was dropped")
+	}
+	// The payloadSize of an empty entry is 0 (see TestPayloadSizeOfEmptyEntry),
+	// so the preceding log append does not count against the uncommitted log
+	// quota of the new leader. In other words, after the call to appendEntry,
+	// r.uncommittedSize is still 0.
+	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
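+// hup starts a new (pre-)election if this node is promotable and has no
+// pending, unapplied configuration changes.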
+func (r *raft) hup(t CampaignType) {
+	if r.state == StateLeader {
+		r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+		return
+	}
+
+	if !r.promotable() {
+		r.logger.Warningf("%x is unpromotable and can not campaign", r.id)
+		return
+	}
+	if r.hasUnappliedConfChanges() {
+		r.logger.Warningf("%x cannot campaign at term %d since there are still pending configuration changes to apply", r.id, r.Term)
+		return
+	}
+
+	r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+	r.campaign(t)
+}
+
+// errBreak is a sentinel error used to break a callback-based loop.
+var errBreak = errors.New("break")
+
+func (r *raft) hasUnappliedConfChanges() bool {
+	if r.raftLog.applied >= r.raftLog.committed { // in fact applied == committed
+		return false
+	}
+	found := false
+	// Scan all unapplied committed entries to find a config change. Paginate the
+	// scan, to avoid a potentially unlimited memory spike.
+	lo, hi := r.raftLog.applied+1, r.raftLog.committed+1
+	// Reuse the maxApplyingEntsSize limit because it is used for similar purposes
+	// (limiting the read of unapplied committed entries) when raft sends entries
+	// via the Ready struct for application.
+	// TODO(pavelkalinnikov): find a way to budget memory/bandwidth for this scan
+	// outside the raft package.
+	pageSize := r.raftLog.maxApplyingEntsSize
+	if err := r.raftLog.scan(lo, hi, pageSize, func(ents []pb.Entry) error {
+		for i := range ents {
+			if ents[i].Type == pb.EntryConfChange || ents[i].Type == pb.EntryConfChangeV2 {
+				found = true
+				return errBreak
+			}
+		}
+		return nil
+	}); err != nil && err != errBreak {
+		r.logger.Panicf("error scanning unapplied entries [%d, %d): %v", lo, hi, err)
+	}
+	return found
+}
+
+// campaign transitions the raft instance to candidate state. This must only be
+// called after verifying that this is a legitimate transition.
+func (r *raft) campaign(t CampaignType) {
+	if !r.promotable() {
+		// This path should not be hit (callers are supposed to check), but
+		// better safe than sorry.
+		r.logger.Warningf("%x is unpromotable; campaign() should not have been called", r.id)
+	}
+	var term uint64
+	var voteMsg pb.MessageType
+	if t == campaignPreElection {
+		r.becomePreCandidate()
+		voteMsg = pb.MsgPreVote
+		// PreVote RPCs are sent for the next term before we've incremented r.Term.
+		term = r.Term + 1
+	} else {
+		r.becomeCandidate()
+		voteMsg = pb.MsgVote
+		term = r.Term
+	}
+	var ids []uint64
+	{
+		idMap := r.trk.Voters.IDs()
+		ids = make([]uint64, 0, len(idMap))
+		for id := range idMap {
+			ids = append(ids, id)
+		}
+		slices.Sort(ids)
+	}
+	for _, id := range ids {
+		if id == r.id {
+			// The candidate votes for itself and should account for this self
+			// vote once the vote has been durably persisted (since it doesn't
+			// send a MsgVote to itself). This response message will be added to
+			// msgsAfterAppend and delivered back to this node after the vote
+			// has been written to stable storage.
+			r.send(pb.Message{To: id, Term: term, Type: voteRespMsgType(voteMsg)})
+			continue
+		}
+		// TODO(pav-kv): it should be ok to simply print %+v for the lastEntryID.
+		last := r.raftLog.lastEntryID()
+		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+			r.id, last.term, last.index, voteMsg, id, r.Term)
+
+		var ctx []byte
+		if t == campaignTransfer {
+			ctx = []byte(t)
+		}
+		r.send(pb.Message{To: id, Term: term, Type: voteMsg, Index: last.index, LogTerm: last.term, Context: ctx})
+	}
+}
+
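+// poll records a (pre-)vote from id and tallies all votes received so far,
+// returning the granted/rejected counts and the resulting quorum outcome.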
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) {
+	if v {
+		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
+	} else {
+		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
+	}
+	r.trk.RecordVote(id, v)
+	return r.trk.TallyVotes()
+}
+
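+// Step advances the state machine using the given message. Handling that is
+// common to all states (term comparisons, vote requests, storage responses)
+// happens here; other messages are dispatched to the state-specific step
+// function in r.step.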
+func (r *raft) Step(m pb.Message) error {
+	traceReceiveMessage(r, &m)
+
+	// Handle the message term, which may result in our stepping down to a follower.
+	switch {
+	case m.Term == 0:
+		// local message
+	case m.Term > r.Term:
+		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+			force := bytes.Equal(m.Context, []byte(campaignTransfer))
+			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
+			if !force && inLease {
+				// If a server receives a RequestVote request within the minimum election timeout
+				// of hearing from a current leader, it does not update its term or grant its vote
+				last := r.raftLog.lastEntryID()
+				// TODO(pav-kv): it should be ok to simply print the %+v of the lastEntryID.
+				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+					r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+				return nil
+			}
+		}
+		switch {
+		case m.Type == pb.MsgPreVote:
+			// Never change our term in response to a PreVote
+		case m.Type == pb.MsgPreVoteResp && !m.Reject:
+			// We send pre-vote requests with a term in our future. If the
+			// pre-vote is granted, we will increment our term when we get a
+			// quorum. If it is not, the term comes from the node that
+			// rejected our vote so we should become a follower at the new
+			// term.
+		default:
+			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+				r.becomeFollower(m.Term, m.From)
+			} else {
+				r.becomeFollower(m.Term, None)
+			}
+		}
+
+	case m.Term < r.Term:
+		if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+			// We have received messages from a leader at a lower term. It is possible
+			// that these messages were simply delayed in the network, but this could
+			// also mean that this node has advanced its term number during a network
+			// partition, and it is now unable to either win an election or to rejoin
+			// the majority on the old term. If checkQuorum is false, this will be
+			// handled by incrementing term numbers in response to MsgVote with a
+			// higher term, but if checkQuorum is true we may not advance the term on
+			// MsgVote and must generate other messages to advance the term. The net
+			// result of these two features is to minimize the disruption caused by
+			// nodes that have been removed from the cluster's configuration: a
+			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
+			// disruptive term increases, by notifying the leader of this node's activeness.
+			// The above comments also apply to Pre-Vote.
+			//
+			// When a follower gets isolated, it soon starts an election, ending
+			// up with a higher term than the leader, although it won't receive enough
+			// votes to win the election. When it regains connectivity, this response
+			// with "pb.MsgAppResp" of a higher term would force the leader to step down.
+			// However, this disruption is inevitable in order to free this stuck node
+			// with a fresh election. This can be prevented with the Pre-Vote phase.
+			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
+		} else if m.Type == pb.MsgPreVote {
+			// Before Pre-Vote was enabled, there may be candidates with a higher
+			// term but a shorter log. After upgrading to Pre-Vote, the cluster may
+			// deadlock if we drop messages with a lower term.
+			last := r.raftLog.lastEntryID()
+			// TODO(pav-kv): it should be ok to simply print %+v of the lastEntryID.
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+				r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
+		} else if m.Type == pb.MsgStorageAppendResp {
+			if m.Index != 0 {
+				// Don't consider the appended log entries to be stable because
+				// they may have been overwritten in the unstable log during a
+				// later term. See the comment in newStorageAppendResp for more
+				// about this race.
+				r.logger.Infof("%x [term: %d] ignored entry appends from a %s message with lower term [term: %d]",
+					r.id, r.Term, m.Type, m.Term)
+			}
+			if m.Snapshot != nil {
+				// Even if the snapshot applied under a different term, its
+				// application is still valid. Snapshots carry committed
+				// (term-independent) state.
+				r.appliedSnap(m.Snapshot)
+			}
+		} else {
+			// ignore other cases
+			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+		}
+		return nil
+	}
+
+	switch m.Type {
+	case pb.MsgHup:
+		if r.preVote {
+			r.hup(campaignPreElection)
+		} else {
+			r.hup(campaignElection)
+		}
+
+	case pb.MsgStorageAppendResp:
+		if m.Index != 0 {
+			r.raftLog.stableTo(entryID{term: m.LogTerm, index: m.Index})
+		}
+		if m.Snapshot != nil {
+			r.appliedSnap(m.Snapshot)
+		}
+
+	case pb.MsgStorageApplyResp:
+		if len(m.Entries) > 0 {
+			index := m.Entries[len(m.Entries)-1].Index
+			r.appliedTo(index, entsSize(m.Entries))
+			r.reduceUncommittedSize(payloadsSize(m.Entries))
+		}
+
+	case pb.MsgVote, pb.MsgPreVote:
+		// We can vote if this is a repeat of a vote we've already cast...
+		canVote := r.Vote == m.From ||
+			// ...we haven't voted and we don't think there's a leader yet in this term...
+			(r.Vote == None && r.lead == None) ||
+			// ...or this is a PreVote for a future term...
+			(m.Type == pb.MsgPreVote && m.Term > r.Term)
+		// ...and we believe the candidate is up to date.
+		lastID := r.raftLog.lastEntryID()
+		candLastID := entryID{term: m.LogTerm, index: m.Index}
+		if canVote && r.raftLog.isUpToDate(candLastID) {
+			// Note: it turns out that learners must be allowed to cast votes.
+			// This seems counter-intuitive but is necessary in the situation in which
+			// a learner has been promoted (i.e. is now a voter) but has not learned
+			// about this yet.
+			// For example, consider a group in which id=1 is a learner and id=2 and
+			// id=3 are voters. A configuration change promoting 1 can be committed on
+			// the quorum `{2,3}` without the config change being appended to the
+			// learner's log. If the leader (say 2) fails, there are de facto two
+			// voters remaining. Only 3 can win an election (due to its log containing
+			// all committed entries), but to do so it will need 1 to vote. But 1
+			// considers itself a learner and will continue to do so until 3 has
+			// stepped up as leader, replicates the conf change to 1, and 1 applies it.
+			// Ultimately, by receiving a request to vote, the learner realizes that
+			// the candidate believes it to be a voter, and that it should act
+			// accordingly. The candidate's config may be stale, too; but in that case
+			// it won't win the election, at least in the absence of the bug discussed
+			// in:
+			// https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263.
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+				r.id, lastID.term, lastID.index, r.Vote, m.Type, m.From, candLastID.term, candLastID.index, r.Term)
+			// When responding to Msg{Pre,}Vote messages we include the term
+			// from the message, not the local term. To see why, consider the
+			// case where a single node was previously partitioned away and
+			// its local term is now out of date. If we include the local term
+			// (recall that for pre-votes we don't update the local term), the
+			// (pre-)campaigning node on the other end will proceed to ignore
+			// the message (it ignores all out of date messages).
+			// The term in the original message and current local term are the
+			// same in the case of regular votes, but different for pre-votes.
+			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+			if m.Type == pb.MsgVote {
+				// Only record real votes.
+				r.electionElapsed = 0
+				r.Vote = m.From
+			}
+		} else {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+				r.id, lastID.term, lastID.index, r.Vote, m.Type, m.From, candLastID.term, candLastID.index, r.Term)
+			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
+		}
+
+	default:
+		err := r.step(r, m)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
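+// stepFunc handles a message for a specific node state (leader,
+// candidate/pre-candidate, or follower).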
+type stepFunc func(r *raft, m pb.Message) error
+
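+// stepLeader handles messages received while this node is in StateLeader.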
+func stepLeader(r *raft, m pb.Message) error {
+	// These message types do not require any progress for m.From.
+	switch m.Type {
+	case pb.MsgBeat:
+		r.bcastHeartbeat()
+		return nil
+	case pb.MsgCheckQuorum:
+		if !r.trk.QuorumActive() {
+			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+			r.becomeFollower(r.Term, None)
+		}
+		// Mark everyone (but ourselves) as inactive in preparation for the next
+		// CheckQuorum.
+		r.trk.Visit(func(id uint64, pr *tracker.Progress) {
+			if id != r.id {
+				pr.RecentActive = false
+			}
+		})
+		return nil
+	case pb.MsgProp:
+		if len(m.Entries) == 0 {
+			r.logger.Panicf("%x stepped empty MsgProp", r.id)
+		}
+		if r.trk.Progress[r.id] == nil {
+			// If we are not currently a member of the range (i.e. this node
+			// was removed from the configuration while serving as leader),
+			// drop any new proposals.
+			return ErrProposalDropped
+		}
+		if r.leadTransferee != None {
+			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+			return ErrProposalDropped
+		}
+
+		for i := range m.Entries {
+			e := &m.Entries[i]
+			var cc pb.ConfChangeI
+			if e.Type == pb.EntryConfChange {
+				var ccc pb.ConfChange
+				if err := ccc.Unmarshal(e.Data); err != nil {
+					panic(err)
+				}
+				cc = ccc
+			} else if e.Type == pb.EntryConfChangeV2 {
+				var ccc pb.ConfChangeV2
+				if err := ccc.Unmarshal(e.Data); err != nil {
+					panic(err)
+				}
+				cc = ccc
+			}
+			if cc != nil {
+				alreadyPending := r.pendingConfIndex > r.raftLog.applied
+				alreadyJoint := len(r.trk.Config.Voters[1]) > 0
+				wantsLeaveJoint := len(cc.AsV2().Changes) == 0
+
+				var failedCheck string
+				if alreadyPending {
+					failedCheck = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied)
+				} else if alreadyJoint && !wantsLeaveJoint {
+					failedCheck = "must transition out of joint config first"
+				} else if !alreadyJoint && wantsLeaveJoint {
+					failedCheck = "not in joint state; refusing empty conf change"
+				}
+
+				if failedCheck != "" && !r.disableConfChangeValidation {
+					r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.trk.Config, failedCheck)
+					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+				} else {
+					r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
+					traceChangeConfEvent(cc, r)
+				}
+			}
+		}
+
+		if !r.appendEntry(m.Entries...) {
+			return ErrProposalDropped
+		}
+		r.bcastAppend()
+		return nil
+	case pb.MsgReadIndex:
+		// only one voting member (the leader) in the cluster
+		if r.trk.IsSingleton() {
+			if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None {
+				r.send(resp)
+			}
+			return nil
+		}
+
+		// Postpone read only request when this leader has not committed
+		// any log entry at its term.
+		if !r.committedEntryInCurrentTerm() {
+			r.pendingReadIndexMessages = append(r.pendingReadIndexMessages, m)
+			return nil
+		}
+
+		sendMsgReadIndexResponse(r, m)
+
+		return nil
+	case pb.MsgForgetLeader:
+		return nil // noop on leader
+	}
+
+	// All other message types require a progress for m.From (pr).
+	pr := r.trk.Progress[m.From]
+	if pr == nil {
+		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+		return nil
+	}
+	switch m.Type {
+	case pb.MsgAppResp:
+		// NB: this code path is also hit from (*raft).advance, where the leader steps
+		// an MsgAppResp to acknowledge the appended entries in the last Ready.
+
+		pr.RecentActive = true
+
+		if m.Reject {
+			// RejectHint is the suggested next base entry for appending (i.e.
+			// we try to append entry RejectHint+1 next), and LogTerm is the
+			// term that the follower has at index RejectHint. Older versions
+			// of this library did not populate LogTerm for rejections and it
+			// is zero for followers with an empty log.
+			//
+			// Under normal circumstances, the leader's log is longer than the
+			// follower's and the follower's log is a prefix of the leader's
+			// (i.e. there is no divergent uncommitted suffix of the log on the
+			// follower). In that case, the first probe reveals where the
+			// follower's log ends (RejectHint=follower's last index) and the
+			// subsequent probe succeeds.
+			//
+			// However, when networks are partitioned or systems overloaded,
+			// large divergent log tails can occur. The naive attempt, probing
+			// entry by entry in decreasing order, takes time proportional to the
+			// product of the length of the diverging tails and the network
+			// round-trip latency, which can easily result in hours of time spent
+			// probing and can even cause outright outages. The probes are thus optimized as
+			// described below.
+			r.logger.Debugf("%x received MsgAppResp(rejected, hint: (index %d, term %d)) from %x for index %d",
+				r.id, m.RejectHint, m.LogTerm, m.From, m.Index)
+			nextProbeIdx := m.RejectHint
+			if m.LogTerm > 0 {
+				// If the follower has an uncommitted log tail, we would end up
+				// probing one by one until we hit the common prefix.
+				//
+				// For example, if the leader has:
+				//
+				//   idx        1 2 3 4 5 6 7 8 9
+				//              -----------------
+				//   term (L)   1 3 3 3 5 5 5 5 5
+				//   term (F)   1 1 1 1 2 2
+				//
+				// Then, after sending an append anchored at (idx=9,term=5) we
+				// would receive a RejectHint of 6 and LogTerm of 2. Without the
+				// code below, we would try an append at index 6, which would
+				// fail again.
+				//
+				// However, looking only at what the leader knows about its own
+				// log and the rejection hint, it is clear that a probe at index
+				// 6, 5, 4, 3, and 2 must fail as well:
+				//
+				// For all of these indexes, the leader's log term is larger than
+				// the rejection's log term. If a probe at one of these indexes
+				// succeeded, its log term at that index would match the leader's,
+				// i.e. 3 or 5 in this example. But the follower already told the
+				// leader that it is still at term 2 at index 6, and since the
+				// log term only ever goes up (within a log), this is a contradiction.
+				//
+				// At index 1, however, the leader can draw no such conclusion,
+				// as its term 1 is not larger than the term 2 from the
+				// follower's rejection. We thus probe at 1, which will succeed
+				// in this example. In general, with this approach we probe at
+				// most once per term found in the leader's log.
+				//
+				// There is a similar mechanism on the follower (implemented in
+				// handleAppendEntries via a call to findConflictByTerm) that is
+				// useful if the follower has a large divergent uncommitted log
+				// tail[1], as in this example:
+				//
+				//   idx        1 2 3 4 5 6 7 8 9
+				//              -----------------
+				//   term (L)   1 3 3 3 3 3 3 3 7
+				//   term (F)   1 3 3 4 4 5 5 5 6
+				//
+				// Naively, the leader would probe at idx=9, receive a rejection
+				// revealing the log term of 6 at the follower. Since the leader's
+				// term at the previous index is already smaller than 6, the leader-
+				// side optimization discussed above is ineffective. The leader thus
+				// probes at index 8 and, naively, receives a rejection for the same
+				// index and log term 5. Again, the leader optimization does not improve
+				// over linear probing as term 5 is above the leader's term 3 for that
+				// and many preceding indexes; the leader would have to probe linearly
+				// until it would finally hit index 3, where the probe would succeed.
+				//
+				// Instead, we apply a similar optimization on the follower. When the
+				// follower receives the probe at index 8 (log term 3), it concludes
+				// that all of the leader's log preceding that index has log terms of
+				// 3 or below. The largest index in the follower's log with a log term
+				// of 3 or below is index 3. The follower will thus return a rejection
+				// for index=3, log term=3 instead. The leader's next probe will then
+				// succeed at that index.
+				//
+				// [1]: more precisely, if the log terms in the large uncommitted
+				// tail on the follower are larger than the leader's. At first,
+				// it may seem unintuitive that a follower could even have such
+				// a large tail, but it can happen:
+				//
+				// 1. Leader appends (but does not commit) entries 2 and 3, crashes.
+				//   idx        1 2 3 4 5 6 7 8 9
+				//              -----------------
+				//   term (L)   1 2 2     [crashes]
+				//   term (F)   1
+				//   term (F)   1
+				//
+				// 2. a follower becomes leader and appends entries at term 3.
+				//              -----------------
+				//   term (x)   1 2 2     [down]
+				//   term (F)   1 3 3 3 3
+				//   term (F)   1
+				//
+				// 3. term 3 leader goes down, term 2 leader returns as term 4
+				//    leader. It commits the log & entries at term 4.
+				//
+				//              -----------------
+				//   term (L)   1 2 2 2
+				//   term (x)   1 3 3 3 3 [down]
+				//   term (F)   1
+				//              -----------------
+				//   term (L)   1 2 2 2 4 4 4
+				//   term (F)   1 3 3 3 3 [gets probed]
+				//   term (F)   1 2 2 2 4 4 4
+				//
+				// 4. the leader will now probe the returning follower at index
+				//    7, the rejection points it at the end of the follower's log
+				//    which is at a higher log term than the actually committed
+				//    log.
+				nextProbeIdx, _ = r.raftLog.findConflictByTerm(m.RejectHint, m.LogTerm)
+			}
+			if pr.MaybeDecrTo(m.Index, nextProbeIdx) {
+				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+				if pr.State == tracker.StateReplicate {
+					pr.BecomeProbe()
+				}
+				r.sendAppend(m.From)
+			}
+		} else {
+			// We want to update our tracking if the response updates our
+			// matched index or if the response can move a probing peer back
+			// into StateReplicate (see heartbeat_rep_recovers_from_probing.txt
+			// for an example of the latter case).
+			// NB: the same does not make sense for StateSnapshot - if `m.Index`
+			// equals pr.Match we know we don't have m.Index+1 in our log, so moving
+			// back to replicating state is not useful; besides pr.PendingSnapshot
+			// would prevent it.
+			if pr.MaybeUpdate(m.Index) || (pr.Match == m.Index && pr.State == tracker.StateProbe) {
+				switch {
+				case pr.State == tracker.StateProbe:
+					pr.BecomeReplicate()
+				case pr.State == tracker.StateSnapshot && pr.Match+1 >= r.raftLog.firstIndex():
+					// Note that we don't take into account PendingSnapshot to
+					// enter this branch. No matter at which index a snapshot
+					// was actually applied, as long as this allows catching up
+					// the follower from the log, we will accept it. This gives
+					// systems more flexibility in how they implement snapshots;
+					// see the comments on PendingSnapshot.
+					r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+					// Transition back to replicating state via probing state
+					// (which takes the snapshot into account). If we didn't
+					// move to replicating state, that would only happen with
+					// the next round of appends (but there may not be a next
+					// round for a while, exposing an inconsistent RaftStatus).
+					pr.BecomeProbe()
+					pr.BecomeReplicate()
+				case pr.State == tracker.StateReplicate:
+					pr.Inflights.FreeLE(m.Index)
+				}
+
+				if r.maybeCommit() {
+					// committed index has progressed for the term, so it is safe
+					// to respond to pending read index requests
+					releasePendingReadIndexMessages(r)
+					r.bcastAppend()
+				} else if r.id != m.From && pr.CanBumpCommit(r.raftLog.committed) {
+					// This node may be missing the latest commit index, so send it.
+					// NB: this is not strictly necessary because the periodic heartbeat
+					// messages deliver commit indices too. However, a message sent now
+					// may arrive earlier than the next heartbeat fires.
+					r.sendAppend(m.From)
+				}
+				// We've updated flow control information above, which may
+				// allow us to send multiple (size-limited) in-flight messages
+				// at once (such as when transitioning from probe to
+				// replicate, or when freeTo() covers multiple messages). If
+				// we have more entries to send, send as many messages as we
+				// can (without sending empty messages for the commit index)
+				if r.id != m.From {
+					for r.maybeSendAppend(m.From, false /* sendIfEmpty */) {
+					}
+				}
+				// Transfer leadership is in progress.
+				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
+					r.sendTimeoutNow(m.From)
+				}
+			}
+		}
+	case pb.MsgHeartbeatResp:
+		pr.RecentActive = true
+		pr.MsgAppFlowPaused = false
+
+		// NB: if the follower is paused (full Inflights), this will still send an
+		// empty append, allowing it to recover from situations in which all the
+		// messages that filled up Inflights in the first place were dropped. Note
+		// also that the outgoing heartbeat already communicated the commit index.
+		//
+		// If the follower is fully caught up but also in StateProbe (as can happen
+		// if ReportUnreachable was called), we also want to send an append (it will
+		// be empty) to allow the follower to transition back to StateReplicate once
+		// it responds.
+		//
+		// Note that StateSnapshot typically satisfies pr.Match < lastIndex, but
+		// `pr.Paused()` is always true for StateSnapshot, so sendAppend is a
+		// no-op.
+		if pr.Match < r.raftLog.lastIndex() || pr.State == tracker.StateProbe {
+			r.sendAppend(m.From)
+		}
+
+		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+			return nil
+		}
+
+		if r.trk.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon {
+			return nil
+		}
+
+		rss := r.readOnly.advance(m)
+		for _, rs := range rss {
+			if resp := r.responseToReadIndexReq(rs.req, rs.index); resp.To != None {
+				r.send(resp)
+			}
+		}
+	case pb.MsgSnapStatus:
+		if pr.State != tracker.StateSnapshot {
+			return nil
+		}
+		if !m.Reject {
+			pr.BecomeProbe()
+			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		} else {
+			// NB: the order here matters or we'll be probing erroneously from
+			// the snapshot index, but the snapshot was never applied.
+			pr.PendingSnapshot = 0
+			pr.BecomeProbe()
+			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		}
+		// If the snapshot finished, wait for the MsgAppResp from the remote node before
+		// sending out the next MsgApp.
+		// If the snapshot failed, wait for a heartbeat interval before the next attempt.
+		pr.MsgAppFlowPaused = true
+	case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is a high probability that a MsgApp was lost.
+		if pr.State == tracker.StateReplicate {
+			pr.BecomeProbe()
+		}
+		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+	case pb.MsgTransferLeader:
+		if pr.IsLearner {
+			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+			return nil
+		}
+		leadTransferee := m.From
+		lastLeadTransferee := r.leadTransferee
+		if lastLeadTransferee != None {
+			if lastLeadTransferee == leadTransferee {
+				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
+					r.id, r.Term, leadTransferee, leadTransferee)
+				return nil
+			}
+			r.abortLeaderTransfer()
+			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
+		}
+		if leadTransferee == r.id {
+			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+			return nil
+		}
+		// Transfer leadership to third party.
+		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
+		r.electionElapsed = 0
+		r.leadTransferee = leadTransferee
+		if pr.Match == r.raftLog.lastIndex() {
+			r.sendTimeoutNow(leadTransferee)
+			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+		} else {
+			r.sendAppend(leadTransferee)
+		}
+	}
+	return nil
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) error {
+	// Only handle vote responses corresponding to our candidacy (while in
+	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+	// our pre-candidate state).
+	var myVoteRespType pb.MessageType
+	if r.state == StatePreCandidate {
+		myVoteRespType = pb.MsgPreVoteResp
+	} else {
+		myVoteRespType = pb.MsgVoteResp
+	}
+	switch m.Type {
+	case pb.MsgProp:
+		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+		return ErrProposalDropped
+	case pb.MsgApp:
+		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+		r.handleSnapshot(m)
+	case myVoteRespType:
+		gr, rj, res := r.poll(m.From, m.Type, !m.Reject)
+		r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj)
+		switch res {
+		case quorum.VoteWon:
+			if r.state == StatePreCandidate {
+				r.campaign(campaignElection)
+			} else {
+				r.becomeLeader()
+				r.bcastAppend()
+			}
+		case quorum.VoteLost:
+			// pb.MsgPreVoteResp contains future term of pre-candidate
+			// m.Term > r.Term; reuse r.Term
+			r.becomeFollower(r.Term, None)
+		}
+	case pb.MsgTimeoutNow:
+		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
+	}
+	return nil
+}
+
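+// stepFollower handles messages received while this node is in StateFollower,
+// mostly by forwarding proposals and read requests to the known leader and by
+// acking replication traffic from it.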
+func stepFollower(r *raft, m pb.Message) error {
+	switch m.Type {
+	case pb.MsgProp:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+			return ErrProposalDropped
+		} else if r.disableProposalForwarding {
+			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+			return ErrProposalDropped
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgApp:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleSnapshot(m)
+	case pb.MsgTransferLeader:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
+			return nil
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgForgetLeader:
+		if r.readOnly.option == ReadOnlyLeaseBased {
+			r.logger.Error("ignoring MsgForgetLeader due to ReadOnlyLeaseBased")
+			return nil
+		}
+		if r.lead != None {
+			r.logger.Infof("%x forgetting leader %x at term %d", r.id, r.lead, r.Term)
+			r.lead = None
+		}
+	case pb.MsgTimeoutNow:
+		r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+		// Leadership transfers never use pre-vote even if r.preVote is true; we
+		// know we are not recovering from a partition so there is no need for the
+		// extra round trip.
+		r.hup(campaignTransfer)
+	case pb.MsgReadIndex:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+			return nil
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgReadIndexResp:
+		if len(m.Entries) != 1 {
+			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+			return nil
+		}
+		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+	}
+	return nil
+}
+
+// logSliceFromMsgApp extracts the appended logSlice from a MsgApp message.
+func logSliceFromMsgApp(m *pb.Message) logSlice {
+	// TODO(pav-kv): consider also validating the logSlice here.
+	return logSlice{
+		term:    m.Term,
+		prev:    entryID{term: m.LogTerm, index: m.Index},
+		entries: m.Entries,
+	}
+}
+
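+// handleAppendEntries appends the entries carried by a MsgApp if they fit the
+// local log, and otherwise replies with a rejection hint that helps the leader
+// find the latest matching (index, term) more quickly.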
+func (r *raft) handleAppendEntries(m pb.Message) {
+	// TODO(pav-kv): construct logSlice up the stack next to receiving the
+	// message, and validate it before taking any action (e.g. bumping term).
+	a := logSliceFromMsgApp(&m)
+
+	if a.prev.index < r.raftLog.committed {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+		return
+	}
+	if mlastIndex, ok := r.raftLog.maybeAppend(a, m.Commit); ok {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+		return
+	}
+	r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x",
+		r.id, r.raftLog.zeroTermOnOutOfBounds(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+
+	// Our log does not match the leader's at index m.Index. Return a hint to the
+	// leader - a guess on the maximal (index, term) at which the logs match. Do
+	// this by searching through the follower's log for the maximum (index, term)
+	// pair with a term <= the MsgApp's LogTerm and an index <= the MsgApp's
+	// Index. This can help skip all indexes in the follower's uncommitted tail
+	// with terms greater than the MsgApp's LogTerm.
+	//
+	// See the other caller for findConflictByTerm (in stepLeader) for a much more
+	// detailed explanation of this mechanism.
+
+	// NB: m.Index >= raftLog.committed by now (see the early return above), and
+	// raftLog.lastIndex() >= raftLog.committed by invariant, so min of the two is
+	// also >= raftLog.committed. Hence, the findConflictByTerm argument is within
+	// the valid interval, which then will return a valid (index, term) pair with
+	// a non-zero term (unless the log is empty). However, it is safe to send a zero
+	// LogTerm in this response in any case, so we don't verify it here.
+	hintIndex := min(m.Index, r.raftLog.lastIndex())
+	hintIndex, hintTerm := r.raftLog.findConflictByTerm(hintIndex, m.LogTerm)
+	r.send(pb.Message{
+		To:         m.From,
+		Type:       pb.MsgAppResp,
+		Index:      m.Index,
+		Reject:     true,
+		RejectHint: hintIndex,
+		LogTerm:    hintTerm,
+	})
+}
+
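+// handleHeartbeat advances the commit index to the one carried by the
+// heartbeat and acks it, echoing the read-only request context back to the
+// leader.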
+func (r *raft) handleHeartbeat(m pb.Message) {
+	r.raftLog.commitTo(m.Commit)
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
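+// handleSnapshot attempts to restore the snapshot carried by a MsgSnap and
+// acks with the resulting last index, or with the current commit index if the
+// snapshot was ignored.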
+func (r *raft) handleSnapshot(m pb.Message) {
+	// MsgSnap messages should always carry a non-nil Snapshot, but err on the
+	// side of safety and treat a nil Snapshot as a zero-valued Snapshot.
+	var s pb.Snapshot
+	if m.Snapshot != nil {
+		s = *m.Snapshot
+	}
+	sindex, sterm := s.Metadata.Index, s.Metadata.Term
+	if r.restore(s) {
+		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+	} else {
+		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+	}
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of the state machine. If this method returns false, the snapshot was
+// ignored, either because it was obsolete or because of an error.
+func (r *raft) restore(s pb.Snapshot) bool {
+	if s.Metadata.Index <= r.raftLog.committed {
+		return false
+	}
+	if r.state != StateFollower {
+		// This is defense-in-depth: if the leader somehow ended up applying a
+		// snapshot, it could move into a new term without moving into a
+		// follower state. This should never fire, but if it did, we'd have
+		// prevented damage by returning early, so log only a loud warning.
+		//
+		// At the time of writing, the instance is guaranteed to be in follower
+		// state when this method is called.
+		r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id)
+		r.becomeFollower(r.Term+1, None)
+		return false
+	}
+
+	// More defense-in-depth: throw away snapshot if recipient is not in the
+	// config. This shouldn't ever happen (at the time of writing) but lots of
+	// code here and there assumes that r.id is in the progress tracker.
+	found := false
+	cs := s.Metadata.ConfState
+
+	for _, set := range [][]uint64{
+		cs.Voters,
+		cs.Learners,
+		cs.VotersOutgoing,
+		// `LearnersNext` doesn't need to be checked. According to the rules, if a peer is in
+		// `LearnersNext`, it has to be in `VotersOutgoing`.
+	} {
+		for _, id := range set {
+			if id == r.id {
+				found = true
+				break
+			}
+		}
+		if found {
+			break
+		}
+	}
+	if !found {
+		r.logger.Warningf(
+			"%x attempted to restore snapshot but it is not in the ConfState %v; should never happen",
+			r.id, cs,
+		)
+		return false
+	}
+
+	// Now go ahead and actually restore.
+
+	id := entryID{term: s.Metadata.Term, index: s.Metadata.Index}
+	if r.raftLog.matchTerm(id) {
+		// TODO(pav-kv): can print %+v of the id, but it will change the format.
+		last := r.raftLog.lastEntryID()
+		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, last.index, last.term, id.index, id.term)
+		r.raftLog.commitTo(s.Metadata.Index)
+		return false
+	}
+
+	r.raftLog.restore(s)
+
+	// Reset the configuration and add the (potentially updated) peers anew.
+	r.trk = tracker.MakeProgressTracker(r.trk.MaxInflight, r.trk.MaxInflightBytes)
+	cfg, trk, err := confchange.Restore(confchange.Changer{
+		Tracker:   r.trk,
+		LastIndex: r.raftLog.lastIndex(),
+	}, cs)
+
+	if err != nil {
+		// This should never happen. Either there's a bug in our config change
+		// handling or the client corrupted the conf change.
+		panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err))
+	}
+
+	assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, trk))
+
+	last := r.raftLog.lastEntryID()
+	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]",
+		r.id, r.raftLog.committed, last.index, last.term, id.index, id.term)
+	return true
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+	pr := r.trk.Progress[r.id]
+	return pr != nil && !pr.IsLearner && !r.raftLog.hasNextOrInProgressSnapshot()
+}
+
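+// applyConfChange applies the given configuration change to the progress
+// tracker and returns the resulting ConfState.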
+func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState {
+	cfg, trk, err := func() (tracker.Config, tracker.ProgressMap, error) {
+		changer := confchange.Changer{
+			Tracker:   r.trk,
+			LastIndex: r.raftLog.lastIndex(),
+		}
+		if cc.LeaveJoint() {
+			return changer.LeaveJoint()
+		} else if autoLeave, ok := cc.EnterJoint(); ok {
+			return changer.EnterJoint(autoLeave, cc.Changes...)
+		}
+		return changer.Simple(cc.Changes...)
+	}()
+
+	if err != nil {
+		// TODO(tbg): return the error to the caller.
+		panic(err)
+	}
+
+	return r.switchToConfig(cfg, trk)
+}
+
+// switchToConfig reconfigures this node to use the provided configuration. It
+// updates the in-memory state and, when necessary, carries out additional
+// actions such as reacting to the removal of nodes or changed quorum
+// requirements.
+//
+// The inputs usually result from restoring a ConfState or applying a ConfChange.
+func (r *raft) switchToConfig(cfg tracker.Config, trk tracker.ProgressMap) pb.ConfState {
+	traceConfChangeEvent(cfg, r)
+
+	r.trk.Config = cfg
+	r.trk.Progress = trk
+
+	r.logger.Infof("%x switched to configuration %s", r.id, r.trk.Config)
+	cs := r.trk.ConfState()
+	pr, ok := r.trk.Progress[r.id]
+
+	// Update whether the node itself is a learner, resetting to false when the
+	// node is removed.
+	r.isLearner = ok && pr.IsLearner
+
+	if (!ok || r.isLearner) && r.state == StateLeader {
+		// This node is leader and was removed or demoted; step down if requested.
+		//
+		// We prevent demotions at the time of writing, but hypothetically we handle
+		// them the same way as removing the leader.
+		//
+		// TODO(tbg): ask follower with largest Match to TimeoutNow (to avoid
+		// interruption). This might still drop some proposals but it's better than
+		// nothing.
+		if r.stepDownOnRemoval {
+			r.becomeFollower(r.Term, None)
+		}
+		return cs
+	}
+
+	// The remaining steps only make sense if this node is the leader and there
+	// are other nodes.
+	if r.state != StateLeader || len(cs.Voters) == 0 {
+		return cs
+	}
+
+	if r.maybeCommit() {
+		// If the configuration change means that more entries are committed now,
+		// broadcast/append to everyone in the updated config.
+		r.bcastAppend()
+	} else {
+		// Otherwise, still probe the newly added replicas; there's no reason to
+		// let them wait out a heartbeat interval (or the next incoming
+		// proposal).
+		r.trk.Visit(func(id uint64, _ *tracker.Progress) {
+			if id == r.id {
+				return
+			}
+			r.maybeSendAppend(id, false /* sendIfEmpty */)
+		})
+	}
+	// If the leadTransferee was removed or demoted, abort the leadership transfer.
+	if _, tOK := r.trk.Config.Voters.IDs()[r.leadTransferee]; !tOK && r.leadTransferee != 0 {
+		r.abortLeaderTransfer()
+	}
+
+	return cs
+}
+
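+// loadState initializes the term, vote, and commit index from a persisted
+// HardState, panicking if the commit index is outside the known log bounds.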
+func (r *raft) loadState(state pb.HardState) {
+	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+	}
+	r.raftLog.committed = state.Commit
+	r.Term = state.Term
+	r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true if r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+	return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
+func (r *raft) resetRandomizedElectionTimeout() {
+	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
+
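+// sendTimeoutNow asks the transfer target to start an election immediately,
+// as part of a leadership transfer.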
+func (r *raft) sendTimeoutNow(to uint64) {
+	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
+}
+
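+// abortLeaderTransfer clears any in-progress leadership transfer.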
+func (r *raft) abortLeaderTransfer() {
+	r.leadTransferee = None
+}
+
+// committedEntryInCurrentTerm returns true if the peer has committed an entry in its term.
+func (r *raft) committedEntryInCurrentTerm() bool {
+	// NB: r.Term is never 0 on a leader, so if zeroTermOnOutOfBounds returns 0,
+	// we won't see it as a match with r.Term.
+	return r.raftLog.zeroTermOnOutOfBounds(r.raftLog.term(r.raftLog.committed)) == r.Term
+}
+
+// responseToReadIndexReq constructs a response for `req`. If `req` comes from the peer
+// itself, a blank value will be returned.
+func (r *raft) responseToReadIndexReq(req pb.Message, readIndex uint64) pb.Message {
+	if req.From == None || req.From == r.id {
+		r.readStates = append(r.readStates, ReadState{
+			Index:      readIndex,
+			RequestCtx: req.Entries[0].Data,
+		})
+		return pb.Message{}
+	}
+	return pb.Message{
+		Type:    pb.MsgReadIndexResp,
+		To:      req.From,
+		Index:   readIndex,
+		Entries: req.Entries,
+	}
+}
+
+// increaseUncommittedSize computes the size of the proposed entries and
+// determines whether they would push leader over its maxUncommittedSize limit.
+// If the new entries would exceed the limit, the method returns false. If not,
+// the increase in uncommitted entry size is recorded and the method returns
+// true.
+//
+// Empty payloads are never refused. This is used both for appending an empty
+// entry at a new leader's term, as well as leaving a joint configuration.
+func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool {
+	s := payloadsSize(ents)
+	if r.uncommittedSize > 0 && s > 0 && r.uncommittedSize+s > r.maxUncommittedSize {
+		// If the uncommitted tail of the Raft log is empty, allow any size
+		// proposal. Otherwise, limit the size of the uncommitted tail of the
+		// log and drop any proposal that would push the size over the limit.
+		// Note the added requirement s>0 which is used to make sure that
+		// appending single empty entries to the log always succeeds, used both
+		// for replicating a new leader's initial empty entry, and for
+		// auto-leaving joint configurations.
+		return false
+	}
+	r.uncommittedSize += s
+	return true
+}
+
+// reduceUncommittedSize accounts for the newly committed entries by decreasing
+// the uncommitted entry size limit.
+func (r *raft) reduceUncommittedSize(s entryPayloadSize) {
+	if s > r.uncommittedSize {
+		// uncommittedSize may underestimate the size of the uncommitted Raft
+		// log tail but will never overestimate it. Saturate at 0 instead of
+		// allowing overflow.
+		r.uncommittedSize = 0
+	} else {
+		r.uncommittedSize -= s
+	}
+}
+
+func releasePendingReadIndexMessages(r *raft) {
+	if len(r.pendingReadIndexMessages) == 0 {
+		// Fast path for the common case to avoid a call to storage.LastIndex()
+		// via committedEntryInCurrentTerm.
+		return
+	}
+	if !r.committedEntryInCurrentTerm() {
+		r.logger.Error("pending MsgReadIndex should be released only after first commit in current term")
+		return
+	}
+
+	msgs := r.pendingReadIndexMessages
+	r.pendingReadIndexMessages = nil
+
+	for _, m := range msgs {
+		sendMsgReadIndexResponse(r, m)
+	}
+}
+
+func sendMsgReadIndexResponse(r *raft, m pb.Message) {
+	// thinking: use an internally defined context instead of the user-given context.
+	// We can express this in terms of the term and index instead of a user-supplied value.
+	// This would allow multiple reads to piggyback on the same message.
+	switch r.readOnly.option {
+	// If more than the local vote is needed, go through a full broadcast.
+	case ReadOnlySafe:
+		r.readOnly.addRequest(r.raftLog.committed, m)
+		// The local node automatically acks the request.
+		r.readOnly.recvAck(r.id, m.Entries[0].Data)
+		r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+	case ReadOnlyLeaseBased:
+		if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None {
+			r.send(resp)
+		}
+	}
+}
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/confchange.go b/vendor/go.etcd.io/raft/v3/raftpb/confchange.go
new file mode 100644
index 0000000..a3ddff6
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/confchange.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftpb
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow
+// treating them in a unified manner.
+type ConfChangeI interface {
+	AsV2() ConfChangeV2
+	AsV1() (ConfChange, bool)
+}
+
+// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2
+// and returns the result along with the corresponding EntryType.
+func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) {
+	var typ EntryType
+	var ccdata []byte
+	var err error
+	if c == nil {
+		// A nil data unmarshals into an empty ConfChangeV2 and has the benefit
+		// that appendEntry can never refuse it based on its size (which
+		// registers as zero).
+		typ = EntryConfChangeV2
+		ccdata = nil
+	} else if ccv1, ok := c.AsV1(); ok {
+		typ = EntryConfChange
+		ccdata, err = ccv1.Marshal()
+	} else {
+		ccv2 := c.AsV2()
+		typ = EntryConfChangeV2
+		ccdata, err = ccv2.Marshal()
+	}
+	return typ, ccdata, err
+}
+
+// AsV2 returns a V2 configuration change carrying out the same operation.
+func (c ConfChange) AsV2() ConfChangeV2 {
+	return ConfChangeV2{
+		Changes: []ConfChangeSingle{{
+			Type:   c.Type,
+			NodeID: c.NodeID,
+		}},
+		Context: c.Context,
+	}
+}
+
+// AsV1 returns the ConfChange and true.
+func (c ConfChange) AsV1() (ConfChange, bool) {
+	return c, true
+}
+
+// AsV2 is the identity.
+func (c ConfChangeV2) AsV2() ConfChangeV2 { return c }
+
+// AsV1 returns ConfChange{} and false.
+func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false }
+
+// EnterJoint returns two bools. The second bool is true if and only if this
+// config change will use Joint Consensus, which is the case if it contains more
+// than one change or if the use of Joint Consensus was requested explicitly.
+// The first bool can only be true if the second one is, and indicates whether the
+// Joint State will be left automatically.
+func (c ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) {
+	// NB: in theory, more config changes could qualify for the "simple"
+	// protocol but it depends on the config on top of which the changes apply.
+	// For example, adding two learners is not OK if both nodes are part of the
+	// base config (i.e. two voters are turned into learners in the process of
+	// applying the conf change). In practice, these distinctions should not
+	// matter, so we keep it simple and use Joint Consensus liberally.
+	if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 {
+		// Use Joint Consensus.
+		var autoLeave bool
+		switch c.Transition {
+		case ConfChangeTransitionAuto:
+			autoLeave = true
+		case ConfChangeTransitionJointImplicit:
+			autoLeave = true
+		case ConfChangeTransitionJointExplicit:
+		default:
+			panic(fmt.Sprintf("unknown transition: %+v", c))
+		}
+		return autoLeave, true
+	}
+	return false, false
+}
+
+// LeaveJoint is true if the configuration change leaves a joint configuration.
+// This is the case if the ConfChangeV2 is zero, with the possible exception of
+// the Context field.
+func (c ConfChangeV2) LeaveJoint() bool {
+	// NB: c is already a copy.
+	c.Context = nil
+	return proto.Equal(&c, &ConfChangeV2{})
+}
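
A small illustration of how these two predicates classify a proposal, using only the types defined in this file:

// Illustrative only.
func exampleJoint() {
	cc := ConfChangeV2{Changes: []ConfChangeSingle{
		{Type: ConfChangeAddNode, NodeID: 4},
		{Type: ConfChangeRemoveNode, NodeID: 1},
	}}
	// Two changes with Transition == ConfChangeTransitionAuto: joint consensus
	// is used and raft leaves the joint configuration automatically.
	autoLeave, useJoint := cc.EnterJoint() // autoLeave == true, useJoint == true
	_, _ = autoLeave, useJoint

	// The zero ConfChangeV2 (Context may be set) is the "leave joint" proposal.
	leave := ConfChangeV2{}
	_ = leave.LeaveJoint() // true
}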
+
+// ConfChangesFromString parses a space-delimited sequence of operations into a
+// slice of ConfChangeSingle. The supported operations are:
+// - vn: make n a voter,
+// - ln: make n a learner,
+// - rn: remove n, and
+// - un: update n.
+func ConfChangesFromString(s string) ([]ConfChangeSingle, error) {
+	var ccs []ConfChangeSingle
+	toks := strings.Split(strings.TrimSpace(s), " ")
+	if toks[0] == "" {
+		toks = nil
+	}
+	for _, tok := range toks {
+		if len(tok) < 2 {
+			return nil, fmt.Errorf("unknown token %s", tok)
+		}
+		var cc ConfChangeSingle
+		switch tok[0] {
+		case 'v':
+			cc.Type = ConfChangeAddNode
+		case 'l':
+			cc.Type = ConfChangeAddLearnerNode
+		case 'r':
+			cc.Type = ConfChangeRemoveNode
+		case 'u':
+			cc.Type = ConfChangeUpdateNode
+		default:
+			return nil, fmt.Errorf("unknown input: %s", tok)
+		}
+		id, err := strconv.ParseUint(tok[1:], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		cc.NodeID = id
+		ccs = append(ccs, cc)
+	}
+	return ccs, nil
+}
+
+// ConfChangesToString is the inverse to ConfChangesFromString.
+func ConfChangesToString(ccs []ConfChangeSingle) string {
+	var buf strings.Builder
+	for i, cc := range ccs {
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		switch cc.Type {
+		case ConfChangeAddNode:
+			buf.WriteByte('v')
+		case ConfChangeAddLearnerNode:
+			buf.WriteByte('l')
+		case ConfChangeRemoveNode:
+			buf.WriteByte('r')
+		case ConfChangeUpdateNode:
+			buf.WriteByte('u')
+		default:
+			buf.WriteString("unknown")
+		}
+		fmt.Fprintf(&buf, "%d", cc.NodeID)
+	}
+	return buf.String()
+}
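
For example, the textual form round-trips through the two helpers above:

func exampleConfChangeString() {
	// "v1 l2 r3 u4": add voter 1, add learner 2, remove 3, update 4.
	ccs, err := ConfChangesFromString("v1 l2 r3 u4")
	if err != nil {
		panic(err) // not reached for well-formed input
	}
	// ccs[0] == ConfChangeSingle{Type: ConfChangeAddNode, NodeID: 1}, and so on.
	_ = ConfChangesToString(ccs) // "v1 l2 r3 u4"
}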
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/confstate.go b/vendor/go.etcd.io/raft/v3/raftpb/confstate.go
new file mode 100644
index 0000000..3aedeb7
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/confstate.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftpb
+
+import (
+	"fmt"
+	"reflect"
+	"slices"
+)
+
+// Equivalent returns a nil error if the inputs describe the same configuration.
+// On mismatch, returns a descriptive error showing the differences.
+func (cs ConfState) Equivalent(cs2 ConfState) error {
+	cs1 := cs
+	orig1, orig2 := cs1, cs2
+	s := func(sl *[]uint64) {
+		*sl = append([]uint64(nil), *sl...)
+		slices.Sort(*sl)
+	}
+
+	for _, cs := range []*ConfState{&cs1, &cs2} {
+		s(&cs.Voters)
+		s(&cs.Learners)
+		s(&cs.VotersOutgoing)
+		s(&cs.LearnersNext)
+	}
+
+	if !reflect.DeepEqual(cs1, cs2) {
+		return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2)
+	}
+	return nil
+}
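
A quick illustration (using the ConfState type generated in raft.pb.go): slice ordering does not matter, membership does.

func exampleEquivalent() {
	a := ConfState{Voters: []uint64{3, 1, 2}, Learners: []uint64{4}}
	b := ConfState{Voters: []uint64{1, 2, 3}, Learners: []uint64{4}}
	_ = a.Equivalent(b) // nil: same membership, different order

	c := ConfState{Voters: []uint64{1, 2}}
	_ = a.Equivalent(c) // non-nil error: voter sets differ
}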
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go b/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go
new file mode 100644
index 0000000..7dcdef0
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go
@@ -0,0 +1,3110 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft.proto
+
+package raftpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type EntryType int32
+
+const (
+	EntryNormal       EntryType = 0
+	EntryConfChange   EntryType = 1
+	EntryConfChangeV2 EntryType = 2
+)
+
+var EntryType_name = map[int32]string{
+	0: "EntryNormal",
+	1: "EntryConfChange",
+	2: "EntryConfChangeV2",
+}
+
+var EntryType_value = map[string]int32{
+	"EntryNormal":       0,
+	"EntryConfChange":   1,
+	"EntryConfChangeV2": 2,
+}
+
+func (x EntryType) Enum() *EntryType {
+	p := new(EntryType)
+	*p = x
+	return p
+}
+
+func (x EntryType) String() string {
+	return proto.EnumName(EntryType_name, int32(x))
+}
+
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+	if err != nil {
+		return err
+	}
+	*x = EntryType(value)
+	return nil
+}
+
+func (EntryType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{0}
+}
+
+// For description of different message types, see:
+// https://pkg.go.dev/go.etcd.io/raft/v3#hdr-MessageType
+type MessageType int32
+
+const (
+	MsgHup               MessageType = 0
+	MsgBeat              MessageType = 1
+	MsgProp              MessageType = 2
+	MsgApp               MessageType = 3
+	MsgAppResp           MessageType = 4
+	MsgVote              MessageType = 5
+	MsgVoteResp          MessageType = 6
+	MsgSnap              MessageType = 7
+	MsgHeartbeat         MessageType = 8
+	MsgHeartbeatResp     MessageType = 9
+	MsgUnreachable       MessageType = 10
+	MsgSnapStatus        MessageType = 11
+	MsgCheckQuorum       MessageType = 12
+	MsgTransferLeader    MessageType = 13
+	MsgTimeoutNow        MessageType = 14
+	MsgReadIndex         MessageType = 15
+	MsgReadIndexResp     MessageType = 16
+	MsgPreVote           MessageType = 17
+	MsgPreVoteResp       MessageType = 18
+	MsgStorageAppend     MessageType = 19
+	MsgStorageAppendResp MessageType = 20
+	MsgStorageApply      MessageType = 21
+	MsgStorageApplyResp  MessageType = 22
+	MsgForgetLeader      MessageType = 23
+)
+
+var MessageType_name = map[int32]string{
+	0:  "MsgHup",
+	1:  "MsgBeat",
+	2:  "MsgProp",
+	3:  "MsgApp",
+	4:  "MsgAppResp",
+	5:  "MsgVote",
+	6:  "MsgVoteResp",
+	7:  "MsgSnap",
+	8:  "MsgHeartbeat",
+	9:  "MsgHeartbeatResp",
+	10: "MsgUnreachable",
+	11: "MsgSnapStatus",
+	12: "MsgCheckQuorum",
+	13: "MsgTransferLeader",
+	14: "MsgTimeoutNow",
+	15: "MsgReadIndex",
+	16: "MsgReadIndexResp",
+	17: "MsgPreVote",
+	18: "MsgPreVoteResp",
+	19: "MsgStorageAppend",
+	20: "MsgStorageAppendResp",
+	21: "MsgStorageApply",
+	22: "MsgStorageApplyResp",
+	23: "MsgForgetLeader",
+}
+
+var MessageType_value = map[string]int32{
+	"MsgHup":               0,
+	"MsgBeat":              1,
+	"MsgProp":              2,
+	"MsgApp":               3,
+	"MsgAppResp":           4,
+	"MsgVote":              5,
+	"MsgVoteResp":          6,
+	"MsgSnap":              7,
+	"MsgHeartbeat":         8,
+	"MsgHeartbeatResp":     9,
+	"MsgUnreachable":       10,
+	"MsgSnapStatus":        11,
+	"MsgCheckQuorum":       12,
+	"MsgTransferLeader":    13,
+	"MsgTimeoutNow":        14,
+	"MsgReadIndex":         15,
+	"MsgReadIndexResp":     16,
+	"MsgPreVote":           17,
+	"MsgPreVoteResp":       18,
+	"MsgStorageAppend":     19,
+	"MsgStorageAppendResp": 20,
+	"MsgStorageApply":      21,
+	"MsgStorageApplyResp":  22,
+	"MsgForgetLeader":      23,
+}
+
+func (x MessageType) Enum() *MessageType {
+	p := new(MessageType)
+	*p = x
+	return p
+}
+
+func (x MessageType) String() string {
+	return proto.EnumName(MessageType_name, int32(x))
+}
+
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+	if err != nil {
+		return err
+	}
+	*x = MessageType(value)
+	return nil
+}
+
+func (MessageType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{1}
+}
+
+// ConfChangeTransition specifies the behavior of a configuration change with
+// respect to joint consensus.
+type ConfChangeTransition int32
+
+const (
+	// Automatically use the simple protocol if possible, otherwise fall back
+	// to ConfChangeTransitionJointImplicit. Most applications will want to use this.
+	ConfChangeTransitionAuto ConfChangeTransition = 0
+	// Use joint consensus unconditionally, and transition out of it
+	// automatically (by proposing a zero configuration change).
+	//
+	// This option is suitable for applications that want to minimize the time
+	// spent in the joint configuration and do not store the joint configuration
+	// in the state machine (outside of InitialState).
+	ConfChangeTransitionJointImplicit ConfChangeTransition = 1
+	// Use joint consensus and remain in the joint configuration until the
+	// application proposes a no-op configuration change. This is suitable for
+	// applications that want to explicitly control the transitions, for example
+	// to use a custom payload (via the Context field).
+	ConfChangeTransitionJointExplicit ConfChangeTransition = 2
+)
+
+var ConfChangeTransition_name = map[int32]string{
+	0: "ConfChangeTransitionAuto",
+	1: "ConfChangeTransitionJointImplicit",
+	2: "ConfChangeTransitionJointExplicit",
+}
+
+var ConfChangeTransition_value = map[string]int32{
+	"ConfChangeTransitionAuto":          0,
+	"ConfChangeTransitionJointImplicit": 1,
+	"ConfChangeTransitionJointExplicit": 2,
+}
+
+func (x ConfChangeTransition) Enum() *ConfChangeTransition {
+	p := new(ConfChangeTransition)
+	*p = x
+	return p
+}
+
+func (x ConfChangeTransition) String() string {
+	return proto.EnumName(ConfChangeTransition_name, int32(x))
+}
+
+func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition")
+	if err != nil {
+		return err
+	}
+	*x = ConfChangeTransition(value)
+	return nil
+}
+
+func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{2}
+}
+
+type ConfChangeType int32
+
+const (
+	ConfChangeAddNode        ConfChangeType = 0
+	ConfChangeRemoveNode     ConfChangeType = 1
+	ConfChangeUpdateNode     ConfChangeType = 2
+	ConfChangeAddLearnerNode ConfChangeType = 3
+)
+
+var ConfChangeType_name = map[int32]string{
+	0: "ConfChangeAddNode",
+	1: "ConfChangeRemoveNode",
+	2: "ConfChangeUpdateNode",
+	3: "ConfChangeAddLearnerNode",
+}
+
+var ConfChangeType_value = map[string]int32{
+	"ConfChangeAddNode":        0,
+	"ConfChangeRemoveNode":     1,
+	"ConfChangeUpdateNode":     2,
+	"ConfChangeAddLearnerNode": 3,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+	p := new(ConfChangeType)
+	*p = x
+	return p
+}
+
+func (x ConfChangeType) String() string {
+	return proto.EnumName(ConfChangeType_name, int32(x))
+}
+
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+	if err != nil {
+		return err
+	}
+	*x = ConfChangeType(value)
+	return nil
+}
+
+func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{3}
+}
+
+type Entry struct {
+	Term  uint64    `protobuf:"varint,2,opt,name=Term" json:"Term"`
+	Index uint64    `protobuf:"varint,3,opt,name=Index" json:"Index"`
+	Type  EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+	Data  []byte    `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+}
+
+func (m *Entry) Reset()         { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage()    {}
+func (*Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{0}
+}
+func (m *Entry) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Entry.Merge(m, src)
+}
+func (m *Entry) XXX_Size() int {
+	return m.Size()
+}
+func (m *Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Entry proto.InternalMessageInfo
+
+type SnapshotMetadata struct {
+	ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
+	Index     uint64    `protobuf:"varint,2,opt,name=index" json:"index"`
+	Term      uint64    `protobuf:"varint,3,opt,name=term" json:"term"`
+}
+
+func (m *SnapshotMetadata) Reset()         { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage()    {}
+func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{1}
+}
+func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotMetadata.Merge(m, src)
+}
+func (m *SnapshotMetadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
+
+type Snapshot struct {
+	Data     []byte           `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+	Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{2}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+type Message struct {
+	Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+	To   uint64      `protobuf:"varint,2,opt,name=to" json:"to"`
+	From uint64      `protobuf:"varint,3,opt,name=from" json:"from"`
+	Term uint64      `protobuf:"varint,4,opt,name=term" json:"term"`
+	// logTerm is generally used for appending Raft logs to followers. For example,
+	// (type=MsgApp,index=100,logTerm=5) means the leader appends entries starting
+	// at index=101, and the term of the entry at index 100 is 5.
+	// (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
+	// entries from its leader as it already has an entry with term 5 at index 100.
+	// (type=MsgStorageAppendResp,index=100,logTerm=5) means the local node wrote
+	// entries up to index=100 in stable storage, and the term of the entry at index
+	// 100 was 5. This doesn't always mean that the corresponding MsgStorageAppend
+	// message was the one that carried these entries, just that those entries were
+	// stable at the time of processing the corresponding MsgStorageAppend.
+	LogTerm uint64  `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+	Index   uint64  `protobuf:"varint,6,opt,name=index" json:"index"`
+	Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+	Commit  uint64  `protobuf:"varint,8,opt,name=commit" json:"commit"`
+	// (type=MsgStorageAppend,vote=5,term=10) means the local node is voting for
+	// peer 5 in term 10. For MsgStorageAppends, the term, vote, and commit fields
+	// will either all be set (to facilitate the construction of a HardState) if
+	// any of the fields have changed or will all be unset if none of the fields
+	// have changed.
+	Vote uint64 `protobuf:"varint,13,opt,name=vote" json:"vote"`
+	// snapshot is non-nil and non-empty for MsgSnap messages and nil for all other
+	// message types. However, peer nodes running older binary versions may send a
+	// non-nil, empty value for the snapshot field of non-MsgSnap messages. Code
+	// should be prepared to handle such messages.
+	Snapshot   *Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot,omitempty"`
+	Reject     bool      `protobuf:"varint,10,opt,name=reject" json:"reject"`
+	RejectHint uint64    `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+	Context    []byte    `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
+	// responses are populated by a raft node to instruct storage threads on how
+	// to respond and who to respond to when the work associated with a message
+	// is complete. Populated for MsgStorageAppend and MsgStorageApply messages.
+	Responses []Message `protobuf:"bytes,14,rep,name=responses" json:"responses"`
+}
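
As a concrete reading of the logTerm comment above, a leader appending entries after index 100 (whose term is 5) might send a message shaped like the following; the field values are illustrative only.

// Illustrative MsgApp: "append these entries after index 100, whose term is 5".
var exampleAppend = Message{
	Type:    MsgApp,
	To:      2,
	From:    1,
	Term:    7,
	LogTerm: 5,   // term of the entry at Index
	Index:   100, // entries start at Index+1 = 101
	Entries: []Entry{{Term: 7, Index: 101, Type: EntryNormal, Data: []byte("put k=v")}},
	Commit:  99,
}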
+
+func (m *Message) Reset()         { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage()    {}
+func (*Message) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{3}
+}
+func (m *Message) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Message) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Message.Merge(m, src)
+}
+func (m *Message) XXX_Size() int {
+	return m.Size()
+}
+func (m *Message) XXX_DiscardUnknown() {
+	xxx_messageInfo_Message.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Message proto.InternalMessageInfo
+
+type HardState struct {
+	Term   uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
+	Vote   uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
+	Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
+}
+
+func (m *HardState) Reset()         { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage()    {}
+func (*HardState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{4}
+}
+func (m *HardState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HardState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HardState.Merge(m, src)
+}
+func (m *HardState) XXX_Size() int {
+	return m.Size()
+}
+func (m *HardState) XXX_DiscardUnknown() {
+	xxx_messageInfo_HardState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HardState proto.InternalMessageInfo
+
+type ConfState struct {
+	// The voters in the incoming config. (If the configuration is not joint,
+	// then the outgoing config is empty).
+	Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"`
+	// The learners in the incoming config.
+	Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
+	// The voters in the outgoing config.
+	VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"`
+	// The nodes that will become learners when the outgoing config is removed.
+	// These nodes are necessarily currently in the outgoing config (voters_outgoing),
+	// or they would have been added to the incoming config right away.
+	LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"`
+	// If set, the config is joint and Raft will automatically transition into
+	// the final config (i.e. remove the outgoing config) when this is safe.
+	AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"`
+}
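
To make the joint-configuration fields concrete, here is an illustrative ConfState midway through replacing voter 1 with voter 4 (the values are hypothetical):

// Joint configuration: {2,3,4} is the incoming voter set, {1,2,3} the outgoing one.
// Quorum requires a majority of both sets until the joint config is left.
var exampleJointConfState = ConfState{
	Voters:         []uint64{2, 3, 4},
	VotersOutgoing: []uint64{1, 2, 3},
	AutoLeave:      true, // raft will propose the "leave joint" change itself once safe
}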
+
+func (m *ConfState) Reset()         { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage()    {}
+func (*ConfState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{5}
+}
+func (m *ConfState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfState.Merge(m, src)
+}
+func (m *ConfState) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfState) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfState proto.InternalMessageInfo
+
+type ConfChange struct {
+	Type    ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"`
+	NodeID  uint64         `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"`
+	Context []byte         `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
+	// NB: this is used only by etcd to thread through a unique identifier.
+	// Ideally it should really use the Context instead. No counterpart to
+	// this field exists in ConfChangeV2.
+	ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"`
+}
+
+func (m *ConfChange) Reset()         { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage()    {}
+func (*ConfChange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{6}
+}
+func (m *ConfChange) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfChange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfChange.Merge(m, src)
+}
+func (m *ConfChange) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfChange) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChange proto.InternalMessageInfo
+
+// ConfChangeSingle is an individual configuration change operation. Multiple
+// such operations can be carried out atomically via a ConfChangeV2.
+type ConfChangeSingle struct {
+	Type   ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"`
+	NodeID uint64         `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"`
+}
+
+func (m *ConfChangeSingle) Reset()         { *m = ConfChangeSingle{} }
+func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) }
+func (*ConfChangeSingle) ProtoMessage()    {}
+func (*ConfChangeSingle) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{7}
+}
+func (m *ConfChangeSingle) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfChangeSingle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfChangeSingle.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfChangeSingle) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfChangeSingle.Merge(m, src)
+}
+func (m *ConfChangeSingle) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfChangeSingle) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfChangeSingle.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChangeSingle proto.InternalMessageInfo
+
+// ConfChangeV2 messages initiate configuration changes. They support both the
+// simple "one at a time" membership change protocol and full Joint Consensus
+// allowing for arbitrary changes in membership.
+//
+// The supplied context is treated as an opaque payload and can be used to
+// attach an action on the state machine to the application of the config change
+// proposal. Note that contrary to Joint Consensus as outlined in the Raft
+// paper[1], configuration changes become active when they are *applied* to the
+// state machine (not when they are appended to the log).
+//
+// The simple protocol can be used whenever only a single change is made.
+//
+// Non-simple changes require the use of Joint Consensus, for which two
+// configuration changes are run. The first configuration change specifies the
+// desired changes and transitions the Raft group into the joint configuration,
+// in which quorum requires a majority of both the pre-changes and post-changes
+// configuration. Joint Consensus avoids entering fragile intermediate
+// configurations that could compromise survivability. For example, without the
+// use of Joint Consensus and running across three availability zones with a
+// replication factor of three, it is not possible to replace a voter without
+// entering an intermediate configuration that does not survive the outage of
+// one availability zone.
+//
+// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
+// is used, and assigns the task of leaving the joint configuration either to
+// Raft or the application. Leaving the joint configuration is accomplished by
+// proposing a ConfChangeV2 with only and optionally the Context field
+// populated.
+//
+// For details on Raft membership changes, see:
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+type ConfChangeV2 struct {
+	Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"`
+	Changes    []ConfChangeSingle   `protobuf:"bytes,2,rep,name=changes" json:"changes"`
+	Context    []byte               `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"`
+}
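
Tying this together, a caller would typically build a ConfChangeV2 and hand it to MarshalConfChange (defined in confchange.go above) to obtain the entry type and payload to propose; a minimal sketch:

// Sketch: replace voter 1 with voter 4 via joint consensus.
func exampleProposeConfChangeV2() (EntryType, []byte, error) {
	cc := ConfChangeV2{
		Transition: ConfChangeTransitionAuto, // raft leaves the joint config automatically
		Changes: []ConfChangeSingle{
			{Type: ConfChangeAddNode, NodeID: 4},
			{Type: ConfChangeRemoveNode, NodeID: 1},
		},
	}
	return MarshalConfChange(cc)
}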
+
+func (m *ConfChangeV2) Reset()         { *m = ConfChangeV2{} }
+func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) }
+func (*ConfChangeV2) ProtoMessage()    {}
+func (*ConfChangeV2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{8}
+}
+func (m *ConfChangeV2) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfChangeV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfChangeV2.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfChangeV2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfChangeV2.Merge(m, src)
+}
+func (m *ConfChangeV2) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfChangeV2) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfChangeV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChangeV2 proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+	proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+	proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value)
+	proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+	proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+	proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+	proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+	proto.RegisterType((*Message)(nil), "raftpb.Message")
+	proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+	proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+	proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+	proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle")
+	proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2")
+}
+
+func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
+
+var fileDescriptor_b042552c306ae59b = []byte{
+	// 1102 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcb, 0x6e, 0x23, 0x45,
+	0x14, 0xed, 0x6e, 0x77, 0xfc, 0xb8, 0x76, 0x9c, 0x4a, 0xc5, 0x33, 0xd3, 0x8a, 0x22, 0x8f, 0xf1,
+	0x0c, 0x1a, 0x2b, 0x68, 0x02, 0x32, 0x12, 0x42, 0xec, 0xf2, 0x18, 0x94, 0xa0, 0x38, 0x0c, 0x4e,
+	0x26, 0x0b, 0x24, 0x14, 0x55, 0xdc, 0x95, 0x4e, 0x83, 0x5d, 0xd5, 0xaa, 0x2e, 0x87, 0x64, 0x83,
+	0x10, 0x5f, 0xc0, 0x92, 0x0d, 0x5b, 0x3e, 0x80, 0x8f, 0x40, 0x59, 0x66, 0xc9, 0x6a, 0xc4, 0x24,
+	0x7f, 0xc0, 0x17, 0xa0, 0xaa, 0xae, 0x7e, 0xd8, 0x89, 0x66, 0xc1, 0xae, 0xea, 0xdc, 0x53, 0xf7,
+	0x9e, 0x7b, 0x6e, 0x57, 0x35, 0x80, 0x20, 0x67, 0x72, 0x23, 0x12, 0x5c, 0x72, 0x5c, 0x56, 0xeb,
+	0xe8, 0x74, 0xb5, 0x15, 0xf0, 0x80, 0x6b, 0xe8, 0x63, 0xb5, 0x4a, 0xa2, 0xdd, 0x9f, 0x60, 0xe1,
+	0x15, 0x93, 0xe2, 0x0a, 0x7b, 0xe0, 0x1e, 0x51, 0x31, 0xf1, 0x9c, 0x8e, 0xdd, 0x73, 0xb7, 0xdc,
+	0xeb, 0xb7, 0x4f, 0xad, 0xa1, 0x46, 0xf0, 0x2a, 0x2c, 0xec, 0x31, 0x9f, 0x5e, 0x7a, 0xa5, 0x42,
+	0x28, 0x81, 0xf0, 0x47, 0xe0, 0x1e, 0x5d, 0x45, 0xd4, 0xb3, 0x3b, 0x76, 0xaf, 0xd9, 0x5f, 0xde,
+	0x48, 0x6a, 0x6d, 0xe8, 0x94, 0x2a, 0x90, 0x25, 0xba, 0x8a, 0x28, 0xc6, 0xe0, 0xee, 0x10, 0x49,
+	0x3c, 0xb7, 0x63, 0xf7, 0x1a, 0x43, 0xbd, 0xee, 0xfe, 0x6c, 0x03, 0x3a, 0x64, 0x24, 0x8a, 0xcf,
+	0xb9, 0x1c, 0x50, 0x49, 0x7c, 0x22, 0x09, 0xfe, 0x0c, 0x60, 0xc4, 0xd9, 0xd9, 0x49, 0x2c, 0x89,
+	0x4c, 0x72, 0xd7, 0xf3, 0xdc, 0xdb, 0x9c, 0x9d, 0x1d, 0xaa, 0x80, 0xc9, 0x5d, 0x1b, 0xa5, 0x80,
+	0x52, 0x1a, 0x6a, 0xa5, 0xc5, 0x26, 0x12, 0x48, 0xf5, 0x27, 0x55, 0x7f, 0xc5, 0x26, 0x34, 0xd2,
+	0xfd, 0x16, 0xaa, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x35, 0x1b, 0x43, 0xbd, 0xc6, 0x5f, 0x40,
+	0x75, 0x62, 0x94, 0xe9, 0xc4, 0xf5, 0xbe, 0x97, 0x6a, 0x99, 0x57, 0x6e, 0xf2, 0x66, 0xfc, 0xee,
+	0xbf, 0x25, 0xa8, 0x0c, 0x68, 0x1c, 0x93, 0x80, 0xe2, 0x97, 0xe0, 0xca, 0xdc, 0xab, 0x95, 0x34,
+	0x87, 0x09, 0x17, 0xdd, 0x52, 0x34, 0xdc, 0x02, 0x47, 0xf2, 0x99, 0x4e, 0x1c, 0xc9, 0x55, 0x1b,
+	0x67, 0x82, 0xcf, 0xb5, 0xa1, 0x90, 0xac, 0x41, 0x77, 0xbe, 0x41, 0xdc, 0x86, 0xca, 0x98, 0x07,
+	0x7a, 0xba, 0x0b, 0x85, 0x60, 0x0a, 0xe6, 0xb6, 0x95, 0xef, 0xdb, 0xf6, 0x12, 0x2a, 0x94, 0x49,
+	0x11, 0xd2, 0xd8, 0xab, 0x74, 0x4a, 0xbd, 0x7a, 0x7f, 0x71, 0x66, 0xc6, 0x69, 0x2a, 0xc3, 0xc1,
+	0x6b, 0x50, 0x1e, 0xf1, 0xc9, 0x24, 0x94, 0x5e, 0xb5, 0x90, 0xcb, 0x60, 0x4a, 0xe2, 0x05, 0x97,
+	0xd4, 0x5b, 0x2c, 0x4a, 0x54, 0x08, 0xee, 0x43, 0x35, 0x36, 0x5e, 0x7a, 0x35, 0xed, 0x31, 0x9a,
+	0xf7, 0x58, 0xf3, 0xed, 0x61, 0xc6, 0x53, 0xb5, 0x04, 0xfd, 0x9e, 0x8e, 0xa4, 0x07, 0x1d, 0xbb,
+	0x57, 0x4d, 0x6b, 0x25, 0x18, 0x7e, 0x0e, 0x90, 0xac, 0x76, 0x43, 0x26, 0xbd, 0x7a, 0xa1, 0x62,
+	0x01, 0x57, 0xd6, 0x8c, 0x38, 0x93, 0xf4, 0x52, 0x7a, 0x0d, 0x35, 0x72, 0x53, 0x24, 0x05, 0xf1,
+	0xa7, 0x50, 0x13, 0x34, 0x8e, 0x38, 0x8b, 0x69, 0xec, 0x35, 0xb5, 0x01, 0x4b, 0x73, 0x83, 0x4b,
+	0x3f, 0xc3, 0x8c, 0xd7, 0xfd, 0x0e, 0x6a, 0xbb, 0x44, 0xf8, 0xc9, 0x37, 0x99, 0x8e, 0xc5, 0xbe,
+	0x37, 0x96, 0xd4, 0x0d, 0xe7, 0x9e, 0x1b, 0xb9, 0x8b, 0xa5, 0xfb, 0x2e, 0x76, 0xff, 0xb4, 0xa1,
+	0x96, 0x5d, 0x02, 0xfc, 0x18, 0xca, 0xea, 0x8c, 0x88, 0x3d, 0xbb, 0x53, 0xea, 0xb9, 0x43, 0xb3,
+	0xc3, 0xab, 0x50, 0x1d, 0x53, 0x22, 0x98, 0x8a, 0x38, 0x3a, 0x92, 0xed, 0xf1, 0x0b, 0x58, 0x4a,
+	0x58, 0x27, 0x7c, 0x2a, 0x03, 0x1e, 0xb2, 0xc0, 0x2b, 0x69, 0x4a, 0x33, 0x81, 0xbf, 0x36, 0x28,
+	0x7e, 0x06, 0x8b, 0xe9, 0xa1, 0x13, 0xa6, 0x4c, 0x72, 0x35, 0xad, 0x91, 0x82, 0x07, 0xca, 0xa3,
+	0x67, 0x00, 0x64, 0x2a, 0xf9, 0xc9, 0x98, 0x92, 0x0b, 0xaa, 0xbf, 0xb0, 0x74, 0x16, 0x35, 0x85,
+	0xef, 0x2b, 0xb8, 0xfb, 0xbb, 0x0d, 0xa0, 0x44, 0x6f, 0x9f, 0x13, 0x16, 0x50, 0xfc, 0x89, 0xb9,
+	0x0b, 0x8e, 0xbe, 0x0b, 0x8f, 0x8b, 0x77, 0x3b, 0x61, 0xdc, 0xbb, 0x0e, 0x2f, 0xa0, 0xc2, 0xb8,
+	0x4f, 0x4f, 0x42, 0xdf, 0x98, 0xd2, 0x54, 0xc1, 0xdb, 0xb7, 0x4f, 0xcb, 0x07, 0xdc, 0xa7, 0x7b,
+	0x3b, 0xc3, 0xb2, 0x0a, 0xef, 0xf9, 0xd8, 0xcb, 0x47, 0x9a, 0x3c, 0x34, 0xd9, 0x30, 0x57, 0xc1,
+	0x09, 0x7d, 0x33, 0x08, 0x30, 0xa7, 0x9d, 0xbd, 0x9d, 0xa1, 0x13, 0xfa, 0xdd, 0x09, 0xa0, 0xbc,
+	0xf8, 0x61, 0xc8, 0x82, 0x71, 0x2e, 0xd2, 0xfe, 0x3f, 0x22, 0x9d, 0xf7, 0x89, 0xec, 0xfe, 0x61,
+	0x43, 0x23, 0xcf, 0x73, 0xdc, 0xc7, 0x5b, 0x00, 0x52, 0x10, 0x16, 0x87, 0x32, 0xe4, 0xcc, 0x54,
+	0x5c, 0x7b, 0xa0, 0x62, 0xc6, 0x49, 0x3f, 0xe6, 0xfc, 0x14, 0xfe, 0x1c, 0x2a, 0x23, 0xcd, 0x4a,
+	0x26, 0x5e, 0x78, 0xa7, 0xe6, 0x5b, 0x4b, 0xaf, 0xad, 0xa1, 0x17, 0x3d, 0x2b, 0xcd, 0x78, 0xb6,
+	0xbe, 0x0b, 0xb5, 0xec, 0x31, 0xc7, 0x4b, 0x50, 0xd7, 0x9b, 0x03, 0x2e, 0x26, 0x64, 0x8c, 0x2c,
+	0xbc, 0x02, 0x4b, 0x1a, 0xc8, 0xf3, 0x23, 0x1b, 0x3f, 0x82, 0xe5, 0x39, 0xf0, 0xb8, 0x8f, 0x9c,
+	0xf5, 0xbf, 0x4a, 0x50, 0x2f, 0xbc, 0x75, 0x18, 0xa0, 0x3c, 0x88, 0x83, 0xdd, 0x69, 0x84, 0x2c,
+	0x5c, 0x87, 0xca, 0x20, 0x0e, 0xb6, 0x28, 0x91, 0xc8, 0x36, 0x9b, 0xd7, 0x82, 0x47, 0xc8, 0x31,
+	0xac, 0xcd, 0x28, 0x42, 0x25, 0xdc, 0x04, 0x48, 0xd6, 0x43, 0x1a, 0x47, 0xc8, 0x35, 0xc4, 0x63,
+	0x2e, 0x29, 0x5a, 0x50, 0xda, 0xcc, 0x46, 0x47, 0xcb, 0x26, 0xaa, 0x5e, 0x0f, 0x54, 0xc1, 0x08,
+	0x1a, 0xaa, 0x18, 0x25, 0x42, 0x9e, 0xaa, 0x2a, 0x55, 0xdc, 0x02, 0x54, 0x44, 0xf4, 0xa1, 0x1a,
+	0xc6, 0xd0, 0x1c, 0xc4, 0xc1, 0x1b, 0x26, 0x28, 0x19, 0x9d, 0x93, 0xd3, 0x31, 0x45, 0x80, 0x97,
+	0x61, 0xd1, 0x24, 0x52, 0x37, 0x6e, 0x1a, 0xa3, 0xba, 0xa1, 0x6d, 0x9f, 0xd3, 0xd1, 0x0f, 0xdf,
+	0x4c, 0xb9, 0x98, 0x4e, 0x50, 0x43, 0xb5, 0x3d, 0x88, 0x03, 0x3d, 0xa0, 0x33, 0x2a, 0xf6, 0x29,
+	0xf1, 0xa9, 0x40, 0x8b, 0xe6, 0xf4, 0x51, 0x38, 0xa1, 0x7c, 0x2a, 0x0f, 0xf8, 0x8f, 0xa8, 0x69,
+	0xc4, 0x0c, 0x29, 0xf1, 0xf5, 0x4f, 0x14, 0x2d, 0x19, 0x31, 0x19, 0xa2, 0xc5, 0x20, 0xd3, 0xef,
+	0x6b, 0x41, 0x75, 0x8b, 0xcb, 0xa6, 0xaa, 0xd9, 0x6b, 0x0e, 0x36, 0x27, 0x0f, 0x25, 0x17, 0x24,
+	0xa0, 0x9b, 0x51, 0x44, 0x99, 0x8f, 0x56, 0xb0, 0x07, 0xad, 0x79, 0x54, 0xf3, 0x5b, 0x6a, 0x62,
+	0x33, 0x91, 0xf1, 0x15, 0x7a, 0x84, 0x9f, 0xc0, 0xca, 0x1c, 0xa8, 0xd9, 0x8f, 0x0d, 0xfb, 0x4b,
+	0x2e, 0x02, 0x2a, 0x4d, 0x47, 0x4f, 0xd6, 0x7f, 0xb1, 0xa1, 0xf5, 0xd0, 0x17, 0x89, 0xd7, 0xc0,
+	0x7b, 0x08, 0xdf, 0x9c, 0x4a, 0x8e, 0x2c, 0xfc, 0x21, 0x7c, 0xf0, 0x50, 0xf4, 0x2b, 0x1e, 0x32,
+	0xb9, 0x37, 0x89, 0xc6, 0xe1, 0x28, 0x54, 0xd3, 0x7f, 0x1f, 0xed, 0xd5, 0xa5, 0xa1, 0x39, 0xeb,
+	0x57, 0xd0, 0x9c, 0xbd, 0x87, 0xca, 0xff, 0x1c, 0xd9, 0xf4, 0x7d, 0x75, 0xe3, 0x90, 0xa5, 0xac,
+	0xc8, 0xe1, 0x21, 0x9d, 0xf0, 0x0b, 0xaa, 0x23, 0xf6, 0x6c, 0xe4, 0x4d, 0xe4, 0x13, 0x99, 0x44,
+	0x9c, 0xd9, 0x46, 0x36, 0x7d, 0x7f, 0x3f, 0x79, 0xee, 0x74, 0xb4, 0xb4, 0xf5, 0xfc, 0xfa, 0x5d,
+	0xdb, 0xba, 0x79, 0xd7, 0xb6, 0xae, 0x6f, 0xdb, 0xf6, 0xcd, 0x6d, 0xdb, 0xfe, 0xe7, 0xb6, 0x6d,
+	0xff, 0x7a, 0xd7, 0xb6, 0x7e, 0xbb, 0x6b, 0x5b, 0x37, 0x77, 0x6d, 0xeb, 0xef, 0xbb, 0xb6, 0xf5,
+	0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0x2b, 0x47, 0x0c, 0x83, 0x09, 0x00, 0x00,
+}
+
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x10
+	{
+		size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Message) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Message) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Responses) > 0 {
+		for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRaft(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x72
+		}
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+	i--
+	dAtA[i] = 0x68
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x62
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
+	i--
+	dAtA[i] = 0x58
+	i--
+	if m.Reject {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	if m.Snapshot != nil {
+		{
+			size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaft(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+	i--
+	dAtA[i] = 0x40
+	if len(m.Entries) > 0 {
+		for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRaft(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x30
+	i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintRaft(dAtA, i, uint64(m.From))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.To))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *HardState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.AutoLeave {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x28
+	if len(m.LearnersNext) > 0 {
+		for iNdEx := len(m.LearnersNext) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.LearnersNext[iNdEx]))
+			i--
+			dAtA[i] = 0x20
+		}
+	}
+	if len(m.VotersOutgoing) > 0 {
+		for iNdEx := len(m.VotersOutgoing) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.VotersOutgoing[iNdEx]))
+			i--
+			dAtA[i] = 0x18
+		}
+	}
+	if len(m.Learners) > 0 {
+		for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
+			i--
+			dAtA[i] = 0x10
+		}
+	}
+	if len(m.Voters) > 0 {
+		for iNdEx := len(m.Voters) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.Voters[iNdEx]))
+			i--
+			dAtA[i] = 0x8
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfChange) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.ID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChangeSingle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChangeV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Changes) > 0 {
+		for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRaft(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Transition))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRaft(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Entry) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Index))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ConfState.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 1 + sovRaft(uint64(m.Index))
+	n += 1 + sovRaft(uint64(m.Term))
+	return n
+}
+
+func (m *Snapshot) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	l = m.Metadata.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	return n
+}
+
+func (m *Message) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.To))
+	n += 1 + sovRaft(uint64(m.From))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.LogTerm))
+	n += 1 + sovRaft(uint64(m.Index))
+	if len(m.Entries) > 0 {
+		for _, e := range m.Entries {
+			l = e.Size()
+			n += 1 + l + sovRaft(uint64(l))
+		}
+	}
+	n += 1 + sovRaft(uint64(m.Commit))
+	if m.Snapshot != nil {
+		l = m.Snapshot.Size()
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	n += 2
+	n += 1 + sovRaft(uint64(m.RejectHint))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	n += 1 + sovRaft(uint64(m.Vote))
+	if len(m.Responses) > 0 {
+		for _, e := range m.Responses {
+			l = e.Size()
+			n += 1 + l + sovRaft(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *HardState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Vote))
+	n += 1 + sovRaft(uint64(m.Commit))
+	return n
+}
+
+func (m *ConfState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Voters) > 0 {
+		for _, e := range m.Voters {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if len(m.Learners) > 0 {
+		for _, e := range m.Learners {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if len(m.VotersOutgoing) > 0 {
+		for _, e := range m.VotersOutgoing {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if len(m.LearnersNext) > 0 {
+		for _, e := range m.LearnersNext {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	n += 2
+	return n
+}
+
+func (m *ConfChange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.ID))
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.NodeID))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	return n
+}
+
+func (m *ConfChangeSingle) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.NodeID))
+	return n
+}
+
+func (m *ConfChangeV2) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Transition))
+	if len(m.Changes) > 0 {
+		for _, e := range m.Changes {
+			l = e.Size()
+			n += 1 + l + sovRaft(uint64(l))
+		}
+	}
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	return n
+}
+
+func sovRaft(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaft(x uint64) (n int) {
+	return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Entry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= EntryType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Message) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Message: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= MessageType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			m.To = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.To |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			m.From = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.From |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+			}
+			m.LogTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LogTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Entries = append(m.Entries, Entry{})
+			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Commit |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Snapshot == nil {
+				m.Snapshot = &Snapshot{}
+			}
+			if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Reject = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+			}
+			m.RejectHint = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RejectHint |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+			}
+			m.Vote = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Vote |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Responses = append(m.Responses, Message{})
+			if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HardState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+			}
+			m.Vote = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Vote |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Commit |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Voters = append(m.Voters, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Voters) == 0 {
+					m.Voters = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Voters = append(m.Voters, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType)
+			}
+		case 2:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Learners = append(m.Learners, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Learners) == 0 {
+					m.Learners = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Learners = append(m.Learners, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
+			}
+		case 3:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.VotersOutgoing = append(m.VotersOutgoing, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.VotersOutgoing) == 0 {
+					m.VotersOutgoing = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.VotersOutgoing = append(m.VotersOutgoing, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType)
+			}
+		case 4:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.LearnersNext = append(m.LearnersNext, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.LearnersNext) == 0 {
+					m.LearnersNext = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.LearnersNext = append(m.LearnersNext, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType)
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.AutoLeave = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfChange) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= ConfChangeType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= ConfChangeType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfChangeV2) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType)
+			}
+			m.Transition = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Transition |= ConfChangeTransition(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Changes = append(m.Changes, ConfChangeSingle{})
+			if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRaft(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRaft
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupRaft
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthRaft
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthRaft        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaft          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupRaft = fmt.Errorf("proto: unexpected end of group")
+)
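+
+// Illustrative sketch (not part of the generated code): every Unmarshal
+// method above decodes field tags and integer fields with the same protobuf
+// base-128 varint loop, i.e. the repeated "shift += 7" blocks. The helper
+// below shows that step in isolation; the name decodeVarint is ours and does
+// not exist in this package.
+//
+//	func decodeVarint(b []byte) (v uint64, n int, err error) {
+//		for shift := uint(0); shift < 64; shift += 7 {
+//			if n >= len(b) {
+//				return 0, 0, io.ErrUnexpectedEOF
+//			}
+//			c := b[n]
+//			n++
+//			v |= uint64(c&0x7F) << shift
+//			if c < 0x80 { // high bit clear: last byte of the varint
+//				return v, n, nil
+//			}
+//		}
+//		return 0, 0, ErrIntOverflowRaft // more than 64 bits of payload
+//	}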
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/raft.proto b/vendor/go.etcd.io/raft/v3/raftpb/raft.proto
new file mode 100644
index 0000000..a8598ee
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/raft.proto
@@ -0,0 +1,214 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+option (gogoproto.goproto_unkeyed_all) = false;
+option (gogoproto.goproto_unrecognized_all) = false;
+option (gogoproto.goproto_sizecache_all) = false;
+
+enum EntryType {
+	EntryNormal       = 0;
+	EntryConfChange   = 1; // corresponds to pb.ConfChange
+	EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2
+}
+
+message Entry {
+	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
+	optional bytes      Data  = 4;
+}
+
+message SnapshotMetadata {
+	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+	optional uint64    index      = 2 [(gogoproto.nullable) = false];
+	optional uint64    term       = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+	optional bytes            data     = 1;
+	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+// For description of different message types, see:
+// https://pkg.go.dev/go.etcd.io/raft/v3#hdr-MessageType
+enum MessageType {
+	MsgHup               = 0;
+	MsgBeat              = 1;
+	MsgProp              = 2;
+	MsgApp               = 3;
+	MsgAppResp           = 4;
+	MsgVote              = 5;
+	MsgVoteResp          = 6;
+	MsgSnap              = 7;
+	MsgHeartbeat         = 8;
+	MsgHeartbeatResp     = 9;
+	MsgUnreachable       = 10;
+	MsgSnapStatus        = 11;
+	MsgCheckQuorum       = 12;
+	MsgTransferLeader    = 13;
+	MsgTimeoutNow        = 14;
+	MsgReadIndex         = 15;
+	MsgReadIndexResp     = 16;
+	MsgPreVote           = 17;
+	MsgPreVoteResp       = 18;
+	MsgStorageAppend     = 19;
+	MsgStorageAppendResp = 20;
+	MsgStorageApply      = 21;
+	MsgStorageApplyResp  = 22;
+	MsgForgetLeader      = 23;
+	// NOTE: when adding new message types, remember to update the isLocalMsg and
+	// isResponseMsg arrays in raft/util.go and update the corresponding tests in
+	// raft/util_test.go.
+}
+
+message Message {
+	optional MessageType type        = 1  [(gogoproto.nullable) = false];
+	optional uint64      to          = 2  [(gogoproto.nullable) = false];
+	optional uint64      from        = 3  [(gogoproto.nullable) = false];
+	optional uint64      term        = 4  [(gogoproto.nullable) = false];
+	// logTerm is generally used for appending Raft logs to followers. For example,
+	// (type=MsgApp,index=100,logTerm=5) means the leader appends entries starting
+	// at index=101, and the term of the entry at index 100 is 5.
+	// (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
+	// entries from its leader as it already has an entry with term 5 at index 100.
+	// (type=MsgStorageAppendResp,index=100,logTerm=5) means the local node wrote
+	// entries up to index=100 in stable storage, and the term of the entry at index
+	// 100 was 5. This doesn't always mean that the corresponding MsgStorageAppend
+	// message was the one that carried these entries, just that those entries were
+	// stable at the time of processing the corresponding MsgStorageAppend.
+	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
+	optional uint64      index       = 6  [(gogoproto.nullable) = false];
+	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
+	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
+	// (type=MsgStorageAppend,vote=5,term=10) means the local node is voting for
+	// peer 5 in term 10. For MsgStorageAppends, the term, vote, and commit fields
+	// will either all be set (to facilitate the construction of a HardState) if
+	// any of the fields have changed or will all be unset if none of the fields
+	// have changed.
+	optional uint64      vote        = 13 [(gogoproto.nullable) = false];
+	// snapshot is non-nil and non-empty for MsgSnap messages and nil for all other
+	// message types. However, peer nodes running older binary versions may send a
+	// non-nil, empty value for the snapshot field of non-MsgSnap messages. Code
+	// should be prepared to handle such messages.
+	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = true];
+	optional bool        reject      = 10 [(gogoproto.nullable) = false];
+	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
+	optional bytes       context     = 12 [(gogoproto.nullable) = true];
+	// responses are populated by a raft node to instruct storage threads on how
+	// to respond and who to respond to when the work associated with a message
+	// is complete. Populated for MsgStorageAppend and MsgStorageApply messages.
+	repeated Message     responses   = 14 [(gogoproto.nullable) = false];
+}
+
+message HardState {
+	optional uint64 term   = 1 [(gogoproto.nullable) = false];
+	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
+	optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+// ConfChangeTransition specifies the behavior of a configuration change with
+// respect to joint consensus.
+enum ConfChangeTransition {
+	// Automatically use the simple protocol if possible, otherwise fall back
+	// to ConfChangeTransitionJointImplicit. Most applications will want to use this.
+	ConfChangeTransitionAuto          = 0;
+	// Use joint consensus unconditionally, and transition out of the joint
+	// configuration automatically (by proposing a zero configuration change).
+	//
+	// This option is suitable for applications that want to minimize the time
+	// spent in the joint configuration and do not store the joint configuration
+	// in the state machine (outside of InitialState).
+	ConfChangeTransitionJointImplicit = 1;
+	// Use joint consensus and remain in the joint configuration until the
+	// application proposes a no-op configuration change. This is suitable for
+	// applications that want to explicitly control the transitions, for example
+	// to use a custom payload (via the Context field).
+	ConfChangeTransitionJointExplicit = 2;
+}
+
+message ConfState {
+	// The voters in the incoming config. (If the configuration is not joint,
+	// then the outgoing config is empty).
+	repeated uint64 voters = 1;
+	// The learners in the incoming config.
+	repeated uint64 learners          = 2;
+	// The voters in the outgoing config.
+	repeated uint64 voters_outgoing   = 3;
+	// The nodes that will become learners when the outgoing config is removed.
+	// These nodes are necessarily currently voters in the outgoing config (or
+	// they would have been added to the incoming config right away).
+	repeated uint64 learners_next     = 4;
+	// If set, the config is joint and Raft will automatically transition into
+	// the final config (i.e. remove the outgoing config) when this is safe.
+	optional bool   auto_leave        = 5 [(gogoproto.nullable) = false];
+}
+
+enum ConfChangeType {
+	ConfChangeAddNode        = 0;
+	ConfChangeRemoveNode     = 1;
+	ConfChangeUpdateNode     = 2;
+	ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+	optional ConfChangeType  type    = 2 [(gogoproto.nullable) = false];
+	optional uint64          node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
+	optional bytes           context = 4;
+
+	// NB: this is used only by etcd to thread through a unique identifier.
+	// Ideally it should really use the Context instead. No counterpart to
+	// this field exists in ConfChangeV2.
+	optional uint64          id      = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"];
+}
+
+// ConfChangeSingle is an individual configuration change operation. Multiple
+// such operations can be carried out atomically via a ConfChangeV2.
+message ConfChangeSingle {
+	optional ConfChangeType  type    = 1 [(gogoproto.nullable) = false];
+	optional uint64          node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
+}
+
+// ConfChangeV2 messages initiate configuration changes. They support both the
+// simple "one at a time" membership change protocol and full Joint Consensus
+// allowing for arbitrary changes in membership.
+//
+// The supplied context is treated as an opaque payload and can be used to
+// attach an action on the state machine to the application of the config change
+// proposal. Note that contrary to Joint Consensus as outlined in the Raft
+// paper[1], configuration changes become active when they are *applied* to the
+// state machine (not when they are appended to the log).
+//
+// The simple protocol can be used whenever only a single change is made.
+//
+// Non-simple changes require the use of Joint Consensus, for which two
+// configuration changes are run. The first configuration change specifies the
+// desired changes and transitions the Raft group into the joint configuration,
+// in which quorum requires a majority of both the pre-changes and post-changes
+// configuration. Joint Consensus avoids entering fragile intermediate
+// configurations that could compromise survivability. For example, without the
+// use of Joint Consensus and running across three availability zones with a
+// replication factor of three, it is not possible to replace a voter without
+// entering an intermediate configuration that does not survive the outage of
+// one availability zone.
+//
+// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
+// is used, and assigns the task of leaving the joint configuration either to
+// Raft or the application. Leaving the joint configuration is accomplished by
+// proposing an otherwise-empty ConfChangeV2 in which, optionally, only the
+// Context field is populated.
+//
+// For details on Raft membership changes, see:
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+message ConfChangeV2 {
+	optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false];
+	repeated ConfChangeSingle     changes =    2 [(gogoproto.nullable) = false];
+	optional bytes                context =    3;
+}
diff --git a/vendor/go.etcd.io/raft/v3/rawnode.go b/vendor/go.etcd.io/raft/v3/rawnode.go
new file mode 100644
index 0000000..a4da2ae
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/rawnode.go
@@ -0,0 +1,562 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but no peer can be found in raft.trk for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft               *raft
+	asyncStorageWrites bool
+
+	// Mutable fields.
+	prevSoftSt     *SoftState
+	prevHardSt     pb.HardState
+	stepsOnAdvance []pb.Message
+}
+
+// NewRawNode instantiates a RawNode from the given configuration.
+//
+// See Bootstrap() for bootstrapping an initial state; this replaces the former
+// 'peers' argument to this method (with identical behavior). However, it is
+// recommended that instead of calling Bootstrap, applications bootstrap their
+// state manually by setting up a Storage that has a first index > 1 and which
+// stores the desired ConfState as its InitialState.
+func NewRawNode(config *Config) (*RawNode, error) {
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	rn.asyncStorageWrites = config.AsyncStorageWrites
+	ss := r.softState()
+	rn.prevSoftSt = &ss
+	rn.prevHardSt = r.hardState()
+	return rn, nil
+}
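+
+// A minimal sketch (not upstream code) of the manual bootstrap recommended
+// above, assuming the in-package MemoryStorage and a three-voter cluster.
+// Applying a snapshot at index 1 leaves the Storage with a first index > 1
+// and records the desired ConfState as part of its InitialState:
+//
+//	storage := NewMemoryStorage()
+//	_ = storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{
+//		Index:     1,
+//		Term:      1,
+//		ConfState: pb.ConfState{Voters: []uint64{1, 2, 3}},
+//	}})
+//	rn, err := NewRawNode(&Config{
+//		ID:              1,
+//		ElectionTick:    10,
+//		HeartbeatTick:   1,
+//		Storage:         storage,
+//		MaxSizePerMsg:   1 << 20,
+//		MaxInflightMsgs: 256,
+//	})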
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+//
+// DEPRECATED: This method will be removed in a future release.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for
+// details.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error {
+	m, err := confChangeToMsg(cc)
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(m)
+}
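+
+// An illustrative sketch (not upstream code) of a multi-node change proposed
+// through this method using a ConfChangeV2, as described in raftpb/raft.proto.
+// A non-simple change enters a joint configuration; with an explicit
+// transition the application later leaves it by proposing an empty
+// ConfChangeV2:
+//
+//	cc := pb.ConfChangeV2{
+//		Transition: pb.ConfChangeTransitionJointExplicit,
+//		Changes: []pb.ConfChangeSingle{
+//			{Type: pb.ConfChangeAddNode, NodeID: 4},
+//			{Type: pb.ConfChangeRemoveNode, NodeID: 1},
+//		},
+//	}
+//	_ = rn.ProposeConfChange(cc)
+//	// ...later, once the joint configuration has been applied:
+//	_ = rn.ProposeConfChange(pb.ConfChangeV2{}) // leave the joint configuration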
+
+// ApplyConfChange applies a config change to the local node. The app must call
+// this when it applies a configuration change, except when it decides to reject
+// the configuration change, in which case no call must take place.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
+	cs := rn.raft.applyConfChange(cc.AsV2())
+	return &cs
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// Ignore unexpected local messages received over the network.
+	if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) {
+		return ErrStepLocalMsg
+	}
+	if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && rn.raft.trk.Progress[m.From] == nil {
+		return ErrStepPeerNotFound
+	}
+	return rn.raft.Step(m)
+}
+
+// Ready returns the outstanding work that the application needs to handle. This
+// includes appending and applying entries or a snapshot, updating the HardState,
+// and sending messages. The returned Ready() *must* be handled and subsequently
+// passed back via Advance().
+func (rn *RawNode) Ready() Ready {
+	rd := rn.readyWithoutAccept()
+	rn.acceptReady(rd)
+	return rd
+}
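+
+// A minimal sketch (not upstream code) of the contract above for the
+// non-AsyncStorageWrites case: persist the Ready to storage, only then send
+// messages, apply committed entries, and finally call Advance. The storage,
+// send and process helpers are assumed to be supplied by the application.
+//
+//	for rn.HasReady() {
+//		rd := rn.Ready()
+//		if !IsEmptyHardState(rd.HardState) {
+//			_ = storage.SetHardState(rd.HardState) // persist term/vote/commit
+//		}
+//		if !IsEmptySnap(rd.Snapshot) {
+//			_ = storage.ApplySnapshot(rd.Snapshot) // persist the snapshot
+//		}
+//		_ = storage.Append(rd.Entries) // persist new entries before sending
+//		for _, m := range rd.Messages {
+//			send(m) // transport to peers
+//		}
+//		for _, e := range rd.CommittedEntries {
+//			process(e) // apply to the state machine
+//		}
+//		rn.Advance(rd)
+//	}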
+
+// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there
+// is no obligation that the Ready must be handled.
+func (rn *RawNode) readyWithoutAccept() Ready {
+	r := rn.raft
+
+	rd := Ready{
+		Entries:          r.raftLog.nextUnstableEnts(),
+		CommittedEntries: r.raftLog.nextCommittedEnts(rn.applyUnstableEntries()),
+		Messages:         r.msgs,
+	}
+	if softSt := r.softState(); !softSt.equal(rn.prevSoftSt) {
+		// Allocate only when SoftState changes.
+		escapingSoftSt := softSt
+		rd.SoftState = &escapingSoftSt
+	}
+	if hardSt := r.hardState(); !isHardStateEqual(hardSt, rn.prevHardSt) {
+		rd.HardState = hardSt
+	}
+	if r.raftLog.hasNextUnstableSnapshot() {
+		rd.Snapshot = *r.raftLog.nextUnstableSnapshot()
+	}
+	if len(r.readStates) != 0 {
+		rd.ReadStates = r.readStates
+	}
+	rd.MustSync = MustSync(r.hardState(), rn.prevHardSt, len(rd.Entries))
+
+	if rn.asyncStorageWrites {
+		// If async storage writes are enabled, enqueue messages to
+		// local storage threads, where applicable.
+		if needStorageAppendMsg(r, rd) {
+			m := newStorageAppendMsg(r, rd)
+			rd.Messages = append(rd.Messages, m)
+		}
+		if needStorageApplyMsg(rd) {
+			m := newStorageApplyMsg(r, rd)
+			rd.Messages = append(rd.Messages, m)
+		}
+	} else {
+		// If async storage writes are disabled, immediately enqueue
+		// msgsAfterAppend to be sent out. The Ready struct contract
+		// mandates that Messages cannot be sent until after Entries
+		// are written to stable storage.
+		for _, m := range r.msgsAfterAppend {
+			if m.To != r.id {
+				rd.Messages = append(rd.Messages, m)
+			}
+		}
+	}
+
+	return rd
+}
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+	// Persistent state on all servers:
+	// (Updated on stable storage before responding to RPCs)
+	// currentTerm
+	// votedFor
+	// log entries[]
+	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
+
+func needStorageAppendMsg(r *raft, rd Ready) bool {
+	// Return true if log entries, hard state, or a snapshot need to be written
+	// to stable storage. Also return true if any messages are contingent on all
+	// prior MsgStorageAppend being processed.
+	return len(rd.Entries) > 0 ||
+		!IsEmptyHardState(rd.HardState) ||
+		!IsEmptySnap(rd.Snapshot) ||
+		len(r.msgsAfterAppend) > 0
+}
+
+func needStorageAppendRespMsg(r *raft, rd Ready) bool {
+	// Return true if raft needs to hear about stabilized entries or an applied
+	// snapshot. See the comment in newStorageAppendRespMsg, which explains why
+	// we check hasNextOrInProgressUnstableEnts instead of len(rd.Entries) > 0.
+	return r.raftLog.hasNextOrInProgressUnstableEnts() ||
+		!IsEmptySnap(rd.Snapshot)
+}
+
+// newStorageAppendMsg creates the message that should be sent to the local
+// append thread to instruct it to append log entries, write an updated hard
+// state, and apply a snapshot. The message also carries a set of responses
+// that should be delivered after the rest of the message is processed. Used
+// with AsyncStorageWrites.
+func newStorageAppendMsg(r *raft, rd Ready) pb.Message {
+	m := pb.Message{
+		Type:    pb.MsgStorageAppend,
+		To:      LocalAppendThread,
+		From:    r.id,
+		Entries: rd.Entries,
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		// If the Ready includes a HardState update, assign each of its fields
+		// to the corresponding fields in the Message. This allows clients to
+		// reconstruct the HardState and save it to stable storage.
+		//
+		// If the Ready does not include a HardState update, make sure to not
+		// assign a value to any of the fields so that a HardState reconstructed
+		// from them will be empty (return true from raft.IsEmptyHardState).
+		m.Term = rd.Term
+		m.Vote = rd.Vote
+		m.Commit = rd.Commit
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		snap := rd.Snapshot
+		m.Snapshot = &snap
+	}
+	// Attach all messages in msgsAfterAppend as responses to be delivered after
+	// the message is processed, along with a self-directed MsgStorageAppendResp
+	// to acknowledge the entry stability.
+	//
+	// NB: it is important for performance that the MsgStorageAppendResp message be
+	// handled after self-directed MsgAppResp messages on the leader (which will
+	// be contained in msgsAfterAppend). This ordering allows the MsgAppResp
+	// handling to use a fast-path in r.raftLog.term() before the newly appended
+	// entries are removed from the unstable log.
+	m.Responses = r.msgsAfterAppend
+	if needStorageAppendRespMsg(r, rd) {
+		m.Responses = append(m.Responses, newStorageAppendRespMsg(r, rd))
+	}
+	return m
+}
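+
+// An illustrative sketch (not upstream code) of how an application's append
+// thread might consume the MsgStorageAppend built above when
+// AsyncStorageWrites is enabled. The appendMsgs and stepCh channels, the
+// storage and send helpers, and the selfID variable are assumptions of the
+// sketch:
+//
+//	for m := range appendMsgs { // m.Type == pb.MsgStorageAppend
+//		if m.Snapshot != nil {
+//			_ = storage.ApplySnapshot(*m.Snapshot)
+//		}
+//		if hs := (pb.HardState{Term: m.Term, Vote: m.Vote, Commit: m.Commit}); !IsEmptyHardState(hs) {
+//			_ = storage.SetHardState(hs)
+//		}
+//		_ = storage.Append(m.Entries) // must be durable before the responses go out
+//		for _, resp := range m.Responses {
+//			if resp.To == selfID {
+//				stepCh <- resp // back into the RawNode, e.g. MsgStorageAppendResp
+//			} else {
+//				send(resp) // now-safe messages to peers, e.g. MsgAppResp
+//			}
+//		}
+//	}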
+
+// newStorageAppendRespMsg creates the message that should be returned to node
+// after the unstable log entries, hard state, and snapshot in the current Ready
+// (along with those in all prior Ready structs) have been saved to stable
+// storage.
+func newStorageAppendRespMsg(r *raft, rd Ready) pb.Message {
+	m := pb.Message{
+		Type: pb.MsgStorageAppendResp,
+		To:   r.id,
+		From: LocalAppendThread,
+		// Dropped after term change, see below.
+		Term: r.Term,
+	}
+	if r.raftLog.hasNextOrInProgressUnstableEnts() {
+		// If the raft log has unstable entries, attach the last index and term of the
+		// append to the response message. This (index, term) tuple will be handed back
+		// and consulted when the stability of those log entries is signaled to the
+		// unstable. If the (index, term) match the unstable log by the time the
+		// response is received (unstable.stableTo), the unstable log can be truncated.
+		//
+		// However, with just this logic, there would be an ABA problem[^1] that could
+		// lead to the unstable log and the stable log getting out of sync temporarily
+		// and leading to an inconsistent view. Consider the following example with 5
+		// nodes, A B C D E:
+		//
+		//  1. A is the leader.
+		//  2. A proposes some log entries but only B receives these entries.
+		//  3. B gets the Ready and the entries are appended asynchronously.
+		//  4. A crashes and C becomes leader after getting a vote from D and E.
+		//  5. C proposes some log entries and B receives these entries, overwriting the
+		//     previous unstable log entries that are in the process of being appended.
+		//     The entries have a larger term than the previous entries but the same
+		//     indexes. It begins appending these new entries asynchronously.
+		//  6. C crashes and A restarts and becomes leader again after getting the vote
+		//     from D and E.
+		//  7. B receives the entries from A which are the same as the ones from step 2,
+		//     overwriting the previous unstable log entries that are in the process of
+		//     being appended from step 5. The entries have the original terms and
+		//     indexes from step 2. Recall that log entries retain their original term
+		//     numbers when a leader replicates entries from previous terms. It begins
+		//     appending these new entries asynchronously.
+		//  8. The asynchronous log appends from the first Ready complete and stableTo
+		//     is called.
+		//  9. However, the log entries from the second Ready are still in the
+		//     asynchronous append pipeline and will overwrite (in stable storage) the
+		//     entries from the first Ready at some future point. We can't truncate the
+		//     unstable log yet or a future read from Storage might see the entries from
+		//     step 5 before they have been replaced by the entries from step 7.
+		//     Instead, we must wait until we are sure that the entries are stable and
+		//     that no in-progress appends might overwrite them before removing entries
+		//     from the unstable log.
+		//
+		// To prevent these kinds of problems, we also attach the current term to the
+		// MsgStorageAppendResp (above). If the term has changed by the time the
+		// MsgStorageAppendResp is returned, the response is ignored and the unstable
+		// log is not truncated. The unstable log is only truncated when the term has
+		// remained unchanged from the time that the MsgStorageAppend was sent to the
+		// time that the MsgStorageAppendResp is received, indicating that no-one else
+		// is in the process of truncating the stable log.
+		//
+		// However, this replaces a correctness problem with a liveness problem. If we
+		// only attempted to truncate the unstable log when appending new entries but
+		// also occasionally dropped these responses, then quiescence of new log entries
+		// could lead to the unstable log never being truncated.
+		//
+		// To combat this, we attempt to truncate the log on all MsgStorageAppendResp
+		// messages where the unstable log is not empty, not just those associated with
+		// entry appends. This includes MsgStorageAppendResp messages associated with an
+		// updated HardState, which occur after a term change.
+		//
+		// In other words, we set Index and LogTerm in a block that looks like:
+		//
+		//  if r.raftLog.hasNextOrInProgressUnstableEnts() { ... }
+		//
+		// not like:
+		//
+		//  if len(rd.Entries) > 0 { ... }
+		//
+		// To do so, we attach r.raftLog.lastIndex() and r.raftLog.lastTerm(), not the
+		// (index, term) of the last entry in rd.Entries. If rd.Entries is not empty,
+		// these will be the same. However, if rd.Entries is empty, we still want to
+		// attest that this (index, term) is correct at the current term, in case the
+		// MsgStorageAppend that contained the last entry in the unstable slice carried
+		// an earlier term and was dropped.
+		//
+		// A MsgStorageAppend with a new term is emitted on each term change. This is
+		// the same condition that causes MsgStorageAppendResp messages with earlier
+		// terms to be ignored. As a result, we are guaranteed that, assuming a bounded
+		// number of term changes, there will eventually be a MsgStorageAppendResp
+		// message that is not ignored. This means that entries in the unstable log
+		// which have been appended to stable storage will eventually be truncated and
+		// dropped from memory.
+		//
+		// [^1]: https://en.wikipedia.org/wiki/ABA_problem
+		last := r.raftLog.lastEntryID()
+		m.Index = last.index
+		m.LogTerm = last.term
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		snap := rd.Snapshot
+		m.Snapshot = &snap
+	}
+	return m
+}
+
+func needStorageApplyMsg(rd Ready) bool     { return len(rd.CommittedEntries) > 0 }
+func needStorageApplyRespMsg(rd Ready) bool { return needStorageApplyMsg(rd) }
+
+// newStorageApplyMsg creates the message that should be sent to the local
+// apply thread to instruct it to apply committed log entries. The message
+// also carries a response that should be delivered after the rest of the
+// message is processed. Used with AsyncStorageWrites.
+func newStorageApplyMsg(r *raft, rd Ready) pb.Message {
+	ents := rd.CommittedEntries
+	return pb.Message{
+		Type:    pb.MsgStorageApply,
+		To:      LocalApplyThread,
+		From:    r.id,
+		Term:    0, // committed entries don't apply under a specific term
+		Entries: ents,
+		Responses: []pb.Message{
+			newStorageApplyRespMsg(r, ents),
+		},
+	}
+}
+
+// newStorageApplyRespMsg creates the message that should be returned to node
+// after the committed entries in the current Ready (along with those in all
+// prior Ready structs) have been applied to the local state machine.
+func newStorageApplyRespMsg(r *raft, ents []pb.Entry) pb.Message {
+	return pb.Message{
+		Type:    pb.MsgStorageApplyResp,
+		To:      r.id,
+		From:    LocalApplyThread,
+		Term:    0, // committed entries don't apply under a specific term
+		Entries: ents,
+	}
+}
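+
+// Illustrative sketch (not part of the upstream API): with AsyncStorageWrites
+// enabled, an application-side apply loop is expected to apply the committed
+// entries carried by a MsgStorageApply and then step the attached Responses
+// back into the node. The applyC channel and the sm state machine below are
+// assumed names.
+//
+//	func applyLoop(ctx context.Context, n Node, sm stateMachine, applyC <-chan raftpb.Message) {
+//		for m := range applyC { // m.Type == raftpb.MsgStorageApply
+//			sm.apply(m.Entries) // apply committed entries to the local state machine
+//			for _, resp := range m.Responses {
+//				_ = n.Step(ctx, resp) // deliver MsgStorageApplyResp back to raft
+//			}
+//		}
+//	}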
+
+// acceptReady is called when the consumer of the RawNode has decided to go
+// ahead and handle a Ready. Nothing must alter the state of the RawNode between
+// this call and the prior call to Ready().
+func (rn *RawNode) acceptReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
+	if !rn.asyncStorageWrites {
+		if len(rn.stepsOnAdvance) != 0 {
+			rn.raft.logger.Panicf("two accepted Ready structs without call to Advance")
+		}
+		for _, m := range rn.raft.msgsAfterAppend {
+			if m.To == rn.raft.id {
+				rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+			}
+		}
+		if needStorageAppendRespMsg(rn.raft, rd) {
+			m := newStorageAppendRespMsg(rn.raft, rd)
+			rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+		}
+		if needStorageApplyRespMsg(rd) {
+			m := newStorageApplyRespMsg(rn.raft, rd.CommittedEntries)
+			rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+		}
+	}
+	rn.raft.msgs = nil
+	rn.raft.msgsAfterAppend = nil
+	rn.raft.raftLog.acceptUnstable()
+	if len(rd.CommittedEntries) > 0 {
+		ents := rd.CommittedEntries
+		index := ents[len(ents)-1].Index
+		rn.raft.raftLog.acceptApplying(index, entsSize(ents), rn.applyUnstableEntries())
+	}
+
+	traceReady(rn.raft)
+}
+
+// applyUnstableEntries returns whether entries are allowed to be applied once
+// they are known to be committed but before they have been written locally to
+// stable storage.
+func (rn *RawNode) applyUnstableEntries() bool {
+	return !rn.asyncStorageWrites
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready is pending.
+func (rn *RawNode) HasReady() bool {
+	// TODO(nvanbenschoten): order these cases in terms of cost and frequency.
+	r := rn.raft
+	if softSt := r.softState(); !softSt.equal(rn.prevSoftSt) {
+		return true
+	}
+	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+		return true
+	}
+	if r.raftLog.hasNextUnstableSnapshot() {
+		return true
+	}
+	if len(r.msgs) > 0 || len(r.msgsAfterAppend) > 0 {
+		return true
+	}
+	if r.raftLog.hasNextUnstableEnts() || r.raftLog.hasNextCommittedEnts(rn.applyUnstableEntries()) {
+		return true
+	}
+	if len(r.readStates) != 0 {
+		return true
+	}
+	return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+//
+// NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from
+// the local append and apply threads take its place.
+func (rn *RawNode) Advance(_ Ready) {
+	// The actions performed by this function are encoded into stepsOnAdvance in
+	// acceptReady. In earlier versions of this library, they were computed from
+	// the provided Ready struct. Retain the unused parameter for compatibility.
+	if rn.asyncStorageWrites {
+		rn.raft.logger.Panicf("Advance must not be called when using AsyncStorageWrites")
+	}
+	for i, m := range rn.stepsOnAdvance {
+		_ = rn.raft.Step(m)
+		rn.stepsOnAdvance[i] = pb.Message{}
+	}
+	rn.stepsOnAdvance = rn.stepsOnAdvance[:0]
+}
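+
+// Illustrative sketch (assumed application code, simplified): the synchronous
+// Ready/Advance cycle on a RawNode without AsyncStorageWrites. storage,
+// transport and apply are placeholders.
+//
+//	for rn.HasReady() {
+//		rd := rn.Ready()
+//		if !IsEmptyHardState(rd.HardState) {
+//			storage.SetHardState(rd.HardState) // persist HardState
+//		}
+//		storage.Append(rd.Entries)  // persist unstable entries
+//		transport.send(rd.Messages) // send outgoing messages
+//		apply(rd.CommittedEntries)  // apply committed entries
+//		rn.Advance(rd)              // acknowledge the Ready
+//	}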
+
+// Status returns the current status of the given group. This allocates, see
+// BasicStatus and WithProgress for allocation-friendlier choices.
+func (rn *RawNode) Status() Status {
+	status := getStatus(rn.raft)
+	return status
+}
+
+// BasicStatus returns a BasicStatus. Notably this does not contain the
+// Progress map; see WithProgress for an allocation-free way to inspect it.
+func (rn *RawNode) BasicStatus() BasicStatus {
+	return getBasicStatus(rn.raft)
+}
+
+// ProgressType indicates the type of replica a Progress corresponds to.
+type ProgressType byte
+
+const (
+	// ProgressTypePeer accompanies a Progress for a regular peer replica.
+	ProgressTypePeer ProgressType = iota
+	// ProgressTypeLearner accompanies a Progress for a learner replica.
+	ProgressTypeLearner
+)
+
+// WithProgress is a helper to introspect the Progress for this node and its
+// peers.
+func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) {
+	rn.raft.trk.Visit(func(id uint64, pr *tracker.Progress) {
+		typ := ProgressTypePeer
+		if pr.IsLearner {
+			typ = ProgressTypeLearner
+		}
+		p := *pr
+		p.Inflights = nil
+		visitor(id, typ, p)
+	})
+}
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
+
+// TransferLeader tries to transfer leadership to the given transferee.
+func (rn *RawNode) TransferLeader(transferee uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
+}
+
+// ForgetLeader forgets a follower's current leader, changing it to None.
+// See (Node).ForgetLeader for details.
+func (rn *RawNode) ForgetLeader() error {
+	return rn.raft.Step(pb.Message{Type: pb.MsgForgetLeader})
+}
+
+// ReadIndex requests a read state. The read state will be set in Ready.
+// The read state has a read index. Once the application advances further than
+// the read index, any linearizable read requests issued before the read
+// request can be processed safely. The read state will have the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
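+
+// Illustrative sketch (assumed application flow): a linearizable read via
+// ReadIndex. waitUntilApplied and serveRead are placeholders.
+//
+//	rctx := []byte("unique-request-id")
+//	rn.ReadIndex(rctx)
+//	// Later, a Ready (rd := rn.Ready()) carries a matching ReadState:
+//	for _, rs := range rd.ReadStates {
+//		if bytes.Equal(rs.RequestCtx, rctx) {
+//			waitUntilApplied(rs.Index) // wait until the state machine has applied rs.Index
+//			serveRead()                // the read is now linearizable
+//		}
+//	}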
diff --git a/vendor/go.etcd.io/raft/v3/read_only.go b/vendor/go.etcd.io/raft/v3/read_only.go
new file mode 100644
index 0000000..5a53f0c
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/read_only.go
@@ -0,0 +1,121 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "go.etcd.io/raft/v3/raftpb"
+
+// ReadState provides state for a read-only query.
+// It is the caller's responsibility to call ReadIndex before getting this
+// state from Ready. It is also the caller's duty to differentiate whether
+// this state is the one it requested, using RequestCtx, e.g. by attaching a
+// unique id as RequestCtx.
+type ReadState struct {
+	Index      uint64
+	RequestCtx []byte
+}
+
+type readIndexStatus struct {
+	req   pb.Message
+	index uint64
+	// NB: this never records 'false', but it's more convenient to use this
+	// instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If
+	// this becomes performance sensitive enough (doubtful), quorum.VoteResult
+	// can change to an API that is closer to that of CommittedIndex.
+	acks map[uint64]bool
+}
+
+type readOnly struct {
+	option           ReadOnlyOption
+	pendingReadIndex map[string]*readIndexStatus
+	readIndexQueue   []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+	return &readOnly{
+		option:           option,
+		pendingReadIndex: make(map[string]*readIndexStatus),
+	}
+}
+
+// addRequest adds a read only request into readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+	s := string(m.Entries[0].Data)
+	if _, ok := ro.pendingReadIndex[s]; ok {
+		return
+	}
+	ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)}
+	ro.readIndexQueue = append(ro.readIndexQueue, s)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat that carried the read only request
+// context.
+func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool {
+	rs, ok := ro.pendingReadIndex[string(context)]
+	if !ok {
+		return nil
+	}
+
+	rs.acks[id] = true
+	return rs.acks
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+	var (
+		i     int
+		found bool
+	)
+
+	ctx := string(m.Context)
+	var rss []*readIndexStatus
+
+	for _, okctx := range ro.readIndexQueue {
+		i++
+		rs, ok := ro.pendingReadIndex[okctx]
+		if !ok {
+			panic("cannot find corresponding read state from pending map")
+		}
+		rss = append(rss, rs)
+		if okctx == ctx {
+			found = true
+			break
+		}
+	}
+
+	if found {
+		ro.readIndexQueue = ro.readIndexQueue[i:]
+		for _, rs := range rss {
+			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
+		}
+		return rss
+	}
+
+	return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+	if len(ro.readIndexQueue) == 0 {
+		return ""
+	}
+	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
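+
+// Illustrative sketch (simplified; the quorum check actually lives in raft.go):
+// how the leader uses the readOnly bookkeeping above.
+//
+//	ro := newReadOnly(ReadOnlySafe)
+//	ro.addRequest(commitIndex, readIndexMsg)           // queue the pending read
+//	acks := ro.recvAck(peerID, readIndexMsg.Entries[0].Data)
+//	if quorumAcked(acks) {                             // placeholder for the VoteResult check
+//		for _, rs := range ro.advance(heartbeatResp) { // release requests up to this context
+//			respondWithReadIndex(rs)                   // placeholder response path
+//		}
+//	}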
diff --git a/vendor/go.etcd.io/raft/v3/state_trace.go b/vendor/go.etcd.io/raft/v3/state_trace.go
new file mode 100644
index 0000000..8712dc6
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/state_trace.go
@@ -0,0 +1,339 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build with_tla
+
+package raft
+
+import (
+	"strconv"
+	"time"
+
+	"go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+const StateTraceDeployed = true
+
+type stateMachineEventType int
+
+const (
+	rsmInitState stateMachineEventType = iota
+	rsmBecomeCandidate
+	rsmBecomeFollower
+	rsmBecomeLeader
+	rsmCommit
+	rsmReplicate
+	rsmChangeConf
+	rsmApplyConfChange
+	rsmReady
+	rsmSendAppendEntriesRequest
+	rsmReceiveAppendEntriesRequest
+	rsmSendAppendEntriesResponse
+	rsmReceiveAppendEntriesResponse
+	rsmSendRequestVoteRequest
+	rsmReceiveRequestVoteRequest
+	rsmSendRequestVoteResponse
+	rsmReceiveRequestVoteResponse
+	rsmSendSnapshot
+	rsmReceiveSnapshot
+)
+
+func (e stateMachineEventType) String() string {
+	return []string{
+		"InitState",
+		"BecomeCandidate",
+		"BecomeFollower",
+		"BecomeLeader",
+		"Commit",
+		"Replicate",
+		"ChangeConf",
+		"ApplyConfChange",
+		"Ready",
+		"SendAppendEntriesRequest",
+		"ReceiveAppendEntriesRequest",
+		"SendAppendEntriesResponse",
+		"ReceiveAppendEntriesResponse",
+		"SendRequestVoteRequest",
+		"ReceiveRequestVoteRequest",
+		"SendRequestVoteResponse",
+		"ReceiveRequestVoteResponse",
+		"SendSnapshot",
+		"ReceiveSnapshot",
+	}[e]
+}
+
+const (
+	ConfChangeAddNewServer string = "AddNewServer"
+	ConfChangeRemoveServer string = "RemoveServer"
+	ConfChangeAddLearner   string = "AddLearner"
+)
+
+type TracingEvent struct {
+	Name       string             `json:"name"`
+	NodeID     string             `json:"nid"`
+	State      TracingState       `json:"state"`
+	Role       string             `json:"role"`
+	LogSize    uint64             `json:"log"`
+	Conf       [2][]string        `json:"conf"`
+	Message    *TracingMessage    `json:"msg,omitempty"`
+	ConfChange *TracingConfChange `json:"cc,omitempty"`
+	Properties map[string]any     `json:"prop,omitempty"`
+}
+
+type TracingState struct {
+	Term   uint64 `json:"term"`
+	Vote   string `json:"vote"`
+	Commit uint64 `json:"commit"`
+}
+
+type TracingMessage struct {
+	Type        string `json:"type"`
+	Term        uint64 `json:"term"`
+	From        string `json:"from"`
+	To          string `json:"to"`
+	EntryLength int    `json:"entries"`
+	LogTerm     uint64 `json:"logTerm"`
+	Index       uint64 `json:"index"`
+	Commit      uint64 `json:"commit"`
+	Vote        string `json:"vote"`
+	Reject      bool   `json:"reject"`
+	RejectHint  uint64 `json:"rejectHint"`
+}
+
+type SingleConfChange struct {
+	NodeID string `json:"nid"`
+	Action string `json:"action"`
+}
+
+type TracingConfChange struct {
+	Changes []SingleConfChange `json:"changes,omitempty"`
+	NewConf []string           `json:"newconf,omitempty"`
+}
+
+func makeTracingState(r *raft) TracingState {
+	hs := r.hardState()
+	return TracingState{
+		Term:   hs.Term,
+		Vote:   strconv.FormatUint(hs.Vote, 10),
+		Commit: hs.Commit,
+	}
+}
+
+func makeTracingMessage(m *raftpb.Message) *TracingMessage {
+	if m == nil {
+		return nil
+	}
+
+	logTerm := m.LogTerm
+	entries := len(m.Entries)
+	index := m.Index
+	if m.Type == raftpb.MsgSnap {
+		index = 0
+		logTerm = 0
+		entries = int(m.Snapshot.Metadata.Index)
+	}
+	return &TracingMessage{
+		Type:        m.Type.String(),
+		Term:        m.Term,
+		From:        strconv.FormatUint(m.From, 10),
+		To:          strconv.FormatUint(m.To, 10),
+		EntryLength: entries,
+		LogTerm:     logTerm,
+		Index:       index,
+		Commit:      m.Commit,
+		Vote:        strconv.FormatUint(m.Vote, 10),
+		Reject:      m.Reject,
+		RejectHint:  m.RejectHint,
+	}
+}
+
+type TraceLogger interface {
+	TraceEvent(*TracingEvent)
+}
+
+func traceEvent(evt stateMachineEventType, r *raft, m *raftpb.Message, prop map[string]any) {
+	if r.traceLogger == nil {
+		return
+	}
+
+	r.traceLogger.TraceEvent(&TracingEvent{
+		Name:       evt.String(),
+		NodeID:     strconv.FormatUint(r.id, 10),
+		State:      makeTracingState(r),
+		LogSize:    r.raftLog.lastIndex(),
+		Conf:       [2][]string{formatConf(r.trk.Voters[0].Slice()), formatConf(r.trk.Voters[1].Slice())},
+		Role:       r.state.String(),
+		Message:    makeTracingMessage(m),
+		Properties: prop,
+	})
+}
+
+func traceNodeEvent(evt stateMachineEventType, r *raft) {
+	traceEvent(evt, r, nil, nil)
+}
+
+func formatConf(s []uint64) []string {
+	if s == nil {
+		return []string{}
+	}
+
+	r := make([]string, len(s))
+	for i, v := range s {
+		r[i] = strconv.FormatUint(v, 10)
+	}
+	return r
+}
+
+// Use the following helper functions to trace specific states and/or
+// transitions at the corresponding code lines
+func traceInitState(r *raft) {
+	if r.traceLogger == nil {
+		return
+	}
+
+	traceNodeEvent(rsmInitState, r)
+}
+
+func traceReady(r *raft) {
+	traceNodeEvent(rsmReady, r)
+}
+
+func traceCommit(r *raft) {
+	traceNodeEvent(rsmCommit, r)
+}
+
+func traceReplicate(r *raft, es ...raftpb.Entry) {
+	for i := range es {
+		if es[i].Type == raftpb.EntryNormal {
+			traceNodeEvent(rsmReplicate, r)
+		}
+	}
+}
+
+func traceBecomeFollower(r *raft) {
+	traceNodeEvent(rsmBecomeFollower, r)
+}
+
+func traceBecomeCandidate(r *raft) {
+	traceNodeEvent(rsmBecomeCandidate, r)
+}
+
+func traceBecomeLeader(r *raft) {
+	traceNodeEvent(rsmBecomeLeader, r)
+}
+
+func traceChangeConfEvent(cci raftpb.ConfChangeI, r *raft) {
+	cc2 := cci.AsV2()
+	cc := &TracingConfChange{
+		Changes: []SingleConfChange{},
+		NewConf: []string{},
+	}
+	for _, c := range cc2.Changes {
+		switch c.Type {
+		case raftpb.ConfChangeAddNode:
+			cc.Changes = append(cc.Changes, SingleConfChange{
+				NodeID: strconv.FormatUint(c.NodeID, 10),
+				Action: ConfChangeAddNewServer,
+			})
+		case raftpb.ConfChangeRemoveNode:
+			cc.Changes = append(cc.Changes, SingleConfChange{
+				NodeID: strconv.FormatUint(c.NodeID, 10),
+				Action: ConfChangeRemoveServer,
+			})
+		case raftpb.ConfChangeAddLearnerNode:
+			cc.Changes = append(cc.Changes, SingleConfChange{
+				NodeID: strconv.FormatUint(c.NodeID, 10),
+				Action: ConfChangeAddLearner,
+			})
+		}
+	}
+
+	if len(cc.Changes) == 0 {
+		return
+	}
+
+	p := map[string]any{}
+	p["cc"] = cc
+	traceEvent(rsmChangeConf, r, nil, p)
+}
+
+func traceConfChangeEvent(cfg tracker.Config, r *raft) {
+	if r.traceLogger == nil {
+		return
+	}
+
+	cc := &TracingConfChange{
+		Changes: []SingleConfChange{},
+		NewConf: formatConf(cfg.Voters[0].Slice()),
+	}
+
+	p := map[string]any{}
+	p["cc"] = cc
+	traceEvent(rsmApplyConfChange, r, nil, p)
+}
+
+func traceSendMessage(r *raft, m *raftpb.Message) {
+	if r.traceLogger == nil {
+		return
+	}
+
+	prop := map[string]any{}
+
+	var evt stateMachineEventType
+	switch m.Type {
+	case raftpb.MsgApp:
+		evt = rsmSendAppendEntriesRequest
+		if p, exist := r.trk.Progress[m.From]; exist {
+			prop["match"] = p.Match
+			prop["next"] = p.Next
+		}
+
+	case raftpb.MsgHeartbeat, raftpb.MsgSnap:
+		evt = rsmSendAppendEntriesRequest
+	case raftpb.MsgAppResp, raftpb.MsgHeartbeatResp:
+		evt = rsmSendAppendEntriesResponse
+	case raftpb.MsgVote:
+		evt = rsmSendRequestVoteRequest
+	case raftpb.MsgVoteResp:
+		evt = rsmSendRequestVoteResponse
+	default:
+		return
+	}
+
+	traceEvent(evt, r, m, prop)
+}
+
+func traceReceiveMessage(r *raft, m *raftpb.Message) {
+	if r.traceLogger == nil {
+		return
+	}
+
+	var evt stateMachineEventType
+	switch m.Type {
+	case raftpb.MsgApp, raftpb.MsgHeartbeat, raftpb.MsgSnap:
+		evt = rsmReceiveAppendEntriesRequest
+	case raftpb.MsgAppResp, raftpb.MsgHeartbeatResp:
+		evt = rsmReceiveAppendEntriesResponse
+	case raftpb.MsgVote:
+		evt = rsmReceiveRequestVoteRequest
+	case raftpb.MsgVoteResp:
+		evt = rsmReceiveRequestVoteResponse
+	default:
+		return
+	}
+
+	time.Sleep(time.Millisecond) // sleep 1ms to reduce the impact of time shift across nodes
+	traceEvent(evt, r, m, nil)
+}
diff --git a/vendor/go.etcd.io/raft/v3/state_trace_nop.go b/vendor/go.etcd.io/raft/v3/state_trace_nop.go
new file mode 100644
index 0000000..cdeb99b
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/state_trace_nop.go
@@ -0,0 +1,50 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !with_tla
+
+package raft
+
+import (
+	"go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+const StateTraceDeployed = false
+
+type TraceLogger interface{}
+
+type TracingEvent struct{}
+
+func traceInitState(*raft) {}
+
+func traceReady(*raft) {}
+
+func traceCommit(*raft) {}
+
+func traceReplicate(*raft, ...raftpb.Entry) {}
+
+func traceBecomeFollower(*raft) {}
+
+func traceBecomeCandidate(*raft) {}
+
+func traceBecomeLeader(*raft) {}
+
+func traceChangeConfEvent(raftpb.ConfChangeI, *raft) {}
+
+func traceConfChangeEvent(tracker.Config, *raft) {}
+
+func traceSendMessage(*raft, *raftpb.Message) {}
+
+func traceReceiveMessage(*raft, *raftpb.Message) {}
diff --git a/vendor/go.etcd.io/raft/v3/status.go b/vendor/go.etcd.io/raft/v3/status.go
new file mode 100644
index 0000000..ded0c0e
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/status.go
@@ -0,0 +1,105 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+// Status contains information about this Raft peer and its view of the system.
+// The Progress is only populated on the leader.
+type Status struct {
+	BasicStatus
+	Config   tracker.Config
+	Progress map[uint64]tracker.Progress
+}
+
+// BasicStatus contains basic information about the Raft peer. It does not allocate.
+type BasicStatus struct {
+	ID uint64
+
+	pb.HardState
+	SoftState
+
+	Applied uint64
+
+	LeadTransferee uint64
+}
+
+func getProgressCopy(r *raft) map[uint64]tracker.Progress {
+	m := make(map[uint64]tracker.Progress)
+	r.trk.Visit(func(id uint64, pr *tracker.Progress) {
+		p := *pr
+		p.Inflights = pr.Inflights.Clone()
+		pr = nil
+
+		m[id] = p
+	})
+	return m
+}
+
+func getBasicStatus(r *raft) BasicStatus {
+	s := BasicStatus{
+		ID:             r.id,
+		LeadTransferee: r.leadTransferee,
+	}
+	s.HardState = r.hardState()
+	s.SoftState = r.softState()
+	s.Applied = r.raftLog.applied
+	return s
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+	var s Status
+	s.BasicStatus = getBasicStatus(r)
+	if s.RaftState == StateLeader {
+		s.Progress = getProgressCopy(r)
+	}
+	s.Config = r.trk.Config.Clone()
+	return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
+
+	if len(s.Progress) == 0 {
+		j += "},"
+	} else {
+		for k, v := range s.Progress {
+			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+			j += subj
+		}
+		// remove the trailing ","
+		j = j[:len(j)-1] + "},"
+	}
+
+	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
+	return []byte(j), nil
+}
+
+func (s Status) String() string {
+	b, err := s.MarshalJSON()
+	if err != nil {
+		getLogger().Panicf("unexpected error: %v", err)
+	}
+	return string(b)
+}
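+
+// For reference, the JSON produced above has roughly this shape (values are
+// illustrative):
+//
+//	{"id":"1","term":2,"vote":"1","commit":4,"lead":"1","raftState":"StateLeader",
+//	 "applied":4,"progress":{"2":{"match":4,"next":5,"state":"StateReplicate"}},
+//	 "leadtransferee":"0"}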
diff --git a/vendor/go.etcd.io/raft/v3/storage.go b/vendor/go.etcd.io/raft/v3/storage.go
new file mode 100644
index 0000000..f616c31
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/storage.go
@@ -0,0 +1,310 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+	"sync"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+// ErrUnavailable is returned by Storage interface when the requested log entries
+// are unavailable.
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
+// snapshot is temporarily unavailable.
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+	// TODO(tbg): split this into two interfaces, LogStorage and StateStorage.
+
+	// InitialState returns the saved HardState and ConfState information.
+	InitialState() (pb.HardState, pb.ConfState, error)
+
+	// Entries returns a slice of consecutive log entries in the range [lo, hi),
+	// starting from lo. The maxSize limits the total size of the log entries
+	// returned, but Entries returns at least one entry if any.
+	//
+	// The caller of Entries owns the returned slice, and may append to it. The
+	// individual entries in the slice must not be mutated, neither by the Storage
+	// implementation nor the caller. Note that raft may forward these entries
+	// back to the application via Ready struct, so the corresponding handler must
+	// not mutate entries either (see comments in Ready struct).
+	//
+	// Since the caller may append to the returned slice, Storage implementation
+	// must protect its state from corruption that such appends may cause. For
+	// example, common ways to do so are:
+	//  - allocate the slice before returning it (safest option),
+	//  - return a slice protected by Go full slice expression, which causes
+	//  copying on appends (see MemoryStorage).
+	//
+	// Returns ErrCompacted if entry lo has been compacted, or ErrUnavailable if
+	// encountered an unavailable entry in [lo, hi).
+	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+
+	// Term returns the term of entry i, which must be in the range
+	// [FirstIndex()-1, LastIndex()]. The term of the entry before
+	// FirstIndex is retained for matching purposes even though the
+	// rest of that entry may not be available.
+	Term(i uint64) (uint64, error)
+	// LastIndex returns the index of the last entry in the log.
+	LastIndex() (uint64, error)
+	// FirstIndex returns the index of the first log entry that is
+	// possibly available via Entries (older entries have been incorporated
+	// into the latest Snapshot; if storage only contains the dummy entry the
+	// first log entry is not available).
+	FirstIndex() (uint64, error)
+	// Snapshot returns the most recent snapshot.
+	// If the snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
+	// so the raft state machine knows that Storage needs some time to prepare
+	// the snapshot and will call Snapshot later.
+	Snapshot() (pb.Snapshot, error)
+}
+
+type inMemStorageCallStats struct {
+	initialState, firstIndex, lastIndex, entries, term, snapshot int
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
+type MemoryStorage struct {
+	// Protects access to all fields. Most methods of MemoryStorage are
+	// run on the raft goroutine, but Append() is run on an application
+	// goroutine.
+	sync.Mutex
+
+	hardState pb.HardState
+	snapshot  pb.Snapshot
+	// ents[i] has raft log position i+snapshot.Metadata.Index
+	ents []pb.Entry
+
+	callStats inMemStorageCallStats
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+	return &MemoryStorage{
+		// When starting from scratch populate the list with a dummy entry at term zero.
+		ents: make([]pb.Entry, 1),
+	}
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+	ms.callStats.initialState++
+	return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.hardState = st
+	return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.callStats.entries++
+	offset := ms.ents[0].Index
+	if lo <= offset {
+		return nil, ErrCompacted
+	}
+	if hi > ms.lastIndex()+1 {
+		getLogger().Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+	}
+	// only contains dummy entries.
+	if len(ms.ents) == 1 {
+		return nil, ErrUnavailable
+	}
+
+	ents := limitSize(ms.ents[lo-offset:hi-offset], entryEncodingSize(maxSize))
+	// NB: use the full slice expression to limit what the caller can do with the
+	// returned slice. For example, an append will reallocate and copy this slice
+	// instead of corrupting the neighbouring ms.ents.
+	return ents[:len(ents):len(ents)], nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.callStats.term++
+	offset := ms.ents[0].Index
+	if i < offset {
+		return 0, ErrCompacted
+	}
+	if int(i-offset) >= len(ms.ents) {
+		return 0, ErrUnavailable
+	}
+	return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.callStats.lastIndex++
+	return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+	return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.callStats.firstIndex++
+	return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+	return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.callStats.snapshot++
+	return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+	ms.Lock()
+	defer ms.Unlock()
+
+	// handle check for old snapshot being applied
+	msIndex := ms.snapshot.Metadata.Index
+	snapIndex := snap.Metadata.Index
+	if msIndex >= snapIndex {
+		return ErrSnapOutOfDate
+	}
+
+	ms.snapshot = snap
+	ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+	return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	if i <= ms.snapshot.Metadata.Index {
+		return pb.Snapshot{}, ErrSnapOutOfDate
+	}
+
+	offset := ms.ents[0].Index
+	if i > ms.lastIndex() {
+		getLogger().Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+	}
+
+	ms.snapshot.Metadata.Index = i
+	ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+	if cs != nil {
+		ms.snapshot.Metadata.ConfState = *cs
+	}
+	ms.snapshot.Data = data
+	return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if compactIndex <= offset {
+		return ErrCompacted
+	}
+	if compactIndex > ms.lastIndex() {
+		getLogger().Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+	}
+
+	i := compactIndex - offset
+	// NB: allocate a new slice instead of reusing the old ms.ents. Entries in
+	// ms.ents are immutable, and can be referenced from outside MemoryStorage
+	// through slices returned by ms.Entries().
+	ents := make([]pb.Entry, 1, uint64(len(ms.ents))-i)
+	ents[0].Index = ms.ents[i].Index
+	ents[0].Term = ms.ents[i].Term
+	ents = append(ents, ms.ents[i+1:]...)
+	ms.ents = ents
+	return nil
+}
+
+// Append the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.entries[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+
+	ms.Lock()
+	defer ms.Unlock()
+
+	first := ms.firstIndex()
+	last := entries[0].Index + uint64(len(entries)) - 1
+
+	// shortcut if there is no new entry.
+	if last < first {
+		return nil
+	}
+	// truncate compacted entries
+	if first > entries[0].Index {
+		entries = entries[first-entries[0].Index:]
+	}
+
+	offset := entries[0].Index - ms.ents[0].Index
+	switch {
+	case uint64(len(ms.ents)) > offset:
+		// NB: full slice expression protects ms.ents at index >= offset from
+		// rewrites, as they may still be referenced from outside MemoryStorage.
+		ms.ents = append(ms.ents[:offset:offset], entries...)
+	case uint64(len(ms.ents)) == offset:
+		ms.ents = append(ms.ents, entries...)
+	default:
+		getLogger().Panicf("missing log entry [last: %d, append at: %d]",
+			ms.lastIndex(), entries[0].Index)
+	}
+	return nil
+}
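+
+// Illustrative sketch (assumed application code): feeding a MemoryStorage from
+// the data handed out via Ready. The snap/entries/hs/appliedIndex variables
+// are placeholders.
+//
+//	ms := NewMemoryStorage()
+//	_ = ms.ApplySnapshot(snap)   // optional: start from a snapshot
+//	_ = ms.Append(entries)       // persist entries from rd.Entries
+//	_ = ms.SetHardState(hs)      // persist rd.HardState
+//	_ = ms.Compact(appliedIndex) // drop entries that are already applied and snapshotted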
diff --git a/vendor/go.etcd.io/raft/v3/tracker/inflights.go b/vendor/go.etcd.io/raft/v3/tracker/inflights.go
new file mode 100644
index 0000000..cb091e5
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/inflights.go
@@ -0,0 +1,143 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+// inflight describes an in-flight MsgApp message.
+type inflight struct {
+	index uint64 // the index of the last entry inside the message
+	bytes uint64 // the total byte size of the entries in the message
+}
+
+// Inflights limits the number of MsgApp (represented by the largest index
+// contained within) sent to followers but not yet acknowledged by them. Callers
+// use Full() to check whether more messages can be sent, call Add() whenever
+// they are sending a new append, and release "quota" via FreeLE() whenever an
+// ack is received.
+type Inflights struct {
+	// the starting index in the buffer
+	start int
+
+	count int    // number of inflight messages in the buffer
+	bytes uint64 // number of inflight bytes
+
+	size     int    // the max number of inflight messages
+	maxBytes uint64 // the max total byte size of inflight messages
+
+	// buffer is a ring buffer containing info about all in-flight messages.
+	buffer []inflight
+}
+
+// NewInflights sets up an Inflights that allows up to size inflight messages,
+// with the total byte size up to maxBytes. If maxBytes is 0 then there is no
+// byte size limit. The maxBytes limit is soft, i.e. we accept a single message
+// that brings it from size < maxBytes to size >= maxBytes.
+func NewInflights(size int, maxBytes uint64) *Inflights {
+	return &Inflights{
+		size:     size,
+		maxBytes: maxBytes,
+	}
+}
+
+// Clone returns an *Inflights that is identical to but shares no memory with
+// the receiver.
+func (in *Inflights) Clone() *Inflights {
+	ins := *in
+	ins.buffer = append([]inflight(nil), in.buffer...)
+	return &ins
+}
+
+// Add notifies the Inflights that a new message with the given index and byte
+// size is being dispatched. Full() must be called prior to Add() to verify that
+// there is room for one more message, and consecutive calls to Add() must
+// provide a monotonic sequence of indexes.
+func (in *Inflights) Add(index, bytes uint64) {
+	if in.Full() {
+		panic("cannot add into a Full inflights")
+	}
+	next := in.start + in.count
+	size := in.size
+	if next >= size {
+		next -= size
+	}
+	if next >= len(in.buffer) {
+		in.grow()
+	}
+	in.buffer[next] = inflight{index: index, bytes: bytes}
+	in.count++
+	in.bytes += bytes
+}
+
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *Inflights) grow() {
+	newSize := len(in.buffer) * 2
+	if newSize == 0 {
+		newSize = 1
+	} else if newSize > in.size {
+		newSize = in.size
+	}
+	newBuffer := make([]inflight, newSize)
+	copy(newBuffer, in.buffer)
+	in.buffer = newBuffer
+}
+
+// FreeLE frees the inflights smaller or equal to the given `to` flight.
+func (in *Inflights) FreeLE(to uint64) {
+	if in.count == 0 || to < in.buffer[in.start].index {
+		// out of the left side of the window
+		return
+	}
+
+	idx := in.start
+	var i int
+	var bytes uint64
+	for i = 0; i < in.count; i++ {
+		if to < in.buffer[idx].index { // found the first large inflight
+			break
+		}
+		bytes += in.buffer[idx].bytes
+
+		// increase index and maybe rotate
+		size := in.size
+		if idx++; idx >= size {
+			idx -= size
+		}
+	}
+	// free i inflights and set new start index
+	in.count -= i
+	in.bytes -= bytes
+	in.start = idx
+	if in.count == 0 {
+		// inflights is empty, reset the start index so that we don't grow the
+		// buffer unnecessarily.
+		in.start = 0
+	}
+}
+
+// Full returns true if no more messages can be sent at the moment.
+func (in *Inflights) Full() bool {
+	return in.count == in.size || (in.maxBytes != 0 && in.bytes >= in.maxBytes)
+}
+
+// Count returns the number of inflight messages.
+func (in *Inflights) Count() int { return in.count }
+
+// reset frees all inflights.
+func (in *Inflights) reset() {
+	in.start = 0
+	in.count = 0
+	in.bytes = 0
+}
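+
+// Illustrative sketch: the intended Add/Full/FreeLE cycle around the ring
+// buffer above (indices and sizes are arbitrary).
+//
+//	in := NewInflights(3 /* size */, 0 /* no byte limit */)
+//	if !in.Full() {
+//		in.Add(10, 1024) // a MsgApp whose last entry has index 10 and 1 KiB of payload
+//	}
+//	in.FreeLE(10) // an ack for index 10 releases that quota again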
diff --git a/vendor/go.etcd.io/raft/v3/tracker/progress.go b/vendor/go.etcd.io/raft/v3/tracker/progress.go
new file mode 100644
index 0000000..5716661
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/progress.go
@@ -0,0 +1,314 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+)
+
+// Progress represents a follower's progress in the view of the leader. The
+// leader maintains the progress of all followers, and sends entries to each
+// follower based on its progress.
+//
+// NB(tbg): Progress is basically a state machine whose transitions are mostly
+// strewn around `*raft.raft`. Additionally, some fields are only used when in a
+// certain State. All of this isn't ideal.
+type Progress struct {
+	// Match is the index up to which the follower's log is known to match the
+	// leader's.
+	Match uint64
+	// Next is the log index of the next entry to send to this follower. All
+	// entries with indices in (Match, Next) interval are already in flight.
+	//
+	// Invariant: 0 <= Match < Next.
+	// NB: it follows that Next >= 1.
+	//
+	// In StateSnapshot, Next == PendingSnapshot + 1.
+	Next uint64
+
+	// sentCommit is the highest commit index in flight to the follower.
+	//
+	// Generally, it is monotonic, but can regress in some cases, e.g. when
+	// converting to `StateProbe` or when receiving a rejection from a follower.
+	//
+	// In StateSnapshot, sentCommit == PendingSnapshot == Next-1.
+	sentCommit uint64
+
+	// State defines how the leader should interact with the follower.
+	//
+	// When in StateProbe, leader sends at most one replication message
+	// per heartbeat interval. It also probes actual progress of the follower.
+	//
+	// When in StateReplicate, leader optimistically increases next
+	// to the latest entry sent after sending replication message. This is
+	// an optimized state for fast replicating log entries to the follower.
+	//
+	// When in StateSnapshot, leader should have sent out snapshot
+	// before and stops sending any replication message.
+	State StateType
+
+	// PendingSnapshot is used in StateSnapshot and tracks the last index of the
+	// leader at the time at which it realized a snapshot was necessary. This
+	// matches the index in the MsgSnap message emitted from raft.
+	//
+	// While there is a pending snapshot, replication to the follower is paused.
+	// The follower will transition back to StateReplicate if the leader
+	// receives an MsgAppResp from it that reconnects the follower to the
+	// leader's log (such an MsgAppResp is emitted when the follower applies a
+	// snapshot). It may be surprising that PendingSnapshot is not taken into
+	// account here, but consider that complex systems may delegate the sending
+	// of snapshots to alternative datasources (i.e. not the leader). In such
+	// setups, it is difficult to manufacture a snapshot at a particular index
+	// requested by raft and the actual index may be ahead or behind. This
+	// should be okay, as long as the snapshot allows replication to resume.
+	//
+	// The follower will transition to StateProbe if ReportSnapshot is called on
+	// the leader; if SnapshotFinish is passed then PendingSnapshot becomes the
+	// basis for the next attempt to append. In practice, the first mechanism is
+	// the one that is relevant in most cases. However, if this MsgAppResp is
+	// lost (fallible network) then the second mechanism ensures that in this
+	// case the follower does not erroneously remain in StateSnapshot.
+	PendingSnapshot uint64
+
+	// RecentActive is true if the progress is recently active. Receiving any messages
+	// from the corresponding follower indicates the progress is active.
+	// RecentActive can be reset to false after an election timeout.
+	// This is always true on the leader.
+	RecentActive bool
+
+	// MsgAppFlowPaused is used when the MsgApp flow to a node is throttled. This
+	// happens in StateProbe, or StateReplicate with saturated Inflights. In both
+	// cases, we need to continue sending MsgApp once in a while to guarantee
+	// progress, but we only do so when MsgAppFlowPaused is false (it is reset on
+	// receiving a heartbeat response), to not overflow the receiver. See
+	// IsPaused().
+	MsgAppFlowPaused bool
+
+	// Inflights is a sliding window for the inflight messages.
+	// Each inflight message contains one or more log entries.
+	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
+	// Thus inflight effectively limits both the number of inflight messages
+	// and the bandwidth each Progress can use.
+	// When inflights is Full, no more message should be sent.
+	// When a leader sends out a message, the index of the last
+	// entry should be added to inflights. The index MUST be added
+	// into inflights in order.
+	// When a leader receives a reply, the previous inflights should
+	// be freed by calling inflights.FreeLE with the index of the last
+	// received entry.
+	Inflights *Inflights
+
+	// IsLearner is true if this progress is tracked for a learner.
+	IsLearner bool
+}
+
+// ResetState moves the Progress into the specified State, resetting MsgAppFlowPaused,
+// PendingSnapshot, and Inflights.
+func (pr *Progress) ResetState(state StateType) {
+	pr.MsgAppFlowPaused = false
+	pr.PendingSnapshot = 0
+	pr.State = state
+	pr.Inflights.reset()
+}
+
+// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or,
+// optionally and if larger, the index of the pending snapshot.
+func (pr *Progress) BecomeProbe() {
+	// If the original state is StateSnapshot, progress knows that
+	// the pending snapshot has been sent to this peer successfully, then
+	// probes from pendingSnapshot + 1.
+	if pr.State == StateSnapshot {
+		pendingSnapshot := pr.PendingSnapshot
+		pr.ResetState(StateProbe)
+		pr.Next = max(pr.Match+1, pendingSnapshot+1)
+	} else {
+		pr.ResetState(StateProbe)
+		pr.Next = pr.Match + 1
+	}
+	pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+}
+
+// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1.
+func (pr *Progress) BecomeReplicate() {
+	pr.ResetState(StateReplicate)
+	pr.Next = pr.Match + 1
+}
+
+// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending
+// snapshot index.
+func (pr *Progress) BecomeSnapshot(snapshoti uint64) {
+	pr.ResetState(StateSnapshot)
+	pr.PendingSnapshot = snapshoti
+	pr.Next = snapshoti + 1
+	pr.sentCommit = snapshoti
+}
+
+// SentEntries updates the progress on the given number of consecutive entries
+// being sent in a MsgApp, with the given total bytes size, appended at log
+// indices >= pr.Next.
+//
+// Must be used with StateProbe or StateReplicate.
+func (pr *Progress) SentEntries(entries int, bytes uint64) {
+	switch pr.State {
+	case StateReplicate:
+		if entries > 0 {
+			pr.Next += uint64(entries)
+			pr.Inflights.Add(pr.Next-1, bytes)
+		}
+		// If this message overflows the in-flights tracker, or it was already full,
+		// consider this message being a probe, so that the flow is paused.
+		pr.MsgAppFlowPaused = pr.Inflights.Full()
+	case StateProbe:
+		// TODO(pavelkalinnikov): this condition captures the previous behaviour,
+		// but we should set MsgAppFlowPaused unconditionally for simplicity, because any
+		// MsgApp in StateProbe is a probe, not only non-empty ones.
+		if entries > 0 {
+			pr.MsgAppFlowPaused = true
+		}
+	default:
+		panic(fmt.Sprintf("sending append in unhandled state %s", pr.State))
+	}
+}
+
+// CanBumpCommit returns true if sending the given commit index can potentially
+// advance the follower's commit index.
+func (pr *Progress) CanBumpCommit(index uint64) bool {
+	// Sending the given commit index may bump the follower's commit index up to
+	// Next-1 in normal operation, or higher in some rare cases. Allow sending a
+	// commit index eagerly only if we haven't already sent one that bumps the
+	// follower's commit all the way to Next-1.
+	return index > pr.sentCommit && pr.sentCommit < pr.Next-1
+}
+
+// SentCommit updates the sentCommit.
+func (pr *Progress) SentCommit(commit uint64) {
+	pr.sentCommit = commit
+}
+
+// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the
+// index acked by it. The method returns false if the given n index comes from
+// an outdated message. Otherwise it updates the progress and returns true.
+func (pr *Progress) MaybeUpdate(n uint64) bool {
+	if n <= pr.Match {
+		return false
+	}
+	pr.Match = n
+	pr.Next = max(pr.Next, n+1) // invariant: Match < Next
+	pr.MsgAppFlowPaused = false
+	return true
+}
+
+// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The
+// arguments are the index of the append message rejected by the follower, and
+// the hint that we want to decrease to.
+//
+// Rejections can happen spuriously as messages are sent out of order or
+// duplicated. In such cases, the rejection pertains to an index that the
+// Progress already knows was previously acknowledged, and false is returned
+// without changing the Progress.
+//
+// If the rejection is genuine, Next is lowered sensibly, and the Progress is
+// cleared for sending log entries.
+func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool {
+	if pr.State == StateReplicate {
+		// The rejection must be stale if the progress has matched and "rejected"
+		// is smaller than "match".
+		if rejected <= pr.Match {
+			return false
+		}
+		// Directly decrease next to match + 1.
+		//
+		// TODO(tbg): why not use matchHint if it's larger?
+		pr.Next = pr.Match + 1
+		// Regress the sentCommit since it unlikely has been applied.
+		pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+		return true
+	}
+
+	// The rejection must be stale if "rejected" does not match next - 1. This
+	// is because non-replicating followers are probed one entry at a time.
+	// The check is a best effort assuming message reordering is rare.
+	if pr.Next-1 != rejected {
+		return false
+	}
+
+	pr.Next = max(min(rejected, matchHint+1), pr.Match+1)
+	// Regress the sentCommit since it unlikely has been applied.
+	pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+	pr.MsgAppFlowPaused = false
+	return true
+}
+
+// IsPaused returns whether sending log entries to this node has been throttled.
+// This is done when a node has rejected recent MsgApps, is currently waiting
+// for a snapshot, or has reached the MaxInflightMsgs limit. In normal
+// operation, this is false. A throttled node will be contacted less frequently
+// until it has reached a state in which it's able to accept a steady stream of
+// log entries again.
+func (pr *Progress) IsPaused() bool {
+	switch pr.State {
+	case StateProbe:
+		return pr.MsgAppFlowPaused
+	case StateReplicate:
+		return pr.MsgAppFlowPaused
+	case StateSnapshot:
+		return true
+	default:
+		panic("unexpected state")
+	}
+}
+
+func (pr *Progress) String() string {
+	var buf strings.Builder
+	fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next)
+	if pr.IsLearner {
+		fmt.Fprint(&buf, " learner")
+	}
+	if pr.IsPaused() {
+		fmt.Fprint(&buf, " paused")
+	}
+	if pr.PendingSnapshot > 0 {
+		fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot)
+	}
+	if !pr.RecentActive {
+		fmt.Fprint(&buf, " inactive")
+	}
+	if n := pr.Inflights.Count(); n > 0 {
+		fmt.Fprintf(&buf, " inflight=%d", n)
+		if pr.Inflights.Full() {
+			fmt.Fprint(&buf, "[full]")
+		}
+	}
+	return buf.String()
+}
+
+// ProgressMap is a map of *Progress.
+type ProgressMap map[uint64]*Progress
+
+// String prints the ProgressMap in sorted key order, one Progress per line.
+func (m ProgressMap) String() string {
+	ids := make([]uint64, 0, len(m))
+	for k := range m {
+		ids = append(ids, k)
+	}
+	slices.Sort(ids)
+	var buf strings.Builder
+	for _, id := range ids {
+		fmt.Fprintf(&buf, "%d: %s\n", id, m[id])
+	}
+	return buf.String()
+}
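+
+// Illustrative sketch (simplified; the real transitions are driven from
+// raft.go): how a leader-side Progress typically evolves.
+//
+//	pr := &Progress{Next: 1, Inflights: NewInflights(256, 0)}
+//	pr.BecomeReplicate()
+//	pr.SentEntries(5, 4096)   // optimistically bump Next after sending 5 entries
+//	if pr.MaybeUpdate(5) {    // the follower acked index 5
+//		pr.Inflights.FreeLE(5)
+//	}
+//	if pr.MaybeDecrTo(6, 5) { // a later rejection at index 6 with hint 5
+//		pr.BecomeProbe()      // fall back to probing from Match+1
+//	}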
diff --git a/vendor/go.etcd.io/raft/v3/tracker/state.go b/vendor/go.etcd.io/raft/v3/tracker/state.go
new file mode 100644
index 0000000..7dbdd63
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/state.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+// StateType is the state of a tracked follower.
+type StateType uint64
+
+const (
+	// StateProbe indicates a follower whose last index isn't known. Such a
+	// follower is "probed" (i.e. an append sent periodically) to narrow down
+	// its last index. In the ideal (and common) case, only one round of probing
+	// is necessary as the follower will react with a hint. Followers that are
+	// probed over extended periods of time are often offline.
+	StateProbe StateType = iota
+	// StateReplicate is the steady state in which a follower eagerly receives
+	// log entries to append to its log.
+	StateReplicate
+	// StateSnapshot indicates a follower that needs log entries not available
+	// from the leader's Raft log. Such a follower needs a full snapshot to
+	// return to StateReplicate.
+	StateSnapshot
+)
+
+var prstmap = [...]string{
+	"StateProbe",
+	"StateReplicate",
+	"StateSnapshot",
+}
+
+func (st StateType) String() string { return prstmap[st] }
diff --git a/vendor/go.etcd.io/raft/v3/tracker/tracker.go b/vendor/go.etcd.io/raft/v3/tracker/tracker.go
new file mode 100644
index 0000000..17c4c93
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/tracker.go
@@ -0,0 +1,281 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+
+	"go.etcd.io/raft/v3/quorum"
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// Config reflects the configuration tracked in a ProgressTracker.
+type Config struct {
+	Voters quorum.JointConfig
+	// AutoLeave is true if the configuration is joint and a transition to the
+	// incoming configuration should be carried out automatically by Raft when
+	// this is possible. If false, the configuration will be joint until the
+	// application initiates the transition manually.
+	AutoLeave bool
+	// Learners is a set of IDs corresponding to the learners active in the
+	// current configuration.
+	//
+	// Invariant: Learners and Voters do not intersect, i.e. if a peer is in
+	// either half of the joint config, it can't be a learner; if it is a
+	// learner it can't be in either half of the joint config. This invariant
+	// simplifies the implementation since it allows peers to have clarity about
+	// their current role without taking into account joint consensus.
+	Learners map[uint64]struct{}
+	// When we turn a voter into a learner during a joint consensus transition,
+	// we cannot add the learner directly when entering the joint state. This is
+	// because this would violate the invariant that the intersection of
+	// voters and learners is empty. For example, assume a Voter is removed and
+	// immediately re-added as a learner (or in other words, it is demoted):
+	//
+	// Initially, the configuration will be
+	//
+	//   voters:   {1 2 3}
+	//   learners: {}
+	//
+	// and we want to demote 3. Entering the joint configuration, we naively get
+	//
+	//   voters:   {1 2} & {1 2 3}
+	//   learners: {3}
+	//
+	// but this violates the invariant (3 is both voter and learner). Instead,
+	// we get
+	//
+	//   voters:   {1 2} & {1 2 3}
+	//   learners: {}
+	//   next_learners: {3}
+	//
+	// Where 3 is now still purely a voter, but we are remembering the intention
+	// to make it a learner upon transitioning into the final configuration:
+	//
+	//   voters:   {1 2}
+	//   learners: {3}
+	//   next_learners: {}
+	//
+	// Note that next_learners is not used while adding a learner that is not
+	// also a voter in the joint config. In this case, the learner is added
+	// right away when entering the joint configuration, so that it is caught up
+	// as soon as possible.
+	LearnersNext map[uint64]struct{}
+}
+
+func (c Config) String() string {
+	var buf strings.Builder
+	fmt.Fprintf(&buf, "voters=%s", c.Voters)
+	if c.Learners != nil {
+		fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String())
+	}
+	if c.LearnersNext != nil {
+		fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String())
+	}
+	if c.AutoLeave {
+		fmt.Fprint(&buf, " autoleave")
+	}
+	return buf.String()
+}
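+
+// As an illustration (not part of the upstream code, and assuming the String
+// formats of the quorum package): a joint configuration with voters
+// {1 2}&{1 2 3}, learner 4 and AutoLeave set would render roughly as
+//
+//   voters=(1 2)&&(1 2 3) learners=(4) autoleave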
+
+// Clone returns a copy of the Config that shares no memory with the original.
+func (c *Config) Clone() Config {
+	clone := func(m map[uint64]struct{}) map[uint64]struct{} {
+		if m == nil {
+			return nil
+		}
+		mm := make(map[uint64]struct{}, len(m))
+		for k := range m {
+			mm[k] = struct{}{}
+		}
+		return mm
+	}
+	return Config{
+		Voters:       quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])},
+		Learners:     clone(c.Learners),
+		LearnersNext: clone(c.LearnersNext),
+	}
+}
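+
+// Note that Clone copies only the map-backed fields; the boolean AutoLeave
+// flag is not carried over and is left at its zero value in the returned
+// Config.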
+
+// ProgressTracker tracks the currently active configuration and the information
+// known about the nodes and learners in it. In particular, it tracks the match
+// index for each peer, which in turn allows reasoning about the committed index.
+type ProgressTracker struct {
+	Config
+
+	Progress ProgressMap
+
+	Votes map[uint64]bool
+
+	MaxInflight      int
+	MaxInflightBytes uint64
+}
+
+// MakeProgressTracker initializes a ProgressTracker.
+func MakeProgressTracker(maxInflight int, maxBytes uint64) ProgressTracker {
+	p := ProgressTracker{
+		MaxInflight:      maxInflight,
+		MaxInflightBytes: maxBytes,
+		Config: Config{
+			Voters: quorum.JointConfig{
+				quorum.MajorityConfig{},
+				nil, // only populated when used
+			},
+			Learners:     nil, // only populated when used
+			LearnersNext: nil, // only populated when used
+		},
+		Votes:    map[uint64]bool{},
+		Progress: map[uint64]*Progress{},
+	}
+	return p
+}
+
+// ConfState returns a ConfState representing the active configuration.
+func (p *ProgressTracker) ConfState() pb.ConfState {
+	return pb.ConfState{
+		Voters:         p.Voters[0].Slice(),
+		VotersOutgoing: p.Voters[1].Slice(),
+		Learners:       quorum.MajorityConfig(p.Learners).Slice(),
+		LearnersNext:   quorum.MajorityConfig(p.LearnersNext).Slice(),
+		AutoLeave:      p.AutoLeave,
+	}
+}
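+
+// As an illustration (not upstream code): for a joint configuration with
+// voters {1 2}&{1 2 3} and learner 4, the returned ConfState carries
+// Voters [1 2], VotersOutgoing [1 2 3], Learners [4] and an empty
+// LearnersNext, with each slice listing the IDs in ascending order.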
+
+// IsSingleton returns true if (and only if) there is only one voting member
+// (i.e. the leader) in the current configuration.
+func (p *ProgressTracker) IsSingleton() bool {
+	return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0
+}
+
+type matchAckIndexer map[uint64]*Progress
+
+var _ quorum.AckedIndexer = matchAckIndexer(nil)
+
+// AckedIndex implements quorum.AckedIndexer.
+func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) {
+	pr, ok := l[id]
+	if !ok {
+		return 0, false
+	}
+	return quorum.Index(pr.Match), true
+}
+
+// Committed returns the largest log index known to be committed based on what
+// the voting members of the group have acknowledged.
+func (p *ProgressTracker) Committed() uint64 {
+	return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress)))
+}
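+
+// As a worked example (illustrative only): for a single-majority config of
+// voters {1, 2, 3} with Match indexes 5, 3 and 8, Committed returns 5, the
+// highest index acknowledged by a quorum (voters 1 and 3 have acknowledged
+// index 5, while only voter 3 has acknowledged anything higher).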
+
+// Visit invokes the supplied closure for all tracked progresses in stable order.
+func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) {
+	n := len(p.Progress)
+	// We need to sort the IDs and don't want to allocate since this is hot code.
+	// The optimization here mirrors that in `(MajorityConfig).CommittedIndex`,
+	// see there for details.
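+	// For configurations of up to 7 peers (the common case), the IDs fit into
+	// the fixed-size array below and no heap allocation takes place.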
+	var sl [7]uint64
+	var ids []uint64
+	if len(sl) >= n {
+		ids = sl[:n]
+	} else {
+		ids = make([]uint64, n)
+	}
+	for id := range p.Progress {
+		n--
+		ids[n] = id
+	}
+	slices.Sort(ids)
+	for _, id := range ids {
+		f(id, p.Progress[id])
+	}
+}
+
+// QuorumActive returns true if the quorum is active from the view of the local
+// raft state machine. Otherwise, it returns false.
+func (p *ProgressTracker) QuorumActive() bool {
+	votes := map[uint64]bool{}
+	p.Visit(func(id uint64, pr *Progress) {
+		if pr.IsLearner {
+			return
+		}
+		votes[id] = pr.RecentActive
+	})
+
+	return p.Voters.VoteResult(votes) == quorum.VoteWon
+}
+
+// VoterNodes returns a sorted slice of voters.
+func (p *ProgressTracker) VoterNodes() []uint64 {
+	m := p.Voters.IDs()
+	nodes := make([]uint64, 0, len(m))
+	for id := range m {
+		nodes = append(nodes, id)
+	}
+	slices.Sort(nodes)
+	return nodes
+}
+
+// LearnerNodes returns a sorted slice of learners.
+func (p *ProgressTracker) LearnerNodes() []uint64 {
+	if len(p.Learners) == 0 {
+		return nil
+	}
+	nodes := make([]uint64, 0, len(p.Learners))
+	for id := range p.Learners {
+		nodes = append(nodes, id)
+	}
+	slices.Sort(nodes)
+	return nodes
+}
+
+// ResetVotes prepares for a new round of vote counting via RecordVote.
+func (p *ProgressTracker) ResetVotes() {
+	p.Votes = map[uint64]bool{}
+}
+
+// RecordVote records that the node with the given id voted for this Raft
+// instance if v == true (and declined it otherwise).
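+// Only the first vote reported for a given id is recorded; repeated calls for
+// the same id leave the originally recorded value unchanged.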
+func (p *ProgressTracker) RecordVote(id uint64, v bool) {
+	_, ok := p.Votes[id]
+	if !ok {
+		p.Votes[id] = v
+	}
+}
+
+// TallyVotes returns the number of granted and rejected Votes, and whether the
+// election outcome is known.
+func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) {
+	// Make sure to populate granted/rejected correctly even if the Votes map
+	// contains members no longer part of the configuration. This doesn't really
+	// matter in the way the numbers are used (they're informational), but might
+	// as well get it right.
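+	//
+	// As an illustration (not upstream code): with three voters and recorded
+	// votes {1: true, 2: false}, TallyVotes reports granted=1, rejected=1 and a
+	// pending result, since neither a quorum of grants nor a quorum of
+	// rejections has been reached.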
+	for id, pr := range p.Progress {
+		if pr.IsLearner {
+			continue
+		}
+		v, voted := p.Votes[id]
+		if !voted {
+			continue
+		}
+		if v {
+			granted++
+		} else {
+			rejected++
+		}
+	}
+	result := p.Voters.VoteResult(p.Votes)
+	return granted, rejected, result
+}
diff --git a/vendor/go.etcd.io/raft/v3/types.go b/vendor/go.etcd.io/raft/v3/types.go
new file mode 100644
index 0000000..bb24d43
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/types.go
@@ -0,0 +1,106 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// entryID uniquely identifies a raft log entry.
+//
+// Every entry is associated with a leadership term which issued this entry and
+// initially appended it to the log. There can only be one leader at any term,
+// and a leader never issues two entries with the same index.
+type entryID struct {
+	term  uint64
+	index uint64
+}
+
+// pbEntryID returns the ID of the given pb.Entry.
+func pbEntryID(entry *pb.Entry) entryID {
+	return entryID{term: entry.Term, index: entry.Index}
+}
+
+// logSlice describes a correct slice of a raft log.
+//
+// Every log slice is considered in a context of a specific leader term. This
+// term does not necessarily match entryID.term of the entries, since a leader
+// log contains both entries from its own term, and some earlier terms.
+//
+// Two slices with a matching logSlice.term are guaranteed to be consistent,
+// i.e. they never contain two different entries at the same index. The reverse
+// is not true: two slices with different logSlice.term may contain both
+// matching and mismatching entries. Specifically, logs at two different leader
+// terms share a common prefix, after which they *permanently* diverge.
+//
+// A well-formed logSlice conforms to raft safety properties. It provides the
+// following guarantees:
+//
+//  1. entries[i].Index == prev.index + 1 + i,
+//  2. prev.term <= entries[0].Term,
+//  3. entries[i-1].Term <= entries[i].Term,
+//  4. entries[len-1].Term <= term.
+//
+// Property (1) means the slice is contiguous. Properties (2) and (3) mean that
+// the terms of the entries in a log never regress. Property (4) means that a
+// leader log at a specific term never has entries from higher terms.
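+//
+// For example (illustrative only): term=3, prev={term: 1, index: 10} and
+// entries with (term, index) pairs (2, 11), (3, 12), (3, 13) satisfy all four
+// properties, whereas reordering the entry terms to (3, 11), (2, 12) would
+// violate property (3).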
+//
+// Users of this struct can assume the invariants hold true. The exception is
+// the "gateway" code that initially constructs a logSlice, such as when its
+// content is sourced from a message received via transport, or from Storage,
+// or in test code that manually hard-codes this struct. In these cases, the
+// invariants should be validated using the valid() method.
+type logSlice struct {
+	// term is the leader term containing the given entries in its log.
+	term uint64
+	// prev is the ID of the entry immediately preceding the entries.
+	prev entryID
+	// entries contains the consecutive entries representing this slice.
+	entries []pb.Entry
+}
+
+// lastIndex returns the index of the last entry in this log slice. Returns
+// prev.index if there are no entries.
+func (s logSlice) lastIndex() uint64 {
+	return s.prev.index + uint64(len(s.entries))
+}
+
+// lastEntryID returns the ID of the last entry in this log slice, or prev if
+// there are no entries.
+func (s logSlice) lastEntryID() entryID {
+	if ln := len(s.entries); ln != 0 {
+		return pbEntryID(&s.entries[ln-1])
+	}
+	return s.prev
+}
+
+// valid returns nil iff the logSlice is a well-formed log slice. See logSlice
+// comment for details on what constitutes a valid raft log slice.
+func (s logSlice) valid() error {
+	prev := s.prev
+	for i := range s.entries {
+		id := pbEntryID(&s.entries[i])
+		if id.term < prev.term || id.index != prev.index+1 {
+			return fmt.Errorf("leader term %d: entries %+v and %+v not consistent", s.term, prev, id)
+		}
+		prev = id
+	}
+	if s.term < prev.term {
+		return fmt.Errorf("leader term %d: entry %+v has a newer term", s.term, prev)
+	}
+	return nil
+}
diff --git a/vendor/go.etcd.io/raft/v3/util.go b/vendor/go.etcd.io/raft/v3/util.go
new file mode 100644
index 0000000..8f78178
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/util.go
@@ -0,0 +1,325 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+var isLocalMsg = [...]bool{
+	pb.MsgHup:               true,
+	pb.MsgBeat:              true,
+	pb.MsgUnreachable:       true,
+	pb.MsgSnapStatus:        true,
+	pb.MsgCheckQuorum:       true,
+	pb.MsgStorageAppend:     true,
+	pb.MsgStorageAppendResp: true,
+	pb.MsgStorageApply:      true,
+	pb.MsgStorageApplyResp:  true,
+}
+
+var isResponseMsg = [...]bool{
+	pb.MsgAppResp:           true,
+	pb.MsgVoteResp:          true,
+	pb.MsgHeartbeatResp:     true,
+	pb.MsgUnreachable:       true,
+	pb.MsgReadIndexResp:     true,
+	pb.MsgPreVoteResp:       true,
+	pb.MsgStorageAppendResp: true,
+	pb.MsgStorageApplyResp:  true,
+}
+
+func isMsgInArray(msgt pb.MessageType, arr []bool) bool {
+	i := int(msgt)
+	return i < len(arr) && arr[i]
+}
+
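+// IsLocalMsg reports whether msgt is a local message type, i.e. one that is
+// generated and consumed on the local node rather than exchanged between peers.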
+func IsLocalMsg(msgt pb.MessageType) bool {
+	return isMsgInArray(msgt, isLocalMsg[:])
+}
+
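+// IsResponseMsg reports whether msgt is a response message type.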
+func IsResponseMsg(msgt pb.MessageType) bool {
+	return isMsgInArray(msgt, isResponseMsg[:])
+}
+
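+// IsLocalMsgTarget reports whether id addresses one of the local storage
+// threads (LocalAppendThread or LocalApplyThread).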
+func IsLocalMsgTarget(id uint64) bool {
+	return id == LocalAppendThread || id == LocalApplyThread
+}
+
+// voteRespMsgType maps vote and prevote message types to their corresponding responses.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+	switch msgt {
+	case pb.MsgVote:
+		return pb.MsgVoteResp
+	case pb.MsgPreVote:
+		return pb.MsgPreVoteResp
+	default:
+		panic(fmt.Sprintf("not a vote message: %s", msgt))
+	}
+}
+
+func DescribeHardState(hs pb.HardState) string {
+	var buf strings.Builder
+	fmt.Fprintf(&buf, "Term:%d", hs.Term)
+	if hs.Vote != 0 {
+		fmt.Fprintf(&buf, " Vote:%d", hs.Vote)
+	}
+	fmt.Fprintf(&buf, " Commit:%d", hs.Commit)
+	return buf.String()
+}
+
+func DescribeSoftState(ss SoftState) string {
+	return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState)
+}
+
+func DescribeConfState(state pb.ConfState) string {
+	return fmt.Sprintf(
+		"Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v",
+		state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave,
+	)
+}
+
+func DescribeSnapshot(snap pb.Snapshot) string {
+	m := snap.Metadata
+	return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState))
+}
+
+func DescribeReady(rd Ready, f EntryFormatter) string {
+	var buf strings.Builder
+	if rd.SoftState != nil {
+		fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState))
+		buf.WriteByte('\n')
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState))
+		buf.WriteByte('\n')
+	}
+	if len(rd.ReadStates) > 0 {
+		fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates)
+	}
+	if len(rd.Entries) > 0 {
+		buf.WriteString("Entries:\n")
+		fmt.Fprint(&buf, DescribeEntries(rd.Entries, f))
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot))
+	}
+	if len(rd.CommittedEntries) > 0 {
+		buf.WriteString("CommittedEntries:\n")
+		fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f))
+	}
+	if len(rd.Messages) > 0 {
+		buf.WriteString("Messages:\n")
+		for _, msg := range rd.Messages {
+			fmt.Fprint(&buf, DescribeMessage(msg, f))
+			buf.WriteByte('\n')
+		}
+	}
+	if buf.Len() > 0 {
+		return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String())
+	}
+	return "<empty Ready>"
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+	return describeMessageWithIndent("", m, f)
+}
+
+func describeMessageWithIndent(indent string, m pb.Message, f EntryFormatter) string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%s%s->%s %v Term:%d Log:%d/%d", indent,
+		describeTarget(m.From), describeTarget(m.To), m.Type, m.Term, m.LogTerm, m.Index)
+	if m.Reject {
+		fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint)
+	}
+	if m.Commit != 0 {
+		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+	}
+	if m.Vote != 0 {
+		fmt.Fprintf(&buf, " Vote:%d", m.Vote)
+	}
+	if ln := len(m.Entries); ln == 1 {
+		fmt.Fprintf(&buf, " Entries:[%s]", DescribeEntry(m.Entries[0], f))
+	} else if ln > 1 {
+		fmt.Fprint(&buf, " Entries:[")
+		for _, e := range m.Entries {
+			fmt.Fprintf(&buf, "\n%s  ", indent)
+			buf.WriteString(DescribeEntry(e, f))
+		}
+		fmt.Fprintf(&buf, "\n%s]", indent)
+	}
+	if s := m.Snapshot; s != nil && !IsEmptySnap(*s) {
+		fmt.Fprintf(&buf, "\n%s  Snapshot: %s", indent, DescribeSnapshot(*s))
+	}
+	if len(m.Responses) > 0 {
+		fmt.Fprint(&buf, " Responses:[")
+		for _, m := range m.Responses {
+			buf.WriteString("\n")
+			buf.WriteString(describeMessageWithIndent(indent+"  ", m, f))
+		}
+		fmt.Fprintf(&buf, "\n%s]", indent)
+	}
+	return buf.String()
+}
+
+func describeTarget(id uint64) string {
+	switch id {
+	case None:
+		return "None"
+	case LocalAppendThread:
+		return "AppendThread"
+	case LocalApplyThread:
+		return "ApplyThread"
+	default:
+		return fmt.Sprintf("%x", id)
+	}
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+	if f == nil {
+		f = func(data []byte) string { return fmt.Sprintf("%q", data) }
+	}
+
+	formatConfChange := func(cc pb.ConfChangeI) string {
+		// TODO(tbg): give the EntryFormatter a type argument so that it gets
+		// a chance to expose the Context.
+		return pb.ConfChangesToString(cc.AsV2().Changes)
+	}
+
+	var formatted string
+	switch e.Type {
+	case pb.EntryNormal:
+		formatted = f(e.Data)
+	case pb.EntryConfChange:
+		var cc pb.ConfChange
+		if err := cc.Unmarshal(e.Data); err != nil {
+			formatted = err.Error()
+		} else {
+			formatted = formatConfChange(cc)
+		}
+	case pb.EntryConfChangeV2:
+		var cc pb.ConfChangeV2
+		if err := cc.Unmarshal(e.Data); err != nil {
+			formatted = err.Error()
+		} else {
+			formatted = formatConfChange(cc)
+		}
+	}
+	if formatted != "" {
+		formatted = " " + formatted
+	}
+	return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted)
+}
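+
+// For example (illustrative only), with a nil formatter an EntryNormal entry
+// at term 1, index 2 carrying the payload "foo" is rendered as
+//
+//   1/2 EntryNormal "foo"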
+
+// DescribeEntries calls DescribeEntry for each Entry, adding a newline to
+// each.
+func DescribeEntries(ents []pb.Entry, f EntryFormatter) string {
+	var buf bytes.Buffer
+	for _, e := range ents {
+		_, _ = buf.WriteString(DescribeEntry(e, f) + "\n")
+	}
+	return buf.String()
+}
+
+// entryEncodingSize represents the protocol buffer encoding size of one or more
+// entries.
+type entryEncodingSize uint64
+
+func entsSize(ents []pb.Entry) entryEncodingSize {
+	var size entryEncodingSize
+	for _, ent := range ents {
+		size += entryEncodingSize(ent.Size())
+	}
+	return size
+}
+
+// limitSize returns the longest prefix of the given entries slice such that
+// its total byte size does not exceed maxSize. It always returns a non-empty
+// slice if the input is non-empty: as an exception, if the size of the first
+// entry alone exceeds maxSize, a slice containing only that entry is returned.
+func limitSize(ents []pb.Entry, maxSize entryEncodingSize) []pb.Entry {
+	if len(ents) == 0 {
+		return ents
+	}
+	size := ents[0].Size()
+	for limit := 1; limit < len(ents); limit++ {
+		size += ents[limit].Size()
+		if entryEncodingSize(size) > maxSize {
+			return ents[:limit]
+		}
+	}
+	return ents
+}
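+
+// For example (illustrative only): given three entries whose encoded sizes
+// are 100, 200 and 300 bytes, limitSize with maxSize=250 keeps only the first
+// entry (it is always retained, and adding the second would exceed the
+// limit), whereas maxSize=350 keeps the first two.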
+
+// entryPayloadSize represents the size of one or more entries' payloads.
+// Notably, it does not depend on an entry's Index or Term. Entries with empty
+// payloads, like those proposed after a leadership change, are considered
+// to be zero size.
+type entryPayloadSize uint64
+
+// payloadSize is the size of the payload of the provided entry.
+func payloadSize(e pb.Entry) entryPayloadSize {
+	return entryPayloadSize(len(e.Data))
+}
+
+// payloadsSize is the size of the payloads of the provided entries.
+func payloadsSize(ents []pb.Entry) entryPayloadSize {
+	var s entryPayloadSize
+	for _, e := range ents {
+		s += payloadSize(e)
+	}
+	return s
+}
+
+func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) {
+	err := cs1.Equivalent(cs2)
+	if err == nil {
+		return
+	}
+	l.Panic(err)
+}
+
+// extend appends vals to the given dst slice. It differs from the standard
+// slice append only in the way it allocates memory. If cap(dst) is not enough
+// for appending the values, a slice of exactly len(dst)+len(vals) is allocated.
+//
+// Use this instead of standard append in situations where this is the last
+// append to dst, so there is no sense in allocating more than needed.
+func extend(dst, vals []pb.Entry) []pb.Entry {
+	need := len(dst) + len(vals)
+	if need <= cap(dst) {
+		return append(dst, vals...) // does not allocate
+	}
+	buf := make([]pb.Entry, need, need) // allocates precisely what's needed
+	copy(buf, dst)
+	copy(buf[len(dst):], vals)
+	return buf
+}
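+
+// For example (illustrative only): when appending 3 entries to a slice with
+// len=5 and cap=5, the standard append may grow the capacity well beyond the
+// needed 8 elements, whereas extend allocates a backing array of exactly 8.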