// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package raft

import (
	"context"
	"errors"

	pb "go.etcd.io/raft/v3/raftpb"
)

type SnapshotStatus int

const (
	SnapshotFinish  SnapshotStatus = 1
	SnapshotFailure SnapshotStatus = 2
)

var (
	emptyState = pb.HardState{}

	// ErrStopped is returned by methods on Nodes that have been stopped.
	ErrStopped = errors.New("raft: stopped")
)

// SoftState provides state that is useful for logging and debugging.
// The state is volatile and does not need to be persisted to the WAL.
type SoftState struct {
	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
	RaftState StateType
}

func (a *SoftState) equal(b *SoftState) bool {
	return a.Lead == b.Lead && a.RaftState == b.RaftState
}

// Ready encapsulates the entries and messages that are ready to read,
// be saved to stable storage, committed or sent to other peers.
// All fields in Ready are read-only.
type Ready struct {
	// The current volatile state of a Node.
	// SoftState will be nil if there is no update.
	// It is not required to consume or store SoftState.
	*SoftState

	// The current state of a Node to be saved to stable storage BEFORE
	// Messages are sent.
	//
	// HardState will be equal to empty state if there is no update.
	//
	// If async storage writes are enabled, this field does not need to be acted
	// on immediately. It will be reflected in a MsgStorageAppend message in the
	// Messages slice.
	pb.HardState

	// ReadStates can be used for the node to serve linearizable read requests
	// locally when its applied index is greater than the index in ReadState.
	// Note that the ReadState will be returned when raft receives MsgReadIndex.
	// The returned state is only valid for the request that requested to read.
	ReadStates []ReadState

	// Entries specifies entries to be saved to stable storage BEFORE
	// Messages are sent.
	//
	// If async storage writes are enabled, this field does not need to be acted
	// on immediately. It will be reflected in a MsgStorageAppend message in the
	// Messages slice.
	Entries []pb.Entry

	// Snapshot specifies the snapshot to be saved to stable storage.
	//
	// If async storage writes are enabled, this field does not need to be acted
	// on immediately. It will be reflected in a MsgStorageAppend message in the
	// Messages slice.
	Snapshot pb.Snapshot

	// CommittedEntries specifies entries to be committed to a
	// store/state-machine. These have previously been appended to stable
	// storage.
	//
	// If async storage writes are enabled, this field does not need to be acted
	// on immediately. It will be reflected in a MsgStorageApply message in the
	// Messages slice.
	CommittedEntries []pb.Entry

	// Messages specifies outbound messages.
	//
	// If async storage writes are not enabled, these messages must be sent
	// AFTER Entries are appended to stable storage.
	//
	// If async storage writes are enabled, these messages can be sent
	// immediately as the messages that have the completion of the async writes
	// as a precondition are attached to the individual MsgStorage{Append,Apply}
	// messages instead.
	//
	// If it contains a MsgSnap message, the application MUST report back to raft
	// when the snapshot has been received or has failed by calling ReportSnapshot.
	Messages []pb.Message

	// MustSync indicates whether the HardState and Entries must be durably
	// written to disk or if a non-durable write is permissible.
	MustSync bool
}
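
// A minimal sketch of how an application might consume a Ready when async
// storage writes are disabled. saveToStorage, send, and process are
// hypothetical application-side helpers, not part of this package:
//
//	rd := <-n.Ready()
//	// Persist state BEFORE sending messages.
//	saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
//	for _, m := range rd.Messages {
//		send(m) // transport is the application's responsibility
//	}
//	for _, entry := range rd.CommittedEntries {
//		process(entry) // apply committed entries to the state machine
//	}
//	n.Advance()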

func isHardStateEqual(a, b pb.HardState) bool {
	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
}

// IsEmptyHardState returns true if the given HardState is empty.
func IsEmptyHardState(st pb.HardState) bool {
	return isHardStateEqual(st, emptyState)
}

// IsEmptySnap returns true if the given Snapshot is empty.
func IsEmptySnap(sp pb.Snapshot) bool {
	return sp.Metadata.Index == 0
}

// Node represents a node in a raft cluster.
type Node interface {
	// Tick increments the internal logical clock for the Node by a single tick. Election
	// timeouts and heartbeat timeouts are in units of ticks.
	Tick()
	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
	Campaign(ctx context.Context) error
	// Propose proposes that data be appended to the log. Note that proposals can be lost
	// without notice, so it is the user's job to retry lost proposals.
	Propose(ctx context.Context, data []byte) error
	// ProposeConfChange proposes a configuration change. Like any proposal, the
	// configuration change may be dropped with or without an error being
	// returned. In particular, configuration changes are dropped unless the
	// leader has certainty that there is no prior unapplied configuration
	// change in its log.
	//
	// The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
	// message. The latter allows arbitrary configuration changes via joint
	// consensus, notably including replacing a voter. Passing a ConfChangeV2
	// message is only allowed if all Nodes participating in the cluster run a
	// version of this library aware of the V2 API. See pb.ConfChangeV2 for
	// usage details and semantics.
	ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error

	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
	Step(ctx context.Context, msg pb.Message) error

	// Ready returns a channel that returns the current point-in-time state.
	// Users of the Node must call Advance after retrieving the state returned by Ready (unless
	// async storage writes are enabled, in which case it should never be called).
	//
	// NOTE: No committed entries from the next Ready may be applied until all committed entries
	// and snapshots from the previous one have finished.
	Ready() <-chan Ready

	// Advance notifies the Node that the application has saved progress up to the last Ready.
	// It prepares the node to return the next available Ready.
	//
	// The application should generally call Advance after it applies the entries in the last Ready.
	//
	// However, as an optimization, the application may call Advance while it is applying the
	// commands. For example, when the last Ready contains a snapshot, the application might take
	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
	// progress, it can call Advance before finishing applying the last Ready.
	//
	// NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from the
	// local append and apply threads take its place.
	Advance()
	// ApplyConfChange applies a config change (previously passed to
	// ProposeConfChange) to the node. This must be called whenever a config
	// change is observed in Ready.CommittedEntries, except when the app decides
	// to reject the configuration change (i.e. treats it as a noop instead), in
	// which case it must not be called.
	//
	// Returns an opaque non-nil ConfState protobuf which must be recorded in
	// snapshots.
	ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState

	// TransferLeadership attempts to transfer leadership to the given transferee.
	TransferLeadership(ctx context.Context, lead, transferee uint64)

	// ForgetLeader forgets a follower's current leader, changing it to None. It
	// remains a leaderless follower in the current term, without campaigning.
	//
	// This is useful with PreVote+CheckQuorum, where followers will normally not
	// grant pre-votes if they've heard from the leader in the past election
	// timeout interval. Leaderless followers can grant pre-votes immediately, so
	// if a quorum of followers have strong reason to believe the leader is dead
	// (for example via a side-channel or external failure detector) and forget
	// it, then they can elect a new leader immediately, without waiting out the
	// election timeout. They will also revert to normal followers if they hear
	// from the leader again, or transition to candidates on an election timeout.
	//
	// For example, consider a three-node cluster where 1 is the leader and 2+3
	// have just received a heartbeat from it. If 2 and 3 believe the leader has
	// now died (maybe they know that an orchestration system shut down 1's VM),
	// we can instruct 2 to forget the leader and 3 to campaign. 2 will then be
	// able to grant 3's pre-vote and elect 3 as leader immediately (normally 2
	// would reject the vote until an election timeout passes because it has heard
	// from the leader recently). However, 3 cannot campaign unilaterally; a
	// quorum has to agree that the leader is dead, which avoids disrupting the
	// leader if individual nodes are wrong about it being dead.
	//
	// This does nothing with ReadOnlyLeaseBased, since it would allow a new
	// leader to be elected without the old leader knowing.
	ForgetLeader(ctx context.Context) error

	// ReadIndex requests a read state. The read state will be set in the Ready.
	// The read state has a read index. Once the application advances further than
	// the read index, any linearizable read requests issued before the read
	// request can be processed safely. The read state will have the same rctx
	// attached. Note that the request can be lost without notice, so it is the
	// user's job to retry it.
	ReadIndex(ctx context.Context, rctx []byte) error

	// Status returns the current status of the raft state machine.
	Status() Status
	// ReportUnreachable reports the given node is not reachable for the last send.
	ReportUnreachable(id uint64)
	// ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
	// who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
	// Calling ReportSnapshot with SnapshotFinish is a no-op. But any failure in applying a
	// snapshot (e.g., while streaming it from leader to follower) should be reported to the
	// leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any
	// raft log probes until the follower can apply the snapshot and advance its state. If the
	// follower can't do that, e.g., due to a crash, it could end up in limbo, never getting any
	// updates from the leader. Therefore, it is crucial that the application ensures that any
	// failure in snapshot sending is caught and reported back to the leader, so it can resume
	// raft log probing for the follower.
	ReportSnapshot(id uint64, status SnapshotStatus)
	// Stop performs any necessary termination of the Node.
	Stop()
}
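
// A brief sketch of driving proposals through a Node. ctx, n, and data are
// assumed to be in scope, and the voter ID 4 is purely illustrative:
//
//	// Propose application data; a dropped proposal must be retried by the caller.
//	if err := n.Propose(ctx, data); err != nil {
//		// retry or surface the error
//	}
//
//	// Propose adding a new voter via a configuration change.
//	cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: 4}
//	if err := n.ProposeConfChange(ctx, cc); err != nil {
//		// retry or surface the error
//	}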

type Peer struct {
	ID      uint64
	Context []byte
}

func setupNode(c *Config, peers []Peer) *node {
	if len(peers) == 0 {
		panic("no peers given; use RestartNode instead")
	}
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	err = rn.Bootstrap(peers)
	if err != nil {
		c.Logger.Warningf("error occurred during starting a new node: %v", err)
	}

	n := newNode(rn)
	return &n
}

// StartNode returns a new Node given configuration and a list of raft peers.
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
//
// Peers must not be zero length; call RestartNode in that case.
func StartNode(c *Config, peers []Peer) Node {
	n := setupNode(c, peers)
	go n.run()
	return n
}
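
// For example, a fresh three-member cluster might be bootstrapped roughly
// like this (the IDs and Config values are illustrative, not prescriptive):
//
//	storage := NewMemoryStorage()
//	c := &Config{
//		ID:              1,
//		ElectionTick:    10,
//		HeartbeatTick:   1,
//		Storage:         storage,
//		MaxSizePerMsg:   4096,
//		MaxInflightMsgs: 256,
//	}
//	n := StartNode(c, []Peer{{ID: 1}, {ID: 2}, {ID: 3}})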

// RestartNode is similar to StartNode but does not take a list of peers.
// The current membership of the cluster will be restored from the Storage.
// If the caller has an existing state machine, pass in the last log index that
// has been applied to it; otherwise use zero.
func RestartNode(c *Config) Node {
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	n := newNode(rn)
	go n.run()
	return &n
}
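
// A sketch of restarting from previously persisted state. snapshot,
// hardstate, and entries are assumed to have been recovered by the
// application, e.g. from its WAL:
//
//	storage := NewMemoryStorage()
//	storage.ApplySnapshot(snapshot)
//	storage.SetHardState(hardstate)
//	storage.Append(entries)
//	c := &Config{ID: 1, ElectionTick: 10, HeartbeatTick: 1, Storage: storage}
//	n := RestartNode(c)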

type msgWithResult struct {
	m      pb.Message
	result chan error
}

// node is the canonical implementation of the Node interface
type node struct {
	propc      chan msgWithResult
	recvc      chan pb.Message
	confc      chan pb.ConfChangeV2
	confstatec chan pb.ConfState
	readyc     chan Ready
	advancec   chan struct{}
	tickc      chan struct{}
	done       chan struct{}
	stop       chan struct{}
	status     chan chan Status

	rn *RawNode
}

func newNode(rn *RawNode) node {
	return node{
		propc:      make(chan msgWithResult),
		recvc:      make(chan pb.Message),
		confc:      make(chan pb.ConfChangeV2),
		confstatec: make(chan pb.ConfState),
		readyc:     make(chan Ready),
		advancec:   make(chan struct{}),
		// Make tickc a buffered chan, so the raft node can buffer some ticks
		// when it is busy processing raft messages. The raft node will resume
		// processing buffered ticks when it becomes idle.
		tickc:      make(chan struct{}, 128),
		done:       make(chan struct{}),
		stop:       make(chan struct{}),
		status:     make(chan chan Status),
		rn:         rn,
	}
}

func (n *node) Stop() {
	select {
	case n.stop <- struct{}{}:
		// Not already stopped, so trigger it
	case <-n.done:
		// Node has already been stopped - no need to do anything
		return
	}
	// Block until the stop has been acknowledged by run()
	<-n.done
}

func (n *node) run() {
	var propc chan msgWithResult
	var readyc chan Ready
	var advancec chan struct{}
	var rd Ready

	r := n.rn.raft

	lead := None

	for {
		if advancec == nil && n.rn.HasReady() {
			// Populate a Ready. Note that this Ready is not guaranteed to
			// actually be handled. We will arm readyc, but there's no guarantee
			// that we will actually send on it. It's possible that we will
			// service another channel instead, loop around, and then populate
			// the Ready again. We could instead force the previous Ready to be
			// handled first, but it's generally good to emit larger Readys plus
			// it simplifies testing (by emitting less frequently and more
			// predictably).
			rd = n.rn.readyWithoutAccept()
			readyc = n.readyc
		}

		if lead != r.lead {
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc
			} else {
				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.
		case pm := <-propc:
			m := pm.m
			m.From = r.id
			err := r.Step(m)
			if pm.result != nil {
				pm.result <- err
				close(pm.result)
			}
		case m := <-n.recvc:
			if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && r.trk.Progress[m.From] == nil {
				// Filter out response message from unknown From.
				break
			}
			r.Step(m)
		case cc := <-n.confc:
			_, okBefore := r.trk.Progress[r.id]
			cs := r.applyConfChange(cc)
			// If the node was removed, block incoming proposals. Note that we
			// only do this if the node was in the config before. Nodes may be
			// a member of the group without knowing this (when they're catching
			// up on the log and don't have the latest config) and we don't want
			// to block the proposal channel in that case.
			//
			// NB: propc is reset when the leader changes, which, if we learn
			// about it, sort of implies that we got readded, maybe? This isn't
			// very sound and likely has bugs.
			if _, okAfter := r.trk.Progress[r.id]; okBefore && !okAfter {
				var found bool
				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
					for _, id := range sl {
						if id == r.id {
							found = true
							break
						}
					}
					if found {
						break
					}
				}
				if !found {
					propc = nil
				}
			}
			select {
			case n.confstatec <- cs:
			case <-n.done:
			}
		case <-n.tickc:
			n.rn.Tick()
		case readyc <- rd:
			n.rn.acceptReady(rd)
			if !n.rn.asyncStorageWrites {
				advancec = n.advancec
			} else {
				rd = Ready{}
			}
			readyc = nil
		case <-advancec:
			n.rn.Advance(rd)
			rd = Ready{}
			advancec = nil
		case c := <-n.status:
			c <- getStatus(r)
		case <-n.stop:
			close(n.done)
			return
		}
	}
}

// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.rn.raft.logger.Warningf("%x A tick missed to fire. Node blocks too long!", n.rn.raft.id)
	}
}
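
// Applications typically drive Tick from a wall-clock timer; a sketch, where
// the 100ms interval is an application-chosen tick period:
//
//	ticker := time.NewTicker(100 * time.Millisecond)
//	defer ticker.Stop()
//	for range ticker.C {
//		n.Tick()
//	}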

func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }

func (n *node) Propose(ctx context.Context, data []byte) error {
	return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
}

func (n *node) Step(ctx context.Context, m pb.Message) error {
	// Ignore unexpected local messages received over the network.
	if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) {
		// TODO: return an error?
		return nil
	}
	return n.step(ctx, m)
}

func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
	typ, data, err := pb.MarshalConfChange(c)
	if err != nil {
		return pb.Message{}, err
	}
	return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
}

func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
	msg, err := confChangeToMsg(cc)
	if err != nil {
		return err
	}
	return n.Step(ctx, msg)
}

func (n *node) step(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, false)
}

func (n *node) stepWait(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, true)
}

// stepWithWaitOption advances the state machine using m. ctx.Err() will be
// returned, if any.
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		select {
		case n.recvc <- m:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		pm.result = make(chan error, 1)
	}
	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	select {
	case err := <-pm.result:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}

func (n *node) Ready() <-chan Ready { return n.readyc }

func (n *node) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}

func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc.AsV2():
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec:
	case <-n.done:
	}
	return &cs
}
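
// A sketch of the usual pattern for handling a committed configuration change
// found in Ready.CommittedEntries. saveConfState is a hypothetical helper that
// records the returned ConfState for inclusion in future snapshots:
//
//	for _, entry := range rd.CommittedEntries {
//		if entry.Type == pb.EntryConfChange {
//			var cc pb.ConfChange
//			if err := cc.Unmarshal(entry.Data); err != nil {
//				// handle the error
//			}
//			saveConfState(n.ApplyConfChange(cc))
//		}
//	}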

func (n *node) Status() Status {
	c := make(chan Status)
	select {
	case n.status <- c:
		return <-c
	case <-n.done:
		return Status{}
	}
}

func (n *node) ReportUnreachable(id uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
	case <-n.done:
	}
}

func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
	rej := status == SnapshotFailure

	select {
	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
	case <-n.done:
	}
}
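
// For example, if streaming a snapshot to the follower with raft ID 3 (an
// illustrative ID) fails, the application should report it so the leader can
// resume log probing:
//
//	n.ReportSnapshot(3, SnapshotFailure)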

func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	select {
	// Manually set 'from' and 'to' so that the leader can voluntarily transfer its leadership.
	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
	case <-n.done:
	case <-ctx.Done():
	}
}

func (n *node) ForgetLeader(ctx context.Context) error {
	return n.step(ctx, pb.Message{Type: pb.MsgForgetLeader})
}

func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}
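
// A sketch of serving a linearizable read. requestCtx is an application-chosen
// unique identifier and appliedIndex is the application's last applied index:
//
//	if err := n.ReadIndex(ctx, requestCtx); err != nil {
//		// retry or surface the error
//	}
//	// Later, in the Ready loop, match the returned ReadState to the request:
//	for _, rs := range rd.ReadStates {
//		if bytes.Equal(rs.RequestCtx, requestCtx) && appliedIndex >= rs.Index {
//			// safe to serve the read locally
//		}
//	}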