rippled
Loading...
Searching...
No Matches
Consensus_test.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012-2016 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <test/csf.h>
21#include <test/unit_test/SuiteJournal.h>
22
23#include <xrpld/consensus/Consensus.h>
24
25#include <xrpl/beast/unit_test.h>
26#include <xrpl/json/to_string.h>
27
28namespace ripple {
29namespace test {
30
// NOTE(review): scrape artifact — the viewer's line numbers are fused into
// each line, and internal lines 31 and 33 are missing from this capture
// (presumably the class declaration, e.g.
// "class Consensus_test : public beast::unit_test::suite", and the
// journal_ member declaration). Confirm against the upstream rippled file.
32{
34
35public:
// Constructor: initializes the suite journal with this suite as its sink.
36 Consensus_test() : journal_("Consensus_test", *this)
37 {
38 }
39
// Unit test of the shouldCloseLedger() free function: forced closes on
// bizarre (negative / huge) durations, closing when the rest of the network
// has closed, waiting out the interval when there are no transactions,
// enforcing the minimum open time, and pacing against the prior round.
// NOTE(review): internal line 41 (the method name, presumably
// "testShouldCloseLedger()") is missing from this capture.
40 void
42 {
43 using namespace std::chrono_literals;
44 testcase("should close ledger");
45
46 // Use default parameters
47 ConsensusParms const p{};
48
49 // Bizarre times forcibly close
50 BEAST_EXPECT(shouldCloseLedger(
51 true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
52 BEAST_EXPECT(shouldCloseLedger(
53 true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
54 BEAST_EXPECT(shouldCloseLedger(
55 true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
56
57 // Rest of network has closed
58 BEAST_EXPECT(
59 shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
60
61 // No transactions means wait until end of interval
62 BEAST_EXPECT(
63 !shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
64 BEAST_EXPECT(
65 shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
66
67 // Enforce minimum ledger open time
68 BEAST_EXPECT(
69 !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
70
71 // Don't go too much faster than last time
72 BEAST_EXPECT(
73 !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
74
75 BEAST_EXPECT(
76 shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
77 }
78
// Unit test of the checkConsensus() free function, first with the
// "stalled" flag false (disputes still in doubt) and then with it true.
// NOTE(review): internal line 80 (the method name, presumably
// "testCheckConsensus()") is missing, and so is the line between each
// "BEAST_EXPECT(" and its "checkConsensus(...)" argument line (93, 99,
// 104, 109, ...). Judging by the accompanying comments, several of those
// missing lines carried a leading "!" negation on the call — confirm the
// expected truth values against the upstream file.
79 void
81 {
82 using namespace std::chrono_literals;
83 testcase("check consensus");
84
85 // Use default parameters
86 ConsensusParms const p{};
87
89 // Disputes still in doubt
90 //
91 // Not enough time has elapsed
92 BEAST_EXPECT(
94 checkConsensus(10, 2, 2, 0, 3s, 2s, false, p, true, journal_));
95
96 // If not enough peers have proposed, ensure
97 // more time for proposals
98 BEAST_EXPECT(
100 checkConsensus(10, 2, 2, 0, 3s, 4s, false, p, true, journal_));
101
102 // Enough time has elapsed and we all agree
103 BEAST_EXPECT(
105 checkConsensus(10, 2, 2, 0, 3s, 10s, false, p, true, journal_));
106
107 // Enough time has elapsed and we don't yet agree
108 BEAST_EXPECT(
110 checkConsensus(10, 2, 1, 0, 3s, 10s, false, p, true, journal_));
111
112 // Our peers have moved on
113 // Enough time has elapsed and we all agree
114 BEAST_EXPECT(
116 checkConsensus(10, 2, 1, 8, 3s, 10s, false, p, true, journal_));
117
118 // If no peers, don't agree until time has passed.
119 BEAST_EXPECT(
121 checkConsensus(0, 0, 0, 0, 3s, 10s, false, p, true, journal_));
122
123 // Agree if no peers and enough time has passed.
124 BEAST_EXPECT(
126 checkConsensus(0, 0, 0, 0, 3s, 16s, false, p, true, journal_));
127
128 // Expire if too much time has passed without agreement
129 BEAST_EXPECT(
131 checkConsensus(10, 8, 1, 0, 1s, 19s, false, p, true, journal_));
132
134 // Stalled
135 //
136 // Not enough time has elapsed
137 BEAST_EXPECT(
139 checkConsensus(10, 2, 2, 0, 3s, 2s, true, p, true, journal_));
140
141 // If not enough peers have proposed, ensure
142 // more time for proposals
143 BEAST_EXPECT(
145 checkConsensus(10, 2, 2, 0, 3s, 4s, true, p, true, journal_));
146
147 // Enough time has elapsed and we all agree
148 BEAST_EXPECT(
150 checkConsensus(10, 2, 2, 0, 3s, 10s, true, p, true, journal_));
151
152 // Enough time has elapsed and we don't yet agree, but there's nothing
153 // left to dispute
154 BEAST_EXPECT(
156 checkConsensus(10, 2, 1, 0, 3s, 10s, true, p, true, journal_));
157
158 // Our peers have moved on
159 // Enough time has elapsed and we all agree, nothing left to dispute
160 BEAST_EXPECT(
162 checkConsensus(10, 2, 1, 8, 3s, 10s, true, p, true, journal_));
163
164 // If no peers, don't agree until time has passed.
165 BEAST_EXPECT(
167 checkConsensus(0, 0, 0, 0, 3s, 10s, true, p, true, journal_));
168
169 // Agree if no peers and enough time has passed.
170 BEAST_EXPECT(
172 checkConsensus(0, 0, 0, 0, 3s, 16s, true, p, true, journal_));
173
174 // We are done if there's nothing left to dispute, no matter how much
175 // time has passed
176 BEAST_EXPECT(
178 checkConsensus(10, 8, 1, 0, 1s, 19s, true, p, true, journal_));
179 }
180
// Runs a single peer in standalone mode: submit one transaction, step the
// scheduler once, and verify the peer closes ledger seq 1 containing that
// transaction with no other proposers.
// NOTE(review): internal line 182 (the method name, presumably
// "testStandalone()") is missing from this capture.
181 void
183 {
184 using namespace std::chrono_literals;
185 using namespace csf;
186 testcase("standalone");
187
188 Sim s;
189 PeerGroup peers = s.createGroup(1);
190 Peer* peer = peers[0];
191 peer->targetLedgers = 1;
192 peer->start();
193 peer->submit(Tx{1});
194
195 s.scheduler.step();
196
197 // Inspect that the proper ledger was created
198 auto const& lcl = peer->lastClosedLedger;
199 BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
200 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
201 BEAST_EXPECT(lcl.txs().size() == 1);
202 BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
203 BEAST_EXPECT(peer->prevProposers == 0);
204 }
205
// Five fully-connected, mutually-trusting peers with a uniform small link
// delay each submit a unique transaction; after one round all peers should
// be synchronized on a seq-1 ledger containing every transaction.
// NOTE(review): internal line 207 (the method name, presumably
// "testPeersAgree()") is missing from this capture.
206 void
208 {
209 using namespace csf;
210 using namespace std::chrono;
211 testcase("peers agree");
212
213 ConsensusParms const parms{};
214 Sim sim;
215 PeerGroup peers = sim.createGroup(5);
216
217 // Connected trust and network graphs with single fixed delay
218 peers.trustAndConnect(
219 peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
220
221 // everyone submits their own ID as a TX
222 for (Peer* p : peers)
223 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
224
225 sim.run(1);
226
227 // All peers are in sync
228 if (BEAST_EXPECT(sim.synchronized()))
229 {
230 for (Peer const* peer : peers)
231 {
232 auto const& lcl = peer->lastClosedLedger;
233 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
234 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
235 // All peers proposed
236 BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
237 // All transactions were accepted
238 for (std::uint32_t i = 0; i < peers.size(); ++i)
239 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
240 }
241 }
242 }
243
// Exercises consensus with a subset of peers on much slower network links:
// first a single slow peer that cannot delay a 4/5 quorum, then two slow
// peers that do delay a 4/6 quorum, run both as validators and as
// observers. Slow peers' transactions miss the closed ledger but remain in
// the open transaction set.
// NOTE(review): internal line 245 (the method name, presumably
// "testSlowPeers()") is missing from this capture.
244 void
246 {
247 using namespace csf;
248 using namespace std::chrono;
249 testcase("slow peers");
250
251 // Several tests of a complete trust graph with a subset of peers
252 // that have significantly longer network delays to the rest of the
253 // network
254
255 // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
256 {
257 ConsensusParms const parms{};
258 Sim sim;
259 PeerGroup slow = sim.createGroup(1);
260 PeerGroup fast = sim.createGroup(4);
261 PeerGroup network = fast + slow;
262
263 // Fully connected trust graph
264 network.trust(network);
265
266 // Fast and slow network connections
267 fast.connect(
268 fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
269
270 slow.connect(
271 network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
272
273 // All peers submit their own ID as a transaction
274 for (Peer* peer : network)
275 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
276
277 sim.run(1);
278
279 // Verify all peers have same LCL but are missing transaction 0
280 // All peers are in sync even with a slower peer 0
281 if (BEAST_EXPECT(sim.synchronized()))
282 {
283 for (Peer* peer : network)
284 {
285 auto const& lcl = peer->lastClosedLedger;
286 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
287 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
288
289 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
290 BEAST_EXPECT(
291 peer->prevRoundTime == network[0]->prevRoundTime);
292
293 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
294 for (std::uint32_t i = 2; i < network.size(); ++i)
295 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
296
297 // Tx 0 didn't make it
298 BEAST_EXPECT(
299 peer->openTxs.find(Tx{0}) != peer->openTxs.end());
300 }
301 }
302 }
303
304 // Test when the slow peers delay a consensus quorum (4/6 agree)
305 {
306 // Run two tests
307 // 1. The slow peers are participating in consensus
308 // 2. The slow peers are just observing
309
310 for (auto isParticipant : {true, false})
311 {
312 ConsensusParms const parms{};
313
314 Sim sim;
315 PeerGroup slow = sim.createGroup(2);
316 PeerGroup fast = sim.createGroup(4);
317 PeerGroup network = fast + slow;
318
319 // Connected trust graph
320 network.trust(network);
321
322 // Fast and slow network connections
323 fast.connect(
324 fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
325
326 slow.connect(
327 network,
328 round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
329
330 for (Peer* peer : slow)
331 peer->runAsValidator = isParticipant;
332
333 // All peers submit their own ID as a transaction and relay it
334 // to peers
335 for (Peer* peer : network)
336 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
337
338 sim.run(1);
339
340 if (BEAST_EXPECT(sim.synchronized()))
341 {
342 // Verify all peers have same LCL but are missing
343 // transaction 0,1 which was not received by all peers
344 // before the ledger closed
345 for (Peer* peer : network)
346 {
347 // Closed ledger has all but transaction 0,1
348 auto const& lcl = peer->lastClosedLedger;
349 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
350 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
351 BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
352 for (std::uint32_t i = slow.size(); i < network.size();
353 ++i)
354 BEAST_EXPECT(
355 lcl.txs().find(Tx{i}) != lcl.txs().end());
356
357 // Tx 0-1 didn't make it
358 BEAST_EXPECT(
359 peer->openTxs.find(Tx{0}) != peer->openTxs.end());
360 BEAST_EXPECT(
361 peer->openTxs.find(Tx{1}) != peer->openTxs.end());
362 }
363
364 Peer const* slowPeer = slow[0];
365 if (isParticipant)
366 BEAST_EXPECT(
367 slowPeer->prevProposers == network.size() - 1);
368 else
369 BEAST_EXPECT(slowPeer->prevProposers == fast.size());
370
371 for (Peer* peer : fast)
372 {
373 // Due to the network link delay settings
374 // Peer 0 initially proposes {0}
375 // Peer 1 initially proposes {1}
376 // Peers 2-5 initially propose {2,3,4,5}
377 // Since peers 2-5 agree, 4/6 > the initial 50% needed
378 // to include a disputed transaction, so Peer 0/1 switch
379 // to agree with those peers. Peer 0/1 then closes with
380 // an 80% quorum of agreeing positions (5/6) match.
381 //
382 // Peers 2-5 do not change position, since tx 0 or tx 1
383 // have less than the 50% initial threshold. They also
384 // cannot declare consensus, since 4/6 agreeing
385 // positions are < 80% threshold. They therefore need an
386 // additional timerEntry call to see the updated
387 // positions from Peer 0 & 1.
388
389 if (isParticipant)
390 {
391 BEAST_EXPECT(
392 peer->prevProposers == network.size() - 1);
393 BEAST_EXPECT(
394 peer->prevRoundTime > slowPeer->prevRoundTime);
395 }
396 else
397 {
398 BEAST_EXPECT(
399 peer->prevProposers == fast.size() - 1);
400 // so all peers should have closed together
401 BEAST_EXPECT(
402 peer->prevRoundTime == slowPeer->prevRoundTime);
403 }
404 }
405 }
406 }
407 }
408 }
409
// Engineers three pairs of peers with different clock skews (0s, 10s, 20s)
// so no majority agrees on an effective close time; after a round, every
// peer's last closed ledger should report closeAgree() == false.
// NOTE(review): internal line 411 (the method name, presumably
// "testCloseTimeDisagree()") is missing from this capture.
410 void
412 {
413 using namespace csf;
414 using namespace std::chrono;
415 testcase("close time disagree");
416
417 // This is a very specialized test to get ledgers to disagree on
418 // the close time. It unfortunately assumes knowledge about current
419 // timing constants. This is a necessary evil to get coverage up
420 // pending more extensive refactorings of timing constants.
421
422 // In order to agree-to-disagree on the close time, there must be no
423 // clear majority of nodes agreeing on a close time. This test
424 // sets a relative offset to the peers internal clocks so that they
425 // send proposals with differing times.
426
427 // However, agreement is on the effective close time, not the
428 // exact close time. The minimum closeTimeResolution is given by
429 // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
430 // the skews need to be at least 10 seconds to have different effective
431 // close times.
432
433 // Complicating this matter is that nodes will ignore proposals
434 // with times more than proposeFRESHNESS = 20s in the past. So at
435 // the minimum granularity, we have at most 3 types of skews
436 // (0s,10s,20s).
437
438 // This test therefore has 6 nodes, with 2 nodes having each type of
439 // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
440 // actual close time.
441
442 ConsensusParms const parms{};
443 Sim sim;
444
445 PeerGroup groupA = sim.createGroup(2);
446 PeerGroup groupB = sim.createGroup(2);
447 PeerGroup groupC = sim.createGroup(2);
448 PeerGroup network = groupA + groupB + groupC;
449
450 network.trust(network);
451 network.connect(
452 network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
453
454 // Run consensus without skew until we have a short close time
455 // resolution
456 Peer* firstPeer = *groupA.begin();
457 while (firstPeer->lastClosedLedger.closeTimeResolution() >=
458 parms.proposeFRESHNESS)
459 sim.run(1);
460
461 // Introduce a shift on the time of 2/3 of peers
462 for (Peer* peer : groupA)
463 peer->clockSkew = parms.proposeFRESHNESS / 2;
464 for (Peer* peer : groupB)
465 peer->clockSkew = parms.proposeFRESHNESS;
466
467 sim.run(1);
468
469 // All nodes agreed to disagree on the close time
470 if (BEAST_EXPECT(sim.synchronized()))
471 {
472 for (Peer* peer : network)
473 BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
474 }
475 }
476
// Exercises recovery from a temporary fork where a minority works on the
// wrong last-closed ledger: the minority must detect the mismatch via
// peer validations, jump chains, and converge on the dominant ledger.
// A second scenario triggers the wrong-LCL switch during the establish
// phase (a previously crashing code path).
// NOTE(review): internal line 478 (the method name, presumably
// "testWrongLCL()") is missing from this capture.
477 void
479 {
480 using namespace csf;
481 using namespace std::chrono;
482 testcase("wrong LCL");
483
484 // Specialized test to exercise a temporary fork in which some peers
485 // are working on an incorrect prior ledger.
486
487 ConsensusParms const parms{};
488
489 // Vary the time it takes to process validations to exercise detecting
490 // the wrong LCL at different phases of consensus
491 for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
492 {
493 // Consider 10 peers:
494 // 0 1 2 3 4 5 6 7 8 9
495 // minority majorityA majorityB
496 //
497 // Nodes 0-1 trust nodes 0-4
498 // Nodes 2-9 trust nodes 2-9
499 //
500 // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
501 // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
502 // nodes will instead accept the ledger with tx 1.
503
504 // Nodes 0-1 will detect this mismatch during a subsequent round
505 // since nodes 2-4 will validate a different ledger.
506
507 // Nodes 0-1 will acquire the proper ledger from the network and
508 // resume consensus and eventually generate the dominant network
509 // ledger.
510
511 // This topology can potentially fork with the above trust relations
512 // but that is intended for this test.
513
514 Sim sim;
515
516 PeerGroup minority = sim.createGroup(2);
517 PeerGroup majorityA = sim.createGroup(3);
518 PeerGroup majorityB = sim.createGroup(5);
519
520 PeerGroup majority = majorityA + majorityB;
521 PeerGroup network = minority + majority;
522
523 SimDuration delay =
524 round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
525 minority.trustAndConnect(minority + majorityA, delay);
526 majority.trustAndConnect(majority, delay);
527
528 CollectByNode<JumpCollector> jumps;
529 sim.collectors.add(jumps);
530
531 BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
532
533 // initial round to set prior state
534 sim.run(1);
535
536 // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
537 // tx 1
538 for (Peer* peer : network)
539 peer->delays.recvValidation = validationDelay;
540 for (Peer* peer : (minority + majorityA))
541 peer->openTxs.insert(Tx{0});
542 for (Peer* peer : majorityB)
543 peer->openTxs.insert(Tx{1});
544
545 // Run for additional rounds
546 // With no validation delay, only 2 more rounds are needed.
547 // 1. Round to generate different ledgers
548 // 2. Round to detect different prior ledgers (but still generate
549 // wrong ones) and recover within that round since wrong LCL
550 // is detected before we close
551 //
552 // With a validation delay of ledgerMIN_CLOSE, we need 3 more
553 // rounds.
554 // 1. Round to generate different ledgers
555 // 2. Round to detect different prior ledgers (but still generate
556 // wrong ones) but end up declaring consensus on wrong LCL (but
557 // with the right transaction set!). This is because we detect
558 // the wrong LCL after we have closed the ledger, so we declare
559 // consensus based solely on our peer proposals. But we haven't
560 // had time to acquire the right ledger.
561 // 3. Round to correct
562 sim.run(3);
563
564 // The network never actually forks, since node 0-1 never see a
565 // quorum of validations to fully validate the incorrect chain.
566
567 // However, for a non zero-validation delay, the network is not
568 // synchronized because nodes 0 and 1 are running one ledger behind
569 if (BEAST_EXPECT(sim.branches() == 1))
570 {
571 for (Peer const* peer : majority)
572 {
573 // No jumps for majority nodes
574 BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
575 BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
576 }
577 for (Peer const* peer : minority)
578 {
579 auto& peerJumps = jumps[peer->id];
580 // last closed ledger jump between chains
581 {
582 if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
583 {
584 JumpCollector::Jump const& jump =
585 peerJumps.closeJumps.front();
586 // Jump is to a different chain
587 BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
588 BEAST_EXPECT(!jump.to.isAncestor(jump.from));
589 }
590 }
591 // fully validated jump forward in same chain
592 {
593 if (BEAST_EXPECT(
594 peerJumps.fullyValidatedJumps.size() == 1))
595 {
596 JumpCollector::Jump const& jump =
597 peerJumps.fullyValidatedJumps.front();
598 // Jump is to a different chain with same seq
599 BEAST_EXPECT(jump.from.seq() < jump.to.seq());
600 BEAST_EXPECT(jump.to.isAncestor(jump.from));
601 }
602 }
603 }
604 }
605 }
606
607 {
608 // Additional test engineered to switch LCL during the establish
609 // phase. This was added to trigger a scenario that previously
610 // crashed, in which switchLCL switched from establish to open
611 // phase, but still processed the establish phase logic.
612
613 // Loner node will accept an initial ledger A, but all other nodes
614 // accept ledger B a bit later. By delaying the time it takes
615 // to process a validation, loner node will detect the wrongLCL
616 // after it is already in the establish phase of the next round.
617
618 Sim sim;
619 PeerGroup loner = sim.createGroup(1);
620 PeerGroup friends = sim.createGroup(3);
621 loner.trust(loner + friends);
622
623 PeerGroup others = sim.createGroup(6);
624 PeerGroup clique = friends + others;
625 clique.trust(clique);
626
627 PeerGroup network = loner + clique;
628 network.connect(
629 network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
630
631 // initial round to set prior state
632 sim.run(1);
633 for (Peer* peer : (loner + friends))
634 peer->openTxs.insert(Tx(0));
635 for (Peer* peer : others)
636 peer->openTxs.insert(Tx(1));
637
638 // Delay validation processing
639 for (Peer* peer : network)
640 peer->delays.recvValidation = parms.ledgerGRANULARITY;
641
642 // additional rounds to generate wrongLCL and recover
643 sim.run(2);
644
645 // Check all peers recovered
646 for (Peer* p : network)
647 BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
648 }
649 }
650
// Engineers a close-time that is not a fixed point of effCloseTime() so
// fast and slow peers round the agreed close time differently, then checks
// the network nevertheless stays synchronized.
// NOTE(review): internal line 652 (the method name, presumably
// "testConsensusCloseTimeRounding()") is missing from this capture, as is
// internal line 681 — given the comment on 680 it presumably held the
// sim.run(...) call advancing to the ledger before the resolution
// decrease; confirm against upstream.
651 void
653 {
654 using namespace csf;
655 using namespace std::chrono;
656 testcase("consensus close time rounding");
657
658 // This is a specialized test engineered to yield ledgers with different
659 // close times even though the peers believe they had close time
660 // consensus on the ledger.
661 ConsensusParms parms;
662
663 Sim sim;
664
665 // This requires a group of 4 fast and 2 slow peers to create a
666 // situation in which a subset of peers requires seeing additional
667 // proposals to declare consensus.
668 PeerGroup slow = sim.createGroup(2);
669 PeerGroup fast = sim.createGroup(4);
670 PeerGroup network = fast + slow;
671
672 // Connected trust graph
673 network.trust(network);
674
675 // Fast and slow network connections
676 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
677 slow.connect(
678 network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
679
680 // Run to the ledger *prior* to decreasing the resolution
682
683 // In order to create the discrepancy, we want a case where if
684 // X = effCloseTime(closeTime, resolution, parentCloseTime)
685 // X != effCloseTime(X, resolution, parentCloseTime)
686 //
687 // That is, the effective close time is not a fixed point. This can
688 // happen if X = parentCloseTime + 1, but a subsequent rounding goes
689 // to the next highest multiple of resolution.
690
691 // So we want to find an offset (now + offset) % 30s = 15
692 // (now + offset) % 20s = 15
693 // This way, the next ledger will close and round up. Due to the
694 // network delay settings, the round of consensus will take 5s, so
695 // the next ledger's close time will
696
697 NetClock::duration when = network[0]->now().time_since_epoch();
698
699 // Check we are before the 30s to 20s transition
700 NetClock::duration resolution =
701 network[0]->lastClosedLedger.closeTimeResolution();
702 BEAST_EXPECT(resolution == NetClock::duration{30s});
703
704 while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
705 ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
706 when += 1s;
707 // Advance the clock without consensus running (IS THIS WHAT
708 // PREVENTS IT IN PRACTICE?)
709 sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
710
711 // Run one more ledger with 30s resolution
712 sim.run(1);
713 if (BEAST_EXPECT(sim.synchronized()))
714 {
715 // close time should be ahead of clock time since we engineered
716 // the close time to round up
717 for (Peer* peer : network)
718 {
719 BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
720 BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
721 }
722 }
723
724 // All peers submit their own ID as a transaction
725 for (Peer* peer : network)
726 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
727
728 // Run 1 more round, this time it will have a decreased
729 // resolution of 20 seconds.
730
731 // The network delays are engineered so that the slow peers
732 // initially have the wrong tx hash, but they see a majority
733 // of agreement from their peers and declare consensus
734 //
735 // The trick is that everyone starts with a raw close time of
736 // 86481s
737 // Which has
738 // effCloseTime(86481s, 20s, 86490s) = 86491s
739 // However, when the slow peers update their position, they change
740 // the close time to 86451s. The fast peers declare consensus with
741 // the 86481s as their position still.
742 //
743 // When accepted the ledger
744 // - fast peers use eff(86481s) -> 86491s as the close time
745 // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
746
747 sim.run(1);
748
749 BEAST_EXPECT(sim.synchronized());
750 }
751
// Sweeps the overlap between two 10-node UNL cliques from 0 to full:
// with more than 40% overlap the network must stay synchronized; with
// less, any fork is bounded to at most 3 distinct ledger branches.
// NOTE(review): internal line 753 (the method name, presumably
// "testFork()") is missing from this capture.
752 void
754 {
755 using namespace csf;
756 using namespace std::chrono;
757 testcase("fork");
758
759 std::uint32_t numPeers = 10;
760 // Vary overlap between two UNLs
761 for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
762 {
763 ConsensusParms const parms{};
764 Sim sim;
765
766 std::uint32_t numA = (numPeers - overlap) / 2;
767 std::uint32_t numB = numPeers - numA - overlap;
768
769 PeerGroup aOnly = sim.createGroup(numA);
770 PeerGroup bOnly = sim.createGroup(numB);
771 PeerGroup commonOnly = sim.createGroup(overlap);
772
773 PeerGroup a = aOnly + commonOnly;
774 PeerGroup b = bOnly + commonOnly;
775
776 PeerGroup network = a + b;
777
778 SimDuration delay =
779 round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
780 a.trustAndConnect(a, delay);
781 b.trustAndConnect(b, delay);
782
783 // Initial round to set prior state
784 sim.run(1);
785 for (Peer* peer : network)
786 {
787 // Nodes have only seen transactions from their neighbors
788 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
789 for (Peer* to : sim.trustGraph.trustedPeers(peer))
790 peer->openTxs.insert(
791 Tx{static_cast<std::uint32_t>(to->id)});
792 }
793 sim.run(1);
794
795 // Fork should not happen for 40% or greater overlap
796 // Since the overlapped nodes have a UNL that is the union of the
797 // two cliques, the maximum sized UNL list is the number of peers
798 if (overlap > 0.4 * numPeers)
799 BEAST_EXPECT(sim.synchronized());
800 else
801 {
802 // Even if we do fork, there shouldn't be more than 3 ledgers
803 // One for cliqueA, one for cliqueB and one for nodes in both
804 BEAST_EXPECT(sim.branches() <= 3);
805 }
806 }
807 }
808
// Five validators that are not directly connected communicate only through
// a single non-validating hub node; after a round the network should still
// reach synchronized consensus.
// NOTE(review): internal line 810 (the method name, presumably
// "testHubNetwork()") is missing from this capture.
809 void
811 {
812 using namespace csf;
813 using namespace std::chrono;
814 testcase("hub network");
815
816 // Simulate a set of 5 validators that aren't directly connected but
817 // rely on a single hub node for communication
818
819 ConsensusParms const parms{};
820 Sim sim;
821 PeerGroup validators = sim.createGroup(5);
822 PeerGroup center = sim.createGroup(1);
823 validators.trust(validators);
824 center.trust(validators);
825
826 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
827 validators.connect(center, delay);
828
829 center[0]->runAsValidator = false;
830
831 // prep round to set initial state.
832 sim.run(1);
833
834 // everyone submits their own ID as a TX and relay it to peers
835 for (Peer* p : validators)
836 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
837
838 sim.run(1);
839
840 // All peers are in sync
841 BEAST_EXPECT(sim.synchronized());
842 }
843
844 // Helper collector for testPreferredByBranch
845 // Invasively disconnects network at bad times to cause splits
// NOTE(review): scrape artifact — this capture is missing the struct
// declaration (internal line 846, presumably "struct Disruptor"), the
// reference members for the network/fast-C/split-C groups and the delay
// (848-851), parts of the constructor signature (854, 856, 858), and the
// signatures of the three event handlers below (the generic template
// catch-all, the fully-validated handler, and the accept-ledger handler).
// Confirm all signatures against the upstream rippled file.
847 {
852 bool reconnected = false;
853
855 csf::PeerGroup& net,
857 csf::PeerGroup& split,
859 : network(net), groupCfast(c), groupCsplit(split), delay(d)
860 {
861 }
862
// Generic catch-all for simulation events this collector ignores.
863 template <class E>
864 void
866 {
867 }
868
// Handler fired on full validation (signature line missing; it reads a
// peer id `who` and an event `e` carrying the validated ledger).
869 void
871 {
872 using namespace std::chrono;
873 // As soon as the fastC node fully validates C, disconnect
874 // ALL c nodes from the network. The fast C node needs to disconnect
875 // as well to prevent it from relaying the validations it did see
876 if (who == groupCfast[0]->id &&
877 e.ledger.seq() == csf::Ledger::Seq{2})
878 {
879 network.disconnect(groupCsplit);
880 network.disconnect(groupCfast);
881 }
882 }
883
// Handler fired when a ledger is accepted (signature line missing).
884 void
886 {
887 // As soon as anyone generates a child of B or C, reconnect the
888 // network so those validations make it through
889 if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
890 {
891 reconnected = true;
892 network.connect(groupCsplit, delay);
893 }
894 }
895 };
896
// Demonstrates that the preferred-ledger-by-trie selection prevents a fork
// during a contrived series of network splits orchestrated by Disruptor:
// 2 votes for D must not outweigh the 8 implicit votes for C.
// NOTE(review): internal line 898 (the method name, presumably
// "testPreferredByBranch()") is missing from this capture.
897 void
899 {
900 using namespace csf;
901 using namespace std::chrono;
902 testcase("preferred by branch");
903
904 // Simulate network splits that are prevented from forking when using
905 // preferred ledger by trie. This is a contrived example that involves
906 // excessive network splits, but demonstrates the safety improvement
907 // from the preferred ledger by trie approach.
908
909 // Consider 10 validating nodes that comprise a single common UNL
910 // Ledger history:
911 // 1: A
912 // _/ \_
913 // 2: B C
914 // _/ _/ \_
915 // 3: D C' |||||||| (8 different ledgers)
916
917 // - All nodes generate the common ledger A
918 // - 2 nodes generate B and 8 nodes generate C
919 // - Only 1 of the C nodes sees all the C validations and fully
920 // validates C. The rest of the C nodes split at just the right time
921 // such that they never see any C validations but their own.
922 // - The C nodes continue and generate 8 different child ledgers.
923 // - Meanwhile, the D nodes only saw 1 validation for C and 2
924 // validations
925 // for B.
926 // - The network reconnects and the validations for generation 3 ledgers
927 // are observed (D and the 8 C's)
928 // - In the old approach, 2 votes for D outweighs 1 vote for each C'
929 // so the network would avalanche towards D and fully validate it
930 // EVEN though C was fully validated by one node
931 // - In the new approach, 2 votes for D are not enough to outweigh the
932 // 8 implicit votes for C, so nodes will avalanche to C instead
933
934 ConsensusParms const parms{};
935 Sim sim;
936
937 // Goes A->B->D
938 PeerGroup groupABD = sim.createGroup(2);
939 // Single node that initially fully validates C before the split
940 PeerGroup groupCfast = sim.createGroup(1);
941 // Generates C, but fails to fully validate before the split
942 PeerGroup groupCsplit = sim.createGroup(7);
943
944 PeerGroup groupNotFastC = groupABD + groupCsplit;
945 PeerGroup network = groupABD + groupCsplit + groupCfast;
946
947 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
948 SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
949
950 network.trust(network);
951 // C must have a shorter delay to see all the validations before the
952 // other nodes
953 network.connect(groupCfast, fDelay);
954 // The rest of the network is connected at the same speed
955 groupNotFastC.connect(groupNotFastC, delay);
956
957 Disruptor dc(network, groupCfast, groupCsplit, delay);
958 sim.collectors.add(dc);
959
960 // Consensus round to generate ledger A
961 sim.run(1);
962 BEAST_EXPECT(sim.synchronized());
963
964 // Next round generates B and C
965 // To force B, we inject an extra transaction in to those nodes
966 for (Peer* peer : groupABD)
967 {
968 peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
969 }
970 // The Disruptor will ensure that nodes disconnect before the C
971 // validations make it to all but the fastC node
972 sim.run(1);
973
974 // We are no longer in sync, but have not yet forked:
975 // 9 nodes consider A the last fully validated ledger and fastC sees C
976 BEAST_EXPECT(!sim.synchronized());
977 BEAST_EXPECT(sim.branches() == 1);
978
979 // Run another round to generate the 8 different C' ledgers
980 for (Peer* p : network)
981 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
982 sim.run(1);
983
984 // Still not forked
985 BEAST_EXPECT(!sim.synchronized());
986 BEAST_EXPECT(sim.branches() == 1);
987
988 // Disruptor will reconnect all but the fastC node
989 sim.run(1);
990
991 if (BEAST_EXPECT(sim.branches() == 1))
992 {
993 BEAST_EXPECT(sim.synchronized());
994 }
995 else // old approach caused a fork
996 {
997 BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
998 BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
999 }
1000 }
1001
1002 // Helper collector for testPauseForLaggards
1003 // This will remove the ledgerAccept delay used to
1004 // initially create the slow vs. fast validator groups.
// NOTE(review): scrape artifact — this capture is missing the struct
// declaration (internal line 1005, presumably "struct UndoDelay"), the
// peer-group member declaration (1007, the `g` iterated below), the
// constructor signature (1009), and the signatures of both event handlers
// (1015 generic catch-all; 1020 the accept-ledger handler providing
// `who`). Confirm signatures against the upstream rippled file.
1006 {
1008
1010 {
1011 }
1012
// Generic catch-all for simulation events this collector ignores.
1013 template <class E>
1014 void
1016 {
1017 }
1018
// Accept-ledger handler: once the identified peer accepts a ledger, its
// artificial ledgerAccept delay is reset to zero.
1019 void
1021 {
1022 for (csf::Peer* p : g)
1023 {
1024 if (p->id == who)
1025 p->delays.ledgerAccept = std::chrono::seconds{0};
1026 }
1027 }
1028 };
1029
1030 void
// NOTE(review): the hyperlinked function-name line (orig. ~1031,
// `testPauseForLaggards()`) was stripped by this render; the name is
// confirmed by the call in run() below.
1032 {
1033 using namespace csf;
1034 using namespace std::chrono;
1035 testcase("pause for laggards");
1036
1037 // Test that validators that jump ahead of the network slow
1038 // down.
1039
1040 // We engineer the following validated ledger history scenario:
1041 //
1042 // / --> B1 --> C1 --> ... -> G1 "ahead"
1043 // A
1044 // \ --> B2 --> C2 "behind"
1045 //
1046 // After validating a common ledger A, a set of "behind" validators
1047 // briefly run slower and validate the lower chain of ledgers.
1048 // The "ahead" validators run normal speed and run ahead validating the
1049 // upper chain of ledgers.
1050 //
1051 // Due to the uncommitted support definition of the preferred branch
1052 // protocol, even if the "behind" validators are a majority, the "ahead"
1053 // validators cannot jump to the proper branch until the "behind"
1054 // validators catch up to the same sequence number. For this test to
1055 // succeed, the ahead validators need to briefly slow down consensus.
1056
1057 ConsensusParms const parms{};
1058 Sim sim;
// Message delay of 20% of a consensus heartbeat between all peers.
1059 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
1060
// Note the "behind" group (3) outnumbers the "ahead" group (2).
1061 PeerGroup behind = sim.createGroup(3);
1062 PeerGroup ahead = sim.createGroup(2);
1063 PeerGroup network = ahead + behind;
1064
// Every peer trusts every peer's validation key.
1065 hash_set<Peer::NodeKey_t> trustedKeys;
1066 for (Peer* p : network)
1067 trustedKeys.insert(p->key);
1068 for (Peer* p : network)
1069 p->trustedKeys = trustedKeys;
1070
1071 network.trustAndConnect(network, delay);
1072
1073 // Initial seed round to set prior state
1074 sim.run(1);
1075
1076 // Have the "behind" group initially take a really long time to
1077 // accept a ledger after ending deliberation
1078 for (Peer* p : behind)
1079 p->delays.ledgerAccept = 20s;
1080
1081 // Use the collector to revert the delay after the single
1082 // slow ledger is generated
1083 UndoDelay undoDelay{behind};
1084 sim.collectors.add(undoDelay);
1085
1086#if 0
1087 // Have all beast::journal output printed to stdout
1088 for (Peer* p : network)
1089 p->sink.threshold(beast::severities::kAll);
1090
1091 // Print ledger accept and fully validated events to stdout
1092 StreamCollector sc{std::cout};
1093 sim.collectors.add(sc);
1094#endif
1095 // Run the simulation for 100 seconds of simulation time
1096 std::chrono::nanoseconds const simDuration = 100s;
1097
1098 // Simulate clients submitting 1 tx every 5 seconds to a random
1099 // validator
1100 Rate const rate{1, 5s};
// Uniform weights: each of the 5 peers is equally likely to receive
// a submitted transaction.
1101 auto peerSelector = makeSelector(
1102 network.begin(),
1103 network.end(),
1104 std::vector<double>(network.size(), 1.),
1105 sim.rng);
1106 auto txSubmitter = makeSubmitter(
1107 ConstantDistribution{rate.inv()},
1108 sim.scheduler.now(),
1109 sim.scheduler.now() + simDuration,
1110 peerSelector,
1111 sim.scheduler,
1112 sim.rng);
1113
1114 // Run simulation
1115 sim.run(simDuration);
1116
1117 // Verify that the network recovered
1118 BEAST_EXPECT(sim.synchronized());
1119 }
1120
1121 void
// NOTE(review): the hyperlinked function-name line (orig. ~1122,
// `testDisputes()`) was stripped by this render; the name is
// confirmed by the call in run() below.
1123 {
1124 testcase("disputes");
1125
1126 using namespace csf;
1127
1128 // Test dispute objects directly
1129 using Dispute = DisputedTx<Tx, PeerID>;
1130
// Tx ids double as test labels: 99/98 are the "proposing" disputes,
// 97/96 the "following" (non-proposing) disputes.
1131 Tx const txTrue{99};
1132 Tx const txFalse{98};
1133 Tx const txFollowingTrue{97};
1134 Tx const txFollowingFalse{96};
1135 int const numPeers = 100;
// NOTE(review): orig. line ~1136 was stripped here — presumably the
// declaration of `p` (a ConsensusParms used by updateVote/stalled
// below); TODO confirm against the repository source.
1137 std::size_t peersUnchanged = 0;
1138
// NOTE(review): orig. lines ~1139 and ~1141 were stripped — they
// presumably declare the log setup (`logs`) and `clog` (a
// std::stringstream that stalled() writes its diagnostics into);
// TODO confirm.
1140 auto j = logs->journal("Test");
1142
1143 // Three cases:
1144 // 1 proposing, initial vote yes
1145 // 2 proposing, initial vote no
1146 // 3 not proposing, initial vote doesn't matter after the first update,
1147 // use yes
1148 {
1149 Dispute proposingTrue{txTrue.id(), true, numPeers, journal_};
1150 Dispute proposingFalse{txFalse.id(), false, numPeers, journal_};
1151 Dispute followingTrue{
1152 txFollowingTrue.id(), true, numPeers, journal_};
1153 Dispute followingFalse{
1154 txFollowingFalse.id(), false, numPeers, journal_};
1155 BEAST_EXPECT(proposingTrue.ID() == 99);
1156 BEAST_EXPECT(proposingFalse.ID() == 98);
1157 BEAST_EXPECT(followingTrue.ID() == 97);
1158 BEAST_EXPECT(followingFalse.ID() == 96);
1159
1160 // Create an even split in the peer votes
1161 for (int i = 0; i < numPeers; ++i)
1162 {
1163 BEAST_EXPECT(proposingTrue.setVote(PeerID(i), i < 50));
1164 BEAST_EXPECT(proposingFalse.setVote(PeerID(i), i < 50));
1165 BEAST_EXPECT(followingTrue.setVote(PeerID(i), i < 50));
1166 BEAST_EXPECT(followingFalse.setVote(PeerID(i), i < 50));
1167 }
1168 // Switch the middle vote to match mine
// After this, each dispute has a 51/49 split in favor of our own
// initial vote.
1169 BEAST_EXPECT(proposingTrue.setVote(PeerID(50), true));
1170 BEAST_EXPECT(proposingFalse.setVote(PeerID(49), false));
1171 BEAST_EXPECT(followingTrue.setVote(PeerID(50), true));
1172 BEAST_EXPECT(followingFalse.setVote(PeerID(49), false));
1173
1174 // no changes yet
1175 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1176 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1177 BEAST_EXPECT(followingTrue.getOurVote() == true);
1178 BEAST_EXPECT(followingFalse.getOurVote() == false);
// stalled() writes an explanation into clog only when it returns
// true, so clog stays empty throughout the non-stalled phase.
1179 BEAST_EXPECT(
1180 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1181 BEAST_EXPECT(
1182 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1183 BEAST_EXPECT(
1184 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1185 BEAST_EXPECT(
1186 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1187 BEAST_EXPECT(clog->str() == "");
1188
1189 // I'm in the majority, my vote should not change
// updateVote returns whether our vote flipped; the int argument is
// the elapsed consensus time driving the avalanche threshold.
1190 BEAST_EXPECT(!proposingTrue.updateVote(5, true, p));
1191 BEAST_EXPECT(!proposingFalse.updateVote(5, true, p));
1192 BEAST_EXPECT(!followingTrue.updateVote(5, false, p));
1193 BEAST_EXPECT(!followingFalse.updateVote(5, false, p));
1194
1195 BEAST_EXPECT(!proposingTrue.updateVote(10, true, p));
1196 BEAST_EXPECT(!proposingFalse.updateVote(10, true, p));
1197 BEAST_EXPECT(!followingTrue.updateVote(10, false, p));
1198 BEAST_EXPECT(!followingFalse.updateVote(10, false, p));
1199
1200 peersUnchanged = 2;
1201 BEAST_EXPECT(
1202 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1203 BEAST_EXPECT(
1204 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1205 BEAST_EXPECT(
1206 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1207 BEAST_EXPECT(
1208 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1209 BEAST_EXPECT(clog->str() == "");
1210
1211 // Right now, the vote is 51%. The requirement is about to jump to
1212 // 65%
// Only proposingTrue flips (51% < 65% and we're proposing); the
// "following" disputes don't flip because we're not proposing.
1213 BEAST_EXPECT(proposingTrue.updateVote(55, true, p));
1214 BEAST_EXPECT(!proposingFalse.updateVote(55, true, p));
1215 BEAST_EXPECT(!followingTrue.updateVote(55, false, p));
1216 BEAST_EXPECT(!followingFalse.updateVote(55, false, p));
1217
1218 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1219 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1220 BEAST_EXPECT(followingTrue.getOurVote() == true);
1221 BEAST_EXPECT(followingFalse.getOurVote() == false);
1222 // 16 validators change their vote to match my original vote
1223 for (int i = 0; i < 16; ++i)
1224 {
1225 auto pTrue = PeerID(numPeers - i - 1);
1226 auto pFalse = PeerID(i);
1227 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1228 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1229 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1230 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1231 }
1232 // The vote should now be 66%, threshold is 65%
1233 BEAST_EXPECT(proposingTrue.updateVote(60, true, p));
1234 BEAST_EXPECT(!proposingFalse.updateVote(60, true, p));
1235 BEAST_EXPECT(!followingTrue.updateVote(60, false, p));
1236 BEAST_EXPECT(!followingFalse.updateVote(60, false, p));
1237
1238 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1239 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1240 BEAST_EXPECT(followingTrue.getOurVote() == true);
1241 BEAST_EXPECT(followingFalse.getOurVote() == false);
1242
1243 // Threshold jumps to 70%
1244 BEAST_EXPECT(proposingTrue.updateVote(86, true, p));
1245 BEAST_EXPECT(!proposingFalse.updateVote(86, true, p));
1246 BEAST_EXPECT(!followingTrue.updateVote(86, false, p));
1247 BEAST_EXPECT(!followingFalse.updateVote(86, false, p));
1248
1249 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1250 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1251 BEAST_EXPECT(followingTrue.getOurVote() == true);
1252 BEAST_EXPECT(followingFalse.getOurVote() == false);
1253
1254 // 5 more validators change their vote to match my original vote
1255 for (int i = 16; i < 21; ++i)
1256 {
1257 auto pTrue = PeerID(numPeers - i - 1);
1258 auto pFalse = PeerID(i);
1259 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1260 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1261 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1262 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1263 }
1264
1265 // The vote should now be 71%, threshold is 70%
1266 BEAST_EXPECT(proposingTrue.updateVote(90, true, p));
1267 BEAST_EXPECT(!proposingFalse.updateVote(90, true, p));
1268 BEAST_EXPECT(!followingTrue.updateVote(90, false, p));
1269 BEAST_EXPECT(!followingFalse.updateVote(90, false, p));
1270
1271 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1272 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1273 BEAST_EXPECT(followingTrue.getOurVote() == true);
1274 BEAST_EXPECT(followingFalse.getOurVote() == false);
1275
1276 // The vote should now be 71%, threshold is 70%
1277 BEAST_EXPECT(!proposingTrue.updateVote(150, true, p));
1278 BEAST_EXPECT(!proposingFalse.updateVote(150, true, p));
1279 BEAST_EXPECT(!followingTrue.updateVote(150, false, p));
1280 BEAST_EXPECT(!followingFalse.updateVote(150, false, p));
1281
1282 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1283 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1284 BEAST_EXPECT(followingTrue.getOurVote() == true);
1285 BEAST_EXPECT(followingFalse.getOurVote() == false);
1286
1287 // The vote should now be 71%, threshold is 70%
1288 BEAST_EXPECT(!proposingTrue.updateVote(190, true, p));
1289 BEAST_EXPECT(!proposingFalse.updateVote(190, true, p));
1290 BEAST_EXPECT(!followingTrue.updateVote(190, false, p));
1291 BEAST_EXPECT(!followingFalse.updateVote(190, false, p));
1292
1293 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1294 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1295 BEAST_EXPECT(followingTrue.getOurVote() == true);
1296 BEAST_EXPECT(followingFalse.getOurVote() == false);
1297
1298 peersUnchanged = 3;
1299 BEAST_EXPECT(
1300 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1301 BEAST_EXPECT(
1302 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1303 BEAST_EXPECT(
1304 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1305 BEAST_EXPECT(
1306 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1307 BEAST_EXPECT(clog->str() == "");
1308
1309 // Threshold jumps to 95%
1310 BEAST_EXPECT(proposingTrue.updateVote(220, true, p));
1311 BEAST_EXPECT(!proposingFalse.updateVote(220, true, p));
1312 BEAST_EXPECT(!followingTrue.updateVote(220, false, p));
1313 BEAST_EXPECT(!followingFalse.updateVote(220, false, p));
1314
1315 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1316 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1317 BEAST_EXPECT(followingTrue.getOurVote() == true);
1318 BEAST_EXPECT(followingFalse.getOurVote() == false);
1319
1320 // 25 more validators change their vote to match my original vote
1321 for (int i = 21; i < 46; ++i)
1322 {
1323 auto pTrue = PeerID(numPeers - i - 1);
1324 auto pFalse = PeerID(i);
1325 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1326 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1327 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1328 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1329 }
1330
1331 // The vote should now be 96%, threshold is 95%
1332 BEAST_EXPECT(proposingTrue.updateVote(250, true, p));
1333 BEAST_EXPECT(!proposingFalse.updateVote(250, true, p));
1334 BEAST_EXPECT(!followingTrue.updateVote(250, false, p));
1335 BEAST_EXPECT(!followingFalse.updateVote(250, false, p));
1336
1337 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1338 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1339 BEAST_EXPECT(followingTrue.getOurVote() == true);
1340 BEAST_EXPECT(followingFalse.getOurVote() == false);
1341
// Note: this loop deliberately leaves peersUnchanged == 6 for the
// stalled() calls later in the test.
1342 for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged)
1343 {
1344 BEAST_EXPECT(
1345 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1346 BEAST_EXPECT(
1347 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1348 BEAST_EXPECT(
1349 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1350 BEAST_EXPECT(
1351 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1352 BEAST_EXPECT(clog->str() == "");
1353 }
1354
// Checks that the stall diagnostic written to clog mentions the
// expected tx id, our vote, the round counts, and the support
// percentage; `line` forwards the caller's __LINE__ so failures
// point at the call site.
1355 auto expectStalled = [this, &clog](
1356 int txid,
1357 bool ourVote,
1358 int ourTime,
1359 int peerTime,
1360 int support,
1361 std::uint32_t line) {
1362 using namespace std::string_literals;
1363
1364 auto const s = clog->str();
1365 expect(s.find("stalled"), s, __FILE__, line);
1366 expect(
1367 s.starts_with("Transaction "s + std::to_string(txid)),
1368 s,
1369 __FILE__,
1370 line);
1371 expect(
1372 s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos,
1373 s,
1374 __FILE__,
1375 line);
1376 expect(
1377 s.find("for "s + std::to_string(ourTime) + " rounds."s) !=
1378 s.npos,
1379 s,
1380 __FILE__,
1381 line);
1382 expect(
1383 s.find(
1384 "votes in "s + std::to_string(peerTime) + " rounds.") !=
1385 s.npos,
1386 s,
1387 __FILE__,
1388 line);
1389 expect(
1390 s.ends_with(
1391 "has "s + std::to_string(support) + "% support. "s),
1392 s,
1393 __FILE__,
1394 line);
// NOTE(review): orig. line ~1395 was stripped here — presumably
// it clears clog (e.g. clog->str("")) so each expectStalled call
// inspects only the most recent message; TODO confirm.
1396 };
1397
// Single-iteration loop kept in loop form for symmetry with the
// i = 1..2 and i = 3..4 phases below.
1398 for (int i = 0; i < 1; ++i)
1399 {
1400 BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
1401 BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
1402 BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
1403 BEAST_EXPECT(
1404 !followingFalse.updateVote(250 + 10 * i, false, p));
1405
1406 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1407 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1408 BEAST_EXPECT(followingTrue.getOurVote() == true);
1409 BEAST_EXPECT(followingFalse.getOurVote() == false);
1410
1411 // true vote has changed recently, so not stalled
1412 BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
1413 BEAST_EXPECT(clog->str() == "");
1414 // remaining votes have been unchanged in so long that we only
1415 // need to hit the second round at 95% to be stalled, regardless
1416 // of peers
1417 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1418 expectStalled(98, false, 11, 0, 2, __LINE__);
1419 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1420 expectStalled(97, true, 11, 0, 97, __LINE__);
1421 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1422 expectStalled(96, false, 11, 0, 3, __LINE__);
1423
1424 // true vote has changed recently, so not stalled
1425 BEAST_EXPECT(
1426 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1427 BEAST_EXPECTS(clog->str() == "", clog->str());
1428 // remaining votes have been unchanged in so long that we only
1429 // need to hit the second round at 95% to be stalled, regardless
1430 // of peers
1431 BEAST_EXPECT(
1432 proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1433 expectStalled(98, false, 11, 6, 2, __LINE__);
1434 BEAST_EXPECT(
1435 followingTrue.stalled(p, false, peersUnchanged, j, clog));
1436 expectStalled(97, true, 11, 6, 97, __LINE__);
1437 BEAST_EXPECT(
1438 followingFalse.stalled(p, false, peersUnchanged, j, clog));
1439 expectStalled(96, false, 11, 6, 3, __LINE__);
1440 }
1441 for (int i = 1; i < 3; ++i)
1442 {
1443 BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
1444 BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
1445 BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
1446 BEAST_EXPECT(
1447 !followingFalse.updateVote(250 + 10 * i, false, p));
1448
1449 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1450 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1451 BEAST_EXPECT(followingTrue.getOurVote() == true);
1452 BEAST_EXPECT(followingFalse.getOurVote() == false);
1453
1454 // true vote changed 2 rounds ago, and peers are changing, so
1455 // not stalled
1456 BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
1457 BEAST_EXPECTS(clog->str() == "", clog->str());
1458 // still stalled
1459 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1460 expectStalled(98, false, 11 + i, 0, 2, __LINE__);
1461 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1462 expectStalled(97, true, 11 + i, 0, 97, __LINE__);
1463 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1464 expectStalled(96, false, 11 + i, 0, 3, __LINE__);
1465
1466 // true vote changed 2 rounds ago, and peers are NOT changing,
1467 // so stalled
1468 BEAST_EXPECT(
1469 proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1470 expectStalled(99, true, 1 + i, 6, 97, __LINE__);
1471 // still stalled
1472 BEAST_EXPECT(
1473 proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1474 expectStalled(98, false, 11 + i, 6, 2, __LINE__);
1475 BEAST_EXPECT(
1476 followingTrue.stalled(p, false, peersUnchanged, j, clog));
1477 expectStalled(97, true, 11 + i, 6, 97, __LINE__);
1478 BEAST_EXPECT(
1479 followingFalse.stalled(p, false, peersUnchanged, j, clog));
1480 expectStalled(96, false, 11 + i, 6, 3, __LINE__);
1481 }
// Final phase: enough rounds have passed that every dispute is
// stalled, with or without peer-vote movement.
1482 for (int i = 3; i < 5; ++i)
1483 {
1484 BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
1485 BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
1486 BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
1487 BEAST_EXPECT(
1488 !followingFalse.updateVote(250 + 10 * i, false, p));
1489
1490 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1491 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1492 BEAST_EXPECT(followingTrue.getOurVote() == true);
1493 BEAST_EXPECT(followingFalse.getOurVote() == false);
1494
1495 BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog));
1496 expectStalled(99, true, 1 + i, 0, 97, __LINE__);
1497 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1498 expectStalled(98, false, 11 + i, 0, 2, __LINE__);
1499 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1500 expectStalled(97, true, 11 + i, 0, 97, __LINE__);
1501 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1502 expectStalled(96, false, 11 + i, 0, 3, __LINE__);
1503
1504 BEAST_EXPECT(
1505 proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1506 expectStalled(99, true, 1 + i, 6, 97, __LINE__);
1507 BEAST_EXPECT(
1508 proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1509 expectStalled(98, false, 11 + i, 6, 2, __LINE__);
1510 BEAST_EXPECT(
1511 followingTrue.stalled(p, false, peersUnchanged, j, clog));
1512 expectStalled(97, true, 11 + i, 6, 97, __LINE__);
1513 BEAST_EXPECT(
1514 followingFalse.stalled(p, false, peersUnchanged, j, clog));
1515 expectStalled(96, false, 11 + i, 6, 3, __LINE__);
1516 }
1517 }
1518 }
1519
1520 void
1521 run() override
// Test-suite entry point. Order matters: the two direct unit tests of
// the free functions (shouldCloseLedger / checkConsensus) run first,
// followed by the csf simulation scenarios from simplest (standalone)
// to most complex (disputes).
1522 {
1523 testShouldCloseLedger();
1524 testCheckConsensus();
1525
1526 testStandalone();
1527 testPeersAgree();
1528 testSlowPeers();
1529 testCloseTimeDisagree();
1530 testWrongLCL();
1531 testConsensusCloseTimeRounding();
1532 testFork();
1533 testHubNetwork();
1534 testPreferredByBranch();
1535 testPauseForLaggards();
1536 testDisputes();
1537 }
1538};
1539
1540BEAST_DEFINE_TESTSUITE(Consensus, consensus, ripple);
1541} // namespace test
1542} // namespace ripple
A testsuite class.
Definition suite.h:55
testcase_t testcase
Memberspace for declaring test cases.
Definition suite.h:155
Generic implementation of consensus algorithm.
Definition Consensus.h:298
A transaction discovered to be in dispute during consensus.
Definition DisputedTx.h:49
Represents a peer connection in the overlay.
virtual id_t id() const =0
void run() override
Runs the suite.
A group of simulation Peers.
Definition PeerGroup.h:42
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition PeerGroup.h:186
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition PeerGroup.h:166
T insert(T... args)
T is_same_v
typename SimClock::duration SimDuration
Definition SimTime.h:36
typename SimClock::time_point SimTime
Definition SimTime.h:37
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:25
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, bool stalled, ConsensusParms const &parms, bool proposing, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determine whether the network reached consensus and whether we joined.
@ Expired
Consensus time limit has hard-expired.
@ MovedOn
The network has consensus without us.
@ Yes
We have consensus along with the network.
@ No
We do not have consensus.
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determines whether the current ledger should close at this time.
Definition Consensus.cpp:27
auto constexpr increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
STL namespace.
Consensus algorithm parameters.
std::chrono::milliseconds const ledgerGRANULARITY
How often we check state or change positions.
Represents a transfer rate.
Definition Rate.h:40
void on(csf::PeerID, csf::SimTime, E const &)
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Peer accepted consensus results.
Definition events.h:121
Peer fully validated a new ledger.
Definition events.h:140
Ledger ledger
The new fully validated ledger.
Definition events.h:142
A single peer in the simulation.
T to_string(T... args)