// Consensus_test.cpp
// Unit tests for the generic consensus algorithm, exercised via the
// consensus simulation framework (test/csf).
1#include <test/csf.h>
2#include <test/unit_test/SuiteJournal.h>
3
4#include <xrpld/consensus/Consensus.h>
5
6#include <xrpl/beast/unit_test.h>
7#include <xrpl/json/to_string.h>
8
9namespace xrpl {
10namespace test {
11
13{
15
16public:
17 Consensus_test() : journal_("Consensus_test", *this)
18 {
19 }
20
21 void
23 {
24 using namespace std::chrono_literals;
25 testcase("should close ledger");
26
27 // Use default parameters
28 ConsensusParms const p{};
29
30 // Bizarre times forcibly close
31 BEAST_EXPECT(shouldCloseLedger(true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
32 BEAST_EXPECT(shouldCloseLedger(true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
33 BEAST_EXPECT(shouldCloseLedger(true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
34
35 // Rest of network has closed
36 BEAST_EXPECT(shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
37
38 // No transactions means wait until end of internval
39 BEAST_EXPECT(!shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
40 BEAST_EXPECT(shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
41
42 // Enforce minimum ledger open time
43 BEAST_EXPECT(!shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
44
45 // Don't go too much faster than last time
46 BEAST_EXPECT(!shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
47
48 BEAST_EXPECT(shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
49 }
50
51 void
53 {
54 using namespace std::chrono_literals;
55 testcase("check consensus");
56
57 // Use default parameters
58 ConsensusParms const p{};
59
61 // Disputes still in doubt
62 //
63 // Not enough time has elapsed
64 BEAST_EXPECT(ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 2s, false, p, true, journal_));
65
66 // If not enough peers have proposed, ensure
67 // more time for proposals
68 BEAST_EXPECT(ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 4s, false, p, true, journal_));
69
70 // Enough time has elapsed and we all agree
71 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(10, 2, 2, 0, 3s, 10s, false, p, true, journal_));
72
73 // Enough time has elapsed and we don't yet agree
74 BEAST_EXPECT(ConsensusState::No == checkConsensus(10, 2, 1, 0, 3s, 10s, false, p, true, journal_));
75
76 // Our peers have moved on
77 // Enough time has elapsed and we all agree
78 BEAST_EXPECT(ConsensusState::MovedOn == checkConsensus(10, 2, 1, 8, 3s, 10s, false, p, true, journal_));
79
80 // If no peers, don't agree until time has passed.
81 BEAST_EXPECT(ConsensusState::No == checkConsensus(0, 0, 0, 0, 3s, 10s, false, p, true, journal_));
82
83 // Agree if no peers and enough time has passed.
84 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(0, 0, 0, 0, 3s, 16s, false, p, true, journal_));
85
86 // Expire if too much time has passed without agreement
87 BEAST_EXPECT(ConsensusState::Expired == checkConsensus(10, 8, 1, 0, 1s, 19s, false, p, true, journal_));
88
90 // Stalled
91 //
92 // Not enough time has elapsed
93 BEAST_EXPECT(ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 2s, true, p, true, journal_));
94
95 // If not enough peers have proposed, ensure
96 // more time for proposals
97 BEAST_EXPECT(ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 4s, true, p, true, journal_));
98
99 // Enough time has elapsed and we all agree
100 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(10, 2, 2, 0, 3s, 10s, true, p, true, journal_));
101
102 // Enough time has elapsed and we don't yet agree, but there's nothing
103 // left to dispute
104 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(10, 2, 1, 0, 3s, 10s, true, p, true, journal_));
105
106 // Our peers have moved on
107 // Enough time has elapsed and we all agree, nothing left to dispute
108 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(10, 2, 1, 8, 3s, 10s, true, p, true, journal_));
109
110 // If no peers, don't agree until time has passed.
111 BEAST_EXPECT(ConsensusState::No == checkConsensus(0, 0, 0, 0, 3s, 10s, true, p, true, journal_));
112
113 // Agree if no peers and enough time has passed.
114 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(0, 0, 0, 0, 3s, 16s, true, p, true, journal_));
115
116 // We are done if there's nothing left to dispute, no matter how much
117 // time has passed
118 BEAST_EXPECT(ConsensusState::Yes == checkConsensus(10, 8, 1, 0, 1s, 19s, true, p, true, journal_));
119 }
120
121 void
123 {
124 using namespace std::chrono_literals;
125 using namespace csf;
126 testcase("standalone");
127
128 Sim s;
129 PeerGroup peers = s.createGroup(1);
130 Peer* peer = peers[0];
131 peer->targetLedgers = 1;
132 peer->start();
133 peer->submit(Tx{1});
134
135 s.scheduler.step();
136
137 // Inspect that the proper ledger was created
138 auto const& lcl = peer->lastClosedLedger;
139 BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
140 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
141 BEAST_EXPECT(lcl.txs().size() == 1);
142 BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
143 BEAST_EXPECT(peer->prevProposers == 0);
144 }
145
146 void
148 {
149 using namespace csf;
150 using namespace std::chrono;
151 testcase("peers agree");
152
153 ConsensusParms const parms{};
154 Sim sim;
155 PeerGroup peers = sim.createGroup(5);
156
157 // Connected trust and network graphs with single fixed delay
158 peers.trustAndConnect(peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
159
160 // everyone submits their own ID as a TX
161 for (Peer* p : peers)
162 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
163
164 sim.run(1);
165
166 // All peers are in sync
167 if (BEAST_EXPECT(sim.synchronized()))
168 {
169 for (Peer const* peer : peers)
170 {
171 auto const& lcl = peer->lastClosedLedger;
172 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
173 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
174 // All peers proposed
175 BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
176 // All transactions were accepted
177 for (std::uint32_t i = 0; i < peers.size(); ++i)
178 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
179 }
180 }
181 }
182
183 void
185 {
186 using namespace csf;
187 using namespace std::chrono;
188 testcase("slow peers");
189
190 // Several tests of a complete trust graph with a subset of peers
191 // that have significantly longer network delays to the rest of the
192 // network
193
194 // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
195 {
196 ConsensusParms const parms{};
197 Sim sim;
198 PeerGroup slow = sim.createGroup(1);
199 PeerGroup fast = sim.createGroup(4);
200 PeerGroup network = fast + slow;
201
202 // Fully connected trust graph
203 network.trust(network);
204
205 // Fast and slow network connections
206 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
207
208 slow.connect(network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
209
210 // All peers submit their own ID as a transaction
211 for (Peer* peer : network)
212 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
213
214 sim.run(1);
215
216 // Verify all peers have same LCL but are missing transaction 0
217 // All peers are in sync even with a slower peer 0
218 if (BEAST_EXPECT(sim.synchronized()))
219 {
220 for (Peer* peer : network)
221 {
222 auto const& lcl = peer->lastClosedLedger;
223 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
224 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
225
226 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
227 BEAST_EXPECT(peer->prevRoundTime == network[0]->prevRoundTime);
228
229 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
230 for (std::uint32_t i = 2; i < network.size(); ++i)
231 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
232
233 // Tx 0 didn't make it
234 BEAST_EXPECT(peer->openTxs.find(Tx{0}) != peer->openTxs.end());
235 }
236 }
237 }
238
239 // Test when the slow peers delay a consensus quorum (4/6 agree)
240 {
241 // Run two tests
242 // 1. The slow peers are participating in consensus
243 // 2. The slow peers are just observing
244
245 for (auto isParticipant : {true, false})
246 {
247 ConsensusParms const parms{};
248
249 Sim sim;
250 PeerGroup slow = sim.createGroup(2);
251 PeerGroup fast = sim.createGroup(4);
252 PeerGroup network = fast + slow;
253
254 // Connected trust graph
255 network.trust(network);
256
257 // Fast and slow network connections
258 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
259
260 slow.connect(network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
261
262 for (Peer* peer : slow)
263 peer->runAsValidator = isParticipant;
264
265 // All peers submit their own ID as a transaction and relay it
266 // to peers
267 for (Peer* peer : network)
268 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
269
270 sim.run(1);
271
272 if (BEAST_EXPECT(sim.synchronized()))
273 {
274 // Verify all peers have same LCL but are missing
275 // transaction 0,1 which was not received by all peers
276 // before the ledger closed
277 for (Peer* peer : network)
278 {
279 // Closed ledger has all but transaction 0,1
280 auto const& lcl = peer->lastClosedLedger;
281 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
282 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
283 BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
284 for (std::uint32_t i = slow.size(); i < network.size(); ++i)
285 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
286
287 // Tx 0-1 didn't make it
288 BEAST_EXPECT(peer->openTxs.find(Tx{0}) != peer->openTxs.end());
289 BEAST_EXPECT(peer->openTxs.find(Tx{1}) != peer->openTxs.end());
290 }
291
292 Peer const* slowPeer = slow[0];
293 if (isParticipant)
294 BEAST_EXPECT(slowPeer->prevProposers == network.size() - 1);
295 else
296 BEAST_EXPECT(slowPeer->prevProposers == fast.size());
297
298 for (Peer* peer : fast)
299 {
300 // Due to the network link delay settings
301 // Peer 0 initially proposes {0}
302 // Peer 1 initially proposes {1}
303 // Peers 2-5 initially propose {2,3,4,5}
304 // Since peers 2-5 agree, 4/6 > the initial 50% needed
305 // to include a disputed transaction, so Peer 0/1 switch
306 // to agree with those peers. Peer 0/1 then closes with
307 // an 80% quorum of agreeing positions (5/6) match.
308 //
309 // Peers 2-5 do not change position, since tx 0 or tx 1
310 // have less than the 50% initial threshold. They also
311 // cannot declare consensus, since 4/6 agreeing
312 // positions are < 80% threshold. They therefore need an
313 // additional timerEntry call to see the updated
314 // positions from Peer 0 & 1.
315
316 if (isParticipant)
317 {
318 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
319 BEAST_EXPECT(peer->prevRoundTime > slowPeer->prevRoundTime);
320 }
321 else
322 {
323 BEAST_EXPECT(peer->prevProposers == fast.size() - 1);
324 // so all peers should have closed together
325 BEAST_EXPECT(peer->prevRoundTime == slowPeer->prevRoundTime);
326 }
327 }
328 }
329 }
330 }
331 }
332
333 void
335 {
336 using namespace csf;
337 using namespace std::chrono;
338 testcase("close time disagree");
339
340 // This is a very specialized test to get ledgers to disagree on
341 // the close time. It unfortunately assumes knowledge about current
342 // timing constants. This is a necessary evil to get coverage up
343 // pending more extensive refactorings of timing constants.
344
345 // In order to agree-to-disagree on the close time, there must be no
346 // clear majority of nodes agreeing on a close time. This test
347 // sets a relative offset to the peers internal clocks so that they
348 // send proposals with differing times.
349
350 // However, agreement is on the effective close time, not the
351 // exact close time. The minimum closeTimeResolution is given by
352 // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
353 // the skews need to be at least 10 seconds to have different effective
354 // close times.
355
356 // Complicating this matter is that nodes will ignore proposals
357 // with times more than proposeFRESHNESS =20s in the past. So at
358 // the minimum granularity, we have at most 3 types of skews
359 // (0s,10s,20s).
360
361 // This test therefore has 6 nodes, with 2 nodes having each type of
362 // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
363 // actual close time.
364
365 ConsensusParms const parms{};
366 Sim sim;
367
368 PeerGroup groupA = sim.createGroup(2);
369 PeerGroup groupB = sim.createGroup(2);
370 PeerGroup groupC = sim.createGroup(2);
371 PeerGroup network = groupA + groupB + groupC;
372
373 network.trust(network);
374 network.connect(network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
375
376 // Run consensus without skew until we have a short close time
377 // resolution
378 Peer* firstPeer = *groupA.begin();
379 while (firstPeer->lastClosedLedger.closeTimeResolution() >= parms.proposeFRESHNESS)
380 sim.run(1);
381
382 // Introduce a shift on the time of 2/3 of peers
383 for (Peer* peer : groupA)
384 peer->clockSkew = parms.proposeFRESHNESS / 2;
385 for (Peer* peer : groupB)
386 peer->clockSkew = parms.proposeFRESHNESS;
387
388 sim.run(1);
389
390 // All nodes agreed to disagree on the close time
391 if (BEAST_EXPECT(sim.synchronized()))
392 {
393 for (Peer* peer : network)
394 BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
395 }
396 }
397
398 void
400 {
401 using namespace csf;
402 using namespace std::chrono;
403 testcase("wrong LCL");
404
405 // Specialized test to exercise a temporary fork in which some peers
406 // are working on an incorrect prior ledger.
407
408 ConsensusParms const parms{};
409
410 // Vary the time it takes to process validations to exercise detecting
411 // the wrong LCL at different phases of consensus
412 for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
413 {
414 // Consider 10 peers:
415 // 0 1 2 3 4 5 6 7 8 9
416 // minority majorityA majorityB
417 //
418 // Nodes 0-1 trust nodes 0-4
419 // Nodes 2-9 trust nodes 2-9
420 //
421 // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
422 // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
423 // nodes will instead accept the ledger with tx 1.
424
425 // Nodes 0-1 will detect this mismatch during a subsequent round
426 // since nodes 2-4 will validate a different ledger.
427
428 // Nodes 0-1 will acquire the proper ledger from the network and
429 // resume consensus and eventually generate the dominant network
430 // ledger.
431
432 // This topology can potentially fork with the above trust relations
433 // but that is intended for this test.
434
435 Sim sim;
436
437 PeerGroup minority = sim.createGroup(2);
438 PeerGroup majorityA = sim.createGroup(3);
439 PeerGroup majorityB = sim.createGroup(5);
440
441 PeerGroup majority = majorityA + majorityB;
442 PeerGroup network = minority + majority;
443
444 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
445 minority.trustAndConnect(minority + majorityA, delay);
446 majority.trustAndConnect(majority, delay);
447
448 CollectByNode<JumpCollector> jumps;
449 sim.collectors.add(jumps);
450
451 BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
452
453 // initial round to set prior state
454 sim.run(1);
455
456 // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
457 // tx 1
458 for (Peer* peer : network)
459 peer->delays.recvValidation = validationDelay;
460 for (Peer* peer : (minority + majorityA))
461 peer->openTxs.insert(Tx{0});
462 for (Peer* peer : majorityB)
463 peer->openTxs.insert(Tx{1});
464
465 // Run for additional rounds
466 // With no validation delay, only 2 more rounds are needed.
467 // 1. Round to generate different ledgers
468 // 2. Round to detect different prior ledgers (but still generate
469 // wrong ones) and recover within that round since wrong LCL
470 // is detected before we close
471 //
472 // With a validation delay of ledgerMIN_CLOSE, we need 3 more
473 // rounds.
474 // 1. Round to generate different ledgers
475 // 2. Round to detect different prior ledgers (but still generate
476 // wrong ones) but end up declaring consensus on wrong LCL (but
477 // with the right transaction set!). This is because we detect
478 // the wrong LCL after we have closed the ledger, so we declare
479 // consensus based solely on our peer proposals. But we haven't
480 // had time to acquire the right ledger.
481 // 3. Round to correct
482 sim.run(3);
483
484 // The network never actually forks, since node 0-1 never see a
485 // quorum of validations to fully validate the incorrect chain.
486
487 // However, for a non zero-validation delay, the network is not
488 // synchronized because nodes 0 and 1 are running one ledger behind
489 if (BEAST_EXPECT(sim.branches() == 1))
490 {
491 for (Peer const* peer : majority)
492 {
493 // No jumps for majority nodes
494 BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
495 BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
496 }
497 for (Peer const* peer : minority)
498 {
499 auto& peerJumps = jumps[peer->id];
500 // last closed ledger jump between chains
501 {
502 if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
503 {
504 JumpCollector::Jump const& jump = peerJumps.closeJumps.front();
505 // Jump is to a different chain
506 BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
507 BEAST_EXPECT(!jump.to.isAncestor(jump.from));
508 }
509 }
510 // fully validated jump forward in same chain
511 {
512 if (BEAST_EXPECT(peerJumps.fullyValidatedJumps.size() == 1))
513 {
514 JumpCollector::Jump const& jump = peerJumps.fullyValidatedJumps.front();
515 // Jump is to a different chain with same seq
516 BEAST_EXPECT(jump.from.seq() < jump.to.seq());
517 BEAST_EXPECT(jump.to.isAncestor(jump.from));
518 }
519 }
520 }
521 }
522 }
523
524 {
525 // Additional test engineered to switch LCL during the establish
526 // phase. This was added to trigger a scenario that previously
527 // crashed, in which switchLCL switched from establish to open
528 // phase, but still processed the establish phase logic.
529
530 // Loner node will accept an initial ledger A, but all other nodes
531 // accept ledger B a bit later. By delaying the time it takes
532 // to process a validation, loner node will detect the wrongLCL
533 // after it is already in the establish phase of the next round.
534
535 Sim sim;
536 PeerGroup loner = sim.createGroup(1);
537 PeerGroup friends = sim.createGroup(3);
538 loner.trust(loner + friends);
539
540 PeerGroup others = sim.createGroup(6);
541 PeerGroup clique = friends + others;
542 clique.trust(clique);
543
544 PeerGroup network = loner + clique;
545 network.connect(network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
546
547 // initial round to set prior state
548 sim.run(1);
549 for (Peer* peer : (loner + friends))
550 peer->openTxs.insert(Tx(0));
551 for (Peer* peer : others)
552 peer->openTxs.insert(Tx(1));
553
554 // Delay validation processing
555 for (Peer* peer : network)
556 peer->delays.recvValidation = parms.ledgerGRANULARITY;
557
558 // additional rounds to generate wrongLCL and recover
559 sim.run(2);
560
561 // Check all peers recovered
562 for (Peer* p : network)
563 BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
564 }
565 }
566
567 void
569 {
570 using namespace csf;
571 using namespace std::chrono;
572 testcase("consensus close time rounding");
573
574 // This is a specialized test engineered to yield ledgers with different
575 // close times even though the peers believe they had close time
576 // consensus on the ledger.
577 ConsensusParms parms;
578
579 Sim sim;
580
581 // This requires a group of 4 fast and 2 slow peers to create a
582 // situation in which a subset of peers requires seeing additional
583 // proposals to declare consensus.
584 PeerGroup slow = sim.createGroup(2);
585 PeerGroup fast = sim.createGroup(4);
586 PeerGroup network = fast + slow;
587
588 // Connected trust graph
589 network.trust(network);
590
591 // Fast and slow network connections
592 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
593 slow.connect(network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
594
595 // Run to the ledger *prior* to decreasing the resolution
597
598 // In order to create the discrepancy, we want a case where if
599 // X = effCloseTime(closeTime, resolution, parentCloseTime)
600 // X != effCloseTime(X, resolution, parentCloseTime)
601 //
602 // That is, the effective close time is not a fixed point. This can
603 // happen if X = parentCloseTime + 1, but a subsequent rounding goes
604 // to the next highest multiple of resolution.
605
606 // So we want to find an offset (now + offset) % 30s = 15
607 // (now + offset) % 20s = 15
608 // This way, the next ledger will close and round up Due to the
609 // network delay settings, the round of consensus will take 5s, so
610 // the next ledger's close time will
611
612 NetClock::duration when = network[0]->now().time_since_epoch();
613
614 // Check we are before the 30s to 20s transition
615 NetClock::duration resolution = network[0]->lastClosedLedger.closeTimeResolution();
616 BEAST_EXPECT(resolution == NetClock::duration{30s});
617
618 while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
619 ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
620 when += 1s;
621 // Advance the clock without consensus running (IS THIS WHAT
622 // PREVENTS IT IN PRACTICE?)
623 sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
624
625 // Run one more ledger with 30s resolution
626 sim.run(1);
627 if (BEAST_EXPECT(sim.synchronized()))
628 {
629 // close time should be ahead of clock time since we engineered
630 // the close time to round up
631 for (Peer* peer : network)
632 {
633 BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
634 BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
635 }
636 }
637
638 // All peers submit their own ID as a transaction
639 for (Peer* peer : network)
640 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
641
642 // Run 1 more round, this time it will have a decreased
643 // resolution of 20 seconds.
644
645 // The network delays are engineered so that the slow peers
646 // initially have the wrong tx hash, but they see a majority
647 // of agreement from their peers and declare consensus
648 //
649 // The trick is that everyone starts with a raw close time of
650 // 84681s
651 // Which has
652 // effCloseTime(86481s, 20s, 86490s) = 86491s
653 // However, when the slow peers update their position, they change
654 // the close time to 86451s. The fast peers declare consensus with
655 // the 86481s as their position still.
656 //
657 // When accepted the ledger
658 // - fast peers use eff(86481s) -> 86491s as the close time
659 // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
660
661 sim.run(1);
662
663 BEAST_EXPECT(sim.synchronized());
664 }
665
666 void
668 {
669 using namespace csf;
670 using namespace std::chrono;
671 testcase("fork");
672
673 std::uint32_t numPeers = 10;
674 // Vary overlap between two UNLs
675 for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
676 {
677 ConsensusParms const parms{};
678 Sim sim;
679
680 std::uint32_t numA = (numPeers - overlap) / 2;
681 std::uint32_t numB = numPeers - numA - overlap;
682
683 PeerGroup aOnly = sim.createGroup(numA);
684 PeerGroup bOnly = sim.createGroup(numB);
685 PeerGroup commonOnly = sim.createGroup(overlap);
686
687 PeerGroup a = aOnly + commonOnly;
688 PeerGroup b = bOnly + commonOnly;
689
690 PeerGroup network = a + b;
691
692 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
693 a.trustAndConnect(a, delay);
694 b.trustAndConnect(b, delay);
695
696 // Initial round to set prior state
697 sim.run(1);
698 for (Peer* peer : network)
699 {
700 // Nodes have only seen transactions from their neighbors
701 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
702 for (Peer* to : sim.trustGraph.trustedPeers(peer))
703 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(to->id)});
704 }
705 sim.run(1);
706
707 // Fork should not happen for 40% or greater overlap
708 // Since the overlapped nodes have a UNL that is the union of the
709 // two cliques, the maximum sized UNL list is the number of peers
710 if (overlap > 0.4 * numPeers)
711 BEAST_EXPECT(sim.synchronized());
712 else
713 {
714 // Even if we do fork, there shouldn't be more than 3 ledgers
715 // One for cliqueA, one for cliqueB and one for nodes in both
716 BEAST_EXPECT(sim.branches() <= 3);
717 }
718 }
719 }
720
721 void
723 {
724 using namespace csf;
725 using namespace std::chrono;
726 testcase("hub network");
727
728 // Simulate a set of 5 validators that aren't directly connected but
729 // rely on a single hub node for communication
730
731 ConsensusParms const parms{};
732 Sim sim;
733 PeerGroup validators = sim.createGroup(5);
734 PeerGroup center = sim.createGroup(1);
735 validators.trust(validators);
736 center.trust(validators);
737
738 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
739 validators.connect(center, delay);
740
741 center[0]->runAsValidator = false;
742
743 // prep round to set initial state.
744 sim.run(1);
745
746 // everyone submits their own ID as a TX and relay it to peers
747 for (Peer* p : validators)
748 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
749
750 sim.run(1);
751
752 // All peers are in sync
753 BEAST_EXPECT(sim.synchronized());
754 }
755
756 // Helper collector for testPreferredByBranch
757 // Invasively disconnects network at bad times to cause splits
759 {
764 bool reconnected = false;
765
767 : network(net), groupCfast(c), groupCsplit(split), delay(d)
768 {
769 }
770
771 template <class E>
772 void
774 {
775 }
776
777 void
779 {
780 using namespace std::chrono;
781 // As soon as the fastC node fully validates C, disconnect
782 // ALL c nodes from the network. The fast C node needs to disconnect
783 // as well to prevent it from relaying the validations it did see
784 if (who == groupCfast[0]->id && e.ledger.seq() == csf::Ledger::Seq{2})
785 {
786 network.disconnect(groupCsplit);
787 network.disconnect(groupCfast);
788 }
789 }
790
791 void
793 {
794 // As soon as anyone generates a child of B or C, reconnect the
795 // network so those validations make it through
796 if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
797 {
798 reconnected = true;
799 network.connect(groupCsplit, delay);
800 }
801 }
802 };
803
804 void
806 {
807 using namespace csf;
808 using namespace std::chrono;
809 testcase("preferred by branch");
810
811 // Simulate network splits that are prevented from forking when using
812 // preferred ledger by trie. This is a contrived example that involves
813 // excessive network splits, but demonstrates the safety improvement
814 // from the preferred ledger by trie approach.
815
816 // Consider 10 validating nodes that comprise a single common UNL
817 // Ledger history:
818 // 1: A
819 // _/ \_
820 // 2: B C
821 // _/ _/ \_
822 // 3: D C' |||||||| (8 different ledgers)
823
824 // - All nodes generate the common ledger A
825 // - 2 nodes generate B and 8 nodes generate C
826 // - Only 1 of the C nodes sees all the C validations and fully
827 // validates C. The rest of the C nodes split at just the right time
828 // such that they never see any C validations but their own.
829 // - The C nodes continue and generate 8 different child ledgers.
830 // - Meanwhile, the D nodes only saw 1 validation for C and 2
831 // validations
832 // for B.
833 // - The network reconnects and the validations for generation 3 ledgers
834 // are observed (D and the 8 C's)
835 // - In the old approach, 2 votes for D outweighs 1 vote for each C'
836 // so the network would avalanche towards D and fully validate it
837 // EVEN though C was fully validated by one node
838 // - In the new approach, 2 votes for D are not enough to outweight the
839 // 8 implicit votes for C, so nodes will avalanche to C instead
840
841 ConsensusParms const parms{};
842 Sim sim;
843
844 // Goes A->B->D
845 PeerGroup groupABD = sim.createGroup(2);
846 // Single node that initially fully validates C before the split
847 PeerGroup groupCfast = sim.createGroup(1);
848 // Generates C, but fails to fully validate before the split
849 PeerGroup groupCsplit = sim.createGroup(7);
850
851 PeerGroup groupNotFastC = groupABD + groupCsplit;
852 PeerGroup network = groupABD + groupCsplit + groupCfast;
853
854 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
855 SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
856
857 network.trust(network);
858 // C must have a shorter delay to see all the validations before the
859 // other nodes
860 network.connect(groupCfast, fDelay);
861 // The rest of the network is connected at the same speed
862 groupNotFastC.connect(groupNotFastC, delay);
863
864 Disruptor dc(network, groupCfast, groupCsplit, delay);
865 sim.collectors.add(dc);
866
867 // Consensus round to generate ledger A
868 sim.run(1);
869 BEAST_EXPECT(sim.synchronized());
870
871 // Next round generates B and C
872 // To force B, we inject an extra transaction in to those nodes
873 for (Peer* peer : groupABD)
874 {
875 peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
876 }
877 // The Disruptor will ensure that nodes disconnect before the C
878 // validations make it to all but the fastC node
879 sim.run(1);
880
881 // We are no longer in sync, but have not yet forked:
882 // 9 nodes consider A the last fully validated ledger and fastC sees C
883 BEAST_EXPECT(!sim.synchronized());
884 BEAST_EXPECT(sim.branches() == 1);
885
886 // Run another round to generate the 8 different C' ledgers
887 for (Peer* p : network)
888 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
889 sim.run(1);
890
891 // Still not forked
892 BEAST_EXPECT(!sim.synchronized());
893 BEAST_EXPECT(sim.branches() == 1);
894
895 // Disruptor will reconnect all but the fastC node
896 sim.run(1);
897
898 if (BEAST_EXPECT(sim.branches() == 1))
899 {
900 BEAST_EXPECT(sim.synchronized());
901 }
902 else // old approach caused a fork
903 {
904 BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
905 BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
906 }
907 }
908
909 // Helper collector for testPauseForLaggards
910 // This will remove the ledgerAccept delay used to
911 // initially create the slow vs. fast validator groups.
913 {
915
917 {
918 }
919
920 template <class E>
921 void
923 {
924 }
925
926 void
928 {
929 for (csf::Peer* p : g)
930 {
931 if (p->id == who)
932 p->delays.ledgerAccept = std::chrono::seconds{0};
933 }
934 }
935 };
936
937 void
939 {
940 using namespace csf;
941 using namespace std::chrono;
942 testcase("pause for laggards");
943
944 // Test that validators that jump ahead of the network slow
945 // down.
946
947 // We engineer the following validated ledger history scenario:
948 //
949 // / --> B1 --> C1 --> ... -> G1 "ahead"
950 // A
951 // \ --> B2 --> C2 "behind"
952 //
953 // After validating a common ledger A, a set of "behind" validators
954 // briefly run slower and validate the lower chain of ledgers.
955 // The "ahead" validators run normal speed and run ahead validating the
956 // upper chain of ledgers.
957 //
958 // Due to the uncommitted support definition of the preferred branch
959 // protocol, even if the "behind" validators are a majority, the "ahead"
960 // validators cannot jump to the proper branch until the "behind"
961 // validators catch up to the same sequence number. For this test to
962 // succeed, the ahead validators need to briefly slow down consensus.
963
964 ConsensusParms const parms{};
965 Sim sim;
966 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
967
968 PeerGroup behind = sim.createGroup(3);
969 PeerGroup ahead = sim.createGroup(2);
970 PeerGroup network = ahead + behind;
971
972 hash_set<Peer::NodeKey_t> trustedKeys;
973 for (Peer* p : network)
974 trustedKeys.insert(p->key);
975 for (Peer* p : network)
976 p->trustedKeys = trustedKeys;
977
978 network.trustAndConnect(network, delay);
979
980 // Initial seed round to set prior state
981 sim.run(1);
982
983 // Have the "behind" group initially take a really long time to
984 // accept a ledger after ending deliberation
985 for (Peer* p : behind)
986 p->delays.ledgerAccept = 20s;
987
988 // Use the collector to revert the delay after the single
989 // slow ledger is generated
990 UndoDelay undoDelay{behind};
991 sim.collectors.add(undoDelay);
992
993#if 0
994 // Have all beast::journal output printed to stdout
995 for (Peer* p : network)
996 p->sink.threshold(beast::severities::kAll);
997
998 // Print ledger accept and fully validated events to stdout
999 StreamCollector sc{std::cout};
1000 sim.collectors.add(sc);
1001#endif
1002 // Run the simulation for 100 seconds of simulation time with
1003 std::chrono::nanoseconds const simDuration = 100s;
1004
1005 // Simulate clients submitting 1 tx every 5 seconds to a random
1006 // validator
1007 Rate const rate{1, 5s};
1008 auto peerSelector =
1009 makeSelector(network.begin(), network.end(), std::vector<double>(network.size(), 1.), sim.rng);
1010 auto txSubmitter = makeSubmitter(
1011 ConstantDistribution{rate.inv()},
1012 sim.scheduler.now(),
1013 sim.scheduler.now() + simDuration,
1014 peerSelector,
1015 sim.scheduler,
1016 sim.rng);
1017
1018 // Run simulation
1019 sim.run(simDuration);
1020
1021 // Verify that the network recovered
1022 BEAST_EXPECT(sim.synchronized());
1023 }
1024
    // Exercises DisputedTx directly: how our vote flips as the required
    // agreement threshold ramps up over consensus rounds
    // (50% -> 65% -> 70% -> 95%), and how "stalled" detection triggers
    // once neither we nor our peers have changed votes for enough rounds.
    //
    // NOTE(review): extraction appears to have dropped several lines in
    // this function: the signature line (presumably `testDisputes()`),
    // and the declarations of `p` (a ConsensusParms), `clog` (apparently
    // a std::unique_ptr<std::stringstream> given `clog->str()` below),
    // and `logs` (used to build journal `j`). Restore from upstream
    // before compiling.
    void
    {
        testcase("disputes");

        using namespace csf;

        // Test dispute objects directly
        using Dispute = DisputedTx<Tx, PeerID>;

        // The tx id doubles as the Dispute ID (checked below).
        Tx const txTrue{99};
        Tx const txFalse{98};
        Tx const txFollowingTrue{97};
        Tx const txFollowingFalse{96};
        int const numPeers = 100;
        std::size_t peersUnchanged = 0;

        auto j = logs->journal("Test");

        // Three cases:
        // 1 proposing, initial vote yes
        // 2 proposing, initial vote no
        // 3 not proposing, initial vote doesn't matter after the first update,
        // use yes
        {
            Dispute proposingTrue{txTrue.id(), true, numPeers, journal_};
            Dispute proposingFalse{txFalse.id(), false, numPeers, journal_};
            Dispute followingTrue{txFollowingTrue.id(), true, numPeers, journal_};
            Dispute followingFalse{txFollowingFalse.id(), false, numPeers, journal_};
            BEAST_EXPECT(proposingTrue.ID() == 99);
            BEAST_EXPECT(proposingFalse.ID() == 98);
            BEAST_EXPECT(followingTrue.ID() == 97);
            BEAST_EXPECT(followingFalse.ID() == 96);

            // Create an even split in the peer votes
            for (int i = 0; i < numPeers; ++i)
            {
                BEAST_EXPECT(proposingTrue.setVote(PeerID(i), i < 50));
                BEAST_EXPECT(proposingFalse.setVote(PeerID(i), i < 50));
                BEAST_EXPECT(followingTrue.setVote(PeerID(i), i < 50));
                BEAST_EXPECT(followingFalse.setVote(PeerID(i), i < 50));
            }
            // Switch the middle vote to match mine
            BEAST_EXPECT(proposingTrue.setVote(PeerID(50), true));
            BEAST_EXPECT(proposingFalse.setVote(PeerID(49), false));
            BEAST_EXPECT(followingTrue.setVote(PeerID(50), true));
            BEAST_EXPECT(followingFalse.setVote(PeerID(49), false));

            // no changes yet
            BEAST_EXPECT(proposingTrue.getOurVote() == true);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);
            BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
            BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
            BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
            BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
            // Nothing stalled yet, so no log output is expected.
            BEAST_EXPECT(clog->str() == "");

            // I'm in the majority, my vote should not change
            BEAST_EXPECT(!proposingTrue.updateVote(5, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(5, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(5, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(5, false, p));

            BEAST_EXPECT(!proposingTrue.updateVote(10, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(10, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(10, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(10, false, p));

            peersUnchanged = 2;
            BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
            BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
            BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
            BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
            BEAST_EXPECT(clog->str() == "");

            // Right now, the vote is 51%. The requirement is about to jump to
            // 65%
            BEAST_EXPECT(proposingTrue.updateVote(55, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(55, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(55, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(55, false, p));

            // Only the proposing "yes" vote flips: 51% support no longer
            // clears the raised threshold. Followers never flip.
            BEAST_EXPECT(proposingTrue.getOurVote() == false);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);
            // 16 validators change their vote to match my original vote
            for (int i = 0; i < 16; ++i)
            {
                auto pTrue = PeerID(numPeers - i - 1);
                auto pFalse = PeerID(i);
                BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
                BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
                BEAST_EXPECT(followingTrue.setVote(pTrue, true));
                BEAST_EXPECT(followingFalse.setVote(pFalse, false));
            }
            // The vote should now be 66%, threshold is 65%
            BEAST_EXPECT(proposingTrue.updateVote(60, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(60, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(60, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(60, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == true);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            // Threshold jumps to 70%
            BEAST_EXPECT(proposingTrue.updateVote(86, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(86, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(86, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(86, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == false);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            // 5 more validators change their vote to match my original vote
            for (int i = 16; i < 21; ++i)
            {
                auto pTrue = PeerID(numPeers - i - 1);
                auto pFalse = PeerID(i);
                BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
                BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
                BEAST_EXPECT(followingTrue.setVote(pTrue, true));
                BEAST_EXPECT(followingFalse.setVote(pFalse, false));
            }

            // The vote should now be 71%, threshold is 70%
            BEAST_EXPECT(proposingTrue.updateVote(90, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(90, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(90, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(90, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == true);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            // The vote should now be 71%, threshold is 70%
            // (no vote change at round 150, so updateVote returns false)
            BEAST_EXPECT(!proposingTrue.updateVote(150, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(150, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(150, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(150, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == true);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            // The vote should now be 71%, threshold is 70%
            BEAST_EXPECT(!proposingTrue.updateVote(190, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(190, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(190, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(190, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == true);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            peersUnchanged = 3;
            BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
            BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
            BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
            BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
            BEAST_EXPECT(clog->str() == "");

            // Threshold jumps to 95%
            BEAST_EXPECT(proposingTrue.updateVote(220, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(220, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(220, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(220, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == false);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            // 25 more validators change their vote to match my original vote
            for (int i = 21; i < 46; ++i)
            {
                auto pTrue = PeerID(numPeers - i - 1);
                auto pFalse = PeerID(i);
                BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
                BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
                BEAST_EXPECT(followingTrue.setVote(pTrue, true));
                BEAST_EXPECT(followingFalse.setVote(pFalse, false));
            }

            // The vote should now be 96%, threshold is 95%
            BEAST_EXPECT(proposingTrue.updateVote(250, true, p));
            BEAST_EXPECT(!proposingFalse.updateVote(250, true, p));
            BEAST_EXPECT(!followingTrue.updateVote(250, false, p));
            BEAST_EXPECT(!followingFalse.updateVote(250, false, p));

            BEAST_EXPECT(proposingTrue.getOurVote() == true);
            BEAST_EXPECT(proposingFalse.getOurVote() == false);
            BEAST_EXPECT(followingTrue.getOurVote() == true);
            BEAST_EXPECT(followingFalse.getOurVote() == false);

            // Not stalled for any small number of unchanged-peer rounds,
            // since our own vote just changed. Leaves peersUnchanged == 6.
            for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged)
            {
                BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
                BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
                BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
                BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
                BEAST_EXPECT(clog->str() == "");
            }

            // Verifies the "stalled" log message written to clog: tx id,
            // our current vote, rounds unchanged, peer rounds, and support %.
            auto expectStalled =
                [this, &clog](int txid, bool ourVote, int ourTime, int peerTime, int support, std::uint32_t line) {
                    using namespace std::string_literals;

                    auto const s = clog->str();
                    // NOTE(review): missing `!= s.npos` — harmless here only
                    // because "stalled" can never be at position 0, but the
                    // comparison should be explicit like the ones below.
                    expect(s.find("stalled"), s, __FILE__, line);
                    expect(s.starts_with("Transaction "s + std::to_string(txid)), s, __FILE__, line);
                    expect(s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos, s, __FILE__, line);
                    expect(s.find("for "s + std::to_string(ourTime) + " rounds."s) != s.npos, s, __FILE__, line);
                    expect(s.find("votes in "s + std::to_string(peerTime) + " rounds.") != s.npos, s, __FILE__, line);
                    expect(s.ends_with("has "s + std::to_string(support) + "% support. "s), s, __FILE__, line);
                    // NOTE(review): a trailing statement (likely
                    // `clog->str("");` resetting the stream between checks)
                    // appears to have been lost in extraction — the
                    // `clog->str() == ""` checks below depend on it.
                };

            for (int i = 0; i < 1; ++i)
            {
                BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
                BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
                BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
                BEAST_EXPECT(!followingFalse.updateVote(250 + 10 * i, false, p));

                BEAST_EXPECT(proposingTrue.getOurVote() == true);
                BEAST_EXPECT(proposingFalse.getOurVote() == false);
                BEAST_EXPECT(followingTrue.getOurVote() == true);
                BEAST_EXPECT(followingFalse.getOurVote() == false);

                // true vote has changed recently, so not stalled
                BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
                BEAST_EXPECT(clog->str() == "");
                // remaining votes have been unchanged in so long that we only
                // need to hit the second round at 95% to be stalled, regardless
                // of peers
                BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
                expectStalled(98, false, 11, 0, 2, __LINE__);
                BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
                expectStalled(97, true, 11, 0, 97, __LINE__);
                BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
                expectStalled(96, false, 11, 0, 3, __LINE__);

                // true vote has changed recently, so not stalled
                BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
                BEAST_EXPECTS(clog->str() == "", clog->str());
                // remaining votes have been unchanged in so long that we only
                // need to hit the second round at 95% to be stalled, regardless
                // of peers
                BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged, j, clog));
                expectStalled(98, false, 11, 6, 2, __LINE__);
                BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged, j, clog));
                expectStalled(97, true, 11, 6, 97, __LINE__);
                BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged, j, clog));
                expectStalled(96, false, 11, 6, 3, __LINE__);
            }
            for (int i = 1; i < 3; ++i)
            {
                BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
                BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
                BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
                BEAST_EXPECT(!followingFalse.updateVote(250 + 10 * i, false, p));

                BEAST_EXPECT(proposingTrue.getOurVote() == true);
                BEAST_EXPECT(proposingFalse.getOurVote() == false);
                BEAST_EXPECT(followingTrue.getOurVote() == true);
                BEAST_EXPECT(followingFalse.getOurVote() == false);

                // true vote changed 2 rounds ago, and peers are changing, so
                // not stalled
                BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
                BEAST_EXPECTS(clog->str() == "", clog->str());
                // still stalled
                BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
                expectStalled(98, false, 11 + i, 0, 2, __LINE__);
                BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
                expectStalled(97, true, 11 + i, 0, 97, __LINE__);
                BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
                expectStalled(96, false, 11 + i, 0, 3, __LINE__);

                // true vote changed 2 rounds ago, and peers are NOT changing,
                // so stalled
                BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged, j, clog));
                expectStalled(99, true, 1 + i, 6, 97, __LINE__);
                // still stalled
                BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged, j, clog));
                expectStalled(98, false, 11 + i, 6, 2, __LINE__);
                BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged, j, clog));
                expectStalled(97, true, 11 + i, 6, 97, __LINE__);
                BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged, j, clog));
                expectStalled(96, false, 11 + i, 6, 3, __LINE__);
            }
            for (int i = 3; i < 5; ++i)
            {
                BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
                BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
                BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
                BEAST_EXPECT(!followingFalse.updateVote(250 + 10 * i, false, p));

                BEAST_EXPECT(proposingTrue.getOurVote() == true);
                BEAST_EXPECT(proposingFalse.getOurVote() == false);
                BEAST_EXPECT(followingTrue.getOurVote() == true);
                BEAST_EXPECT(followingFalse.getOurVote() == false);

                // By now everything is stalled, with or without peer changes.
                BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog));
                expectStalled(99, true, 1 + i, 0, 97, __LINE__);
                BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
                expectStalled(98, false, 11 + i, 0, 2, __LINE__);
                BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
                expectStalled(97, true, 11 + i, 0, 97, __LINE__);
                BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
                expectStalled(96, false, 11 + i, 0, 3, __LINE__);

                BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged, j, clog));
                expectStalled(99, true, 1 + i, 6, 97, __LINE__);
                BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged, j, clog));
                expectStalled(98, false, 11 + i, 6, 2, __LINE__);
                BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged, j, clog));
                expectStalled(97, true, 11 + i, 6, 97, __LINE__);
                BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged, j, clog));
                expectStalled(96, false, 11 + i, 6, 3, __LINE__);
            }
        }
    }
1361
    // Test-suite entry point: runs the unit-level helper-function checks
    // first, then the full csf-based consensus simulations.
    void
    run() override
    {
        // Direct checks of shouldCloseLedger/checkConsensus.
        testShouldCloseLedger();
        testCheckConsensus();

        // Simulation-driven scenarios.
        testStandalone();
        testPeersAgree();
        testSlowPeers();
        testCloseTimeDisagree();
        testWrongLCL();
        testConsensusCloseTimeRounding();
        testFork();
        testHubNetwork();
        testPreferredByBranch();
        testPauseForLaggards();
        testDisputes();
    }
1380};
1381
1382BEAST_DEFINE_TESTSUITE(Consensus, consensus, xrpl);
1383} // namespace test
1384} // namespace xrpl
A testsuite class.
Definition suite.h:52
testcase_t testcase
Memberspace for declaring test cases.
Definition suite.h:148
Generic implementation of consensus algorithm.
Definition Consensus.h:279
A transaction discovered to be in dispute during consensus.
Definition DisputedTx.h:30
Represents a peer connection in the overlay.
virtual id_t id() const =0
void run() override
Runs the suite.
A group of simulation Peers.
Definition PeerGroup.h:23
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition PeerGroup.h:165
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition PeerGroup.h:145
T insert(T... args)
T is_same_v
STL namespace.
typename SimClock::duration SimDuration
Definition SimTime.h:17
typename SimClock::time_point SimTime
Definition SimTime.h:18
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:6
auto constexpr increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, bool stalled, ConsensusParms const &parms, bool proposing, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determine whether the network reached consensus and whether we joined.
@ Expired
Consensus time limit has hard-expired.
@ MovedOn
The network has consensus without us.
@ Yes
We have consensus along with the network.
@ No
We do not have consensus.
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determines whether the current ledger should close at this time.
Definition Consensus.cpp:8
Consensus algorithm parameters.
std::chrono::milliseconds const ledgerGRANULARITY
How often we check state or change positions.
Represents a transfer rate.
Definition Rate.h:21
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
Peer accepted consensus results.
Definition events.h:102
Peer fully validated a new ledger.
Definition events.h:121
Ledger ledger
The new fully validated ledger.
Definition events.h:123
A single peer in the simulation.
T to_string(T... args)