rippled
Loading...
Searching...
No Matches
Consensus_test.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012-2016 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19#include <test/csf.h>
20#include <test/unit_test/SuiteJournal.h>
21#include <xrpld/consensus/Consensus.h>
22#include <xrpld/consensus/ConsensusProposal.h>
23#include <xrpl/beast/clock/manual_clock.h>
24#include <xrpl/beast/unit_test.h>
25#include <utility>
26
27namespace ripple {
28namespace test {
29
31{
33
34public:
// Construct the suite; journal_ routes consensus log output through this
// unit-test suite (see SuiteJournal, included above).
35 Consensus_test() : journal_("Consensus_test", *this)
36 {
37 }
38
// Unit test of the shouldCloseLedger() free function (see Consensus.cpp):
// feeds hand-picked proposer counts and timing inputs and checks the
// close/don't-close decision.
// NOTE(review): the extraction dropped the signature line (listing line 40)
// naming this function; run() calls testShouldCloseLedger() at this
// position — confirm against upstream.
39 void
41 {
42 using namespace std::chrono_literals;
43
44 // Use default parameters
45 ConsensusParms const p{};
46
47 // Bizarre times forcibly close
48 BEAST_EXPECT(shouldCloseLedger(
49 true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
50 BEAST_EXPECT(shouldCloseLedger(
51 true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
52 BEAST_EXPECT(shouldCloseLedger(
53 true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
54
55 // Rest of network has closed
56 BEAST_EXPECT(
57 shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
58
59 // No transactions means wait until end of interval
60 BEAST_EXPECT(
61 !shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
62 BEAST_EXPECT(
63 shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
64
65 // Enforce minimum ledger open time
66 BEAST_EXPECT(
67 !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
68
69 // Don't go too much faster than last time
70 BEAST_EXPECT(
71 !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
72
73 BEAST_EXPECT(
74 shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
75 }
76
// Unit test of the checkConsensus() free function (see Consensus.cpp):
// exercises the reached/not-reached/moved-on decision for various
// proposer/agreement counts and elapsed times.
// NOTE(review): the extraction dropped the signature line naming this
// function (run() calls testCheckConsensus() here), and also dropped the
// expected-value operands of each BEAST_EXPECT — upstream these lines read
// e.g. "ConsensusState::No ==" / "::Yes ==" / "::MovedOn ==" before each
// checkConsensus(...) call. Confirm against upstream before relying on
// the exact expectations.
77 void
79 {
80 using namespace std::chrono_literals;
81
82 // Use default parameters
83 ConsensusParms const p{};
84
85 // Not enough time has elapsed
86 BEAST_EXPECT(
88 checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
89
90 // If not enough peers have proposed, ensure
91 // more time for proposals
92 BEAST_EXPECT(
94 checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
95
96 // Enough time has elapsed and we all agree
97 BEAST_EXPECT(
99 checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
100
101 // Enough time has elapsed and we don't yet agree
102 BEAST_EXPECT(
104 checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
105
106 // Our peers have moved on
107 // Enough time has elapsed and we all agree
108 BEAST_EXPECT(
110 checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
111
112 // If no peers, don't agree until time has passed.
113 BEAST_EXPECT(
115 checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
116
117 // Agree if no peers and enough time has passed.
118 BEAST_EXPECT(
120 checkConsensus(0, 0, 0, 0, 3s, 16s, p, true, journal_));
121 }
122
// Simulation test: a single peer in standalone mode closes one ledger on
// its own. Verifies the closed ledger has the submitted transaction and
// that no other peers proposed.
// NOTE(review): signature line dropped by the extraction; run() calls
// testStandalone() at this position — confirm against upstream.
123 void
125 {
126 using namespace std::chrono_literals;
127 using namespace csf;
128
129 Sim s;
130 PeerGroup peers = s.createGroup(1);
131 Peer* peer = peers[0];
132 peer->targetLedgers = 1;
133 peer->start();
134 peer->submit(Tx{1});
135
136 s.scheduler.step();
137
138 // Inspect that the proper ledger was created
139 auto const& lcl = peer->lastClosedLedger;
140 BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
141 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
142 BEAST_EXPECT(lcl.txs().size() == 1);
143 BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
144 BEAST_EXPECT(peer->prevProposers == 0);
145 }
146
// Simulation test: 5 fully-connected, mutually-trusting peers with a
// uniform small link delay all submit distinct transactions and should
// reach the identical first ledger containing all of them.
// NOTE(review): signature line dropped by the extraction; run() calls
// testPeersAgree() at this position — confirm against upstream.
147 void
149 {
150 using namespace csf;
151 using namespace std::chrono;
152
153 ConsensusParms const parms{};
154 Sim sim;
155 PeerGroup peers = sim.createGroup(5);
156
157 // Connected trust and network graphs with single fixed delay
158 peers.trustAndConnect(
159 peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
160
161 // everyone submits their own ID as a TX
162 for (Peer* p : peers)
163 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
164
165 sim.run(1);
166
167 // All peers are in sync
168 if (BEAST_EXPECT(sim.synchronized()))
169 {
170 for (Peer const* peer : peers)
171 {
172 auto const& lcl = peer->lastClosedLedger;
173 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
174 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
175 // All peers proposed
176 BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
177 // All transactions were accepted
178 for (std::uint32_t i = 0; i < peers.size(); ++i)
179 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
180 }
181 }
182 }
183
// Simulation test: peers with much slower network links than the rest of
// the network. First scenario: one slow peer cannot block a 4/5 quorum;
// second scenario: two slow peers (participating or merely observing) do
// delay a 4/6 quorum, extending the round for the fast peers.
// NOTE(review): signature line dropped by the extraction; run() calls
// testSlowPeers() at this position — confirm against upstream.
184 void
186 {
187 using namespace csf;
188 using namespace std::chrono;
189
190 // Several tests of a complete trust graph with a subset of peers
191 // that have significantly longer network delays to the rest of the
192 // network
193
194 // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
195 {
196 ConsensusParms const parms{};
197 Sim sim;
198 PeerGroup slow = sim.createGroup(1);
199 PeerGroup fast = sim.createGroup(4);
200 PeerGroup network = fast + slow;
201
202 // Fully connected trust graph
203 network.trust(network);
204
205 // Fast and slow network connections
206 fast.connect(
207 fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
208
209 slow.connect(
210 network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
211
212 // All peers submit their own ID as a transaction
213 for (Peer* peer : network)
214 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
215
216 sim.run(1);
217
218 // Verify all peers have same LCL but are missing transaction 0
219 // All peers are in sync even with a slower peer 0
220 if (BEAST_EXPECT(sim.synchronized()))
221 {
222 for (Peer* peer : network)
223 {
224 auto const& lcl = peer->lastClosedLedger;
225 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
226 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
227
228 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
229 BEAST_EXPECT(
230 peer->prevRoundTime == network[0]->prevRoundTime);
231
232 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
233 for (std::uint32_t i = 2; i < network.size(); ++i)
234 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
235
236 // Tx 0 didn't make it
237 BEAST_EXPECT(
238 peer->openTxs.find(Tx{0}) != peer->openTxs.end());
239 }
240 }
241 }
242
243 // Test when the slow peers delay a consensus quorum (4/6 agree)
244 {
245 // Run two tests
246 // 1. The slow peers are participating in consensus
247 // 2. The slow peers are just observing
248
249 for (auto isParticipant : {true, false})
250 {
251 ConsensusParms const parms{};
252
253 Sim sim;
254 PeerGroup slow = sim.createGroup(2);
255 PeerGroup fast = sim.createGroup(4);
256 PeerGroup network = fast + slow;
257
258 // Connected trust graph
259 network.trust(network);
260
261 // Fast and slow network connections
262 fast.connect(
263 fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
264
265 slow.connect(
266 network,
267 round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
268
269 for (Peer* peer : slow)
270 peer->runAsValidator = isParticipant;
271
272 // All peers submit their own ID as a transaction and relay it
273 // to peers
274 for (Peer* peer : network)
275 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
276
277 sim.run(1);
278
279 if (BEAST_EXPECT(sim.synchronized()))
280 {
281 // Verify all peers have same LCL but are missing
282 // transaction 0,1 which was not received by all peers
283 // before the ledger closed
284 for (Peer* peer : network)
285 {
286 // Closed ledger has all but transaction 0,1
287 auto const& lcl = peer->lastClosedLedger;
288 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
289 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
290 BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
291 for (std::uint32_t i = slow.size(); i < network.size();
292 ++i)
293 BEAST_EXPECT(
294 lcl.txs().find(Tx{i}) != lcl.txs().end());
295
296 // Tx 0-1 didn't make it
297 BEAST_EXPECT(
298 peer->openTxs.find(Tx{0}) != peer->openTxs.end());
299 BEAST_EXPECT(
300 peer->openTxs.find(Tx{1}) != peer->openTxs.end());
301 }
302
303 Peer const* slowPeer = slow[0];
304 if (isParticipant)
305 BEAST_EXPECT(
306 slowPeer->prevProposers == network.size() - 1);
307 else
308 BEAST_EXPECT(slowPeer->prevProposers == fast.size());
309
310 for (Peer* peer : fast)
311 {
312 // Due to the network link delay settings
313 // Peer 0 initially proposes {0}
314 // Peer 1 initially proposes {1}
315 // Peers 2-5 initially propose {2,3,4,5}
316 // Since peers 2-5 agree, 4/6 > the initial 50% needed
317 // to include a disputed transaction, so Peer 0/1 switch
318 // to agree with those peers. Peer 0/1 then closes with
319 // an 80% quorum of agreeing positions (5/6) match.
320 //
321 // Peers 2-5 do not change position, since tx 0 or tx 1
322 // have less than the 50% initial threshold. They also
323 // cannot declare consensus, since 4/6 agreeing
324 // positions are < 80% threshold. They therefore need an
325 // additional timerEntry call to see the updated
326 // positions from Peer 0 & 1.
327
328 if (isParticipant)
329 {
330 BEAST_EXPECT(
331 peer->prevProposers == network.size() - 1);
332 BEAST_EXPECT(
333 peer->prevRoundTime > slowPeer->prevRoundTime);
334 }
335 else
336 {
337 BEAST_EXPECT(
338 peer->prevProposers == fast.size() - 1);
339 // so all peers should have closed together
340 BEAST_EXPECT(
341 peer->prevRoundTime == slowPeer->prevRoundTime);
342 }
343 }
344 }
345 }
346 }
347 }
348
// Simulation test: engineer three pairs of peers with different clock
// skews so no majority shares an effective close time; all peers should
// still synchronize but record closeAgree() == false.
// NOTE(review): signature line dropped by the extraction; run() calls
// testCloseTimeDisagree() at this position — confirm against upstream.
349 void
351 {
352 using namespace csf;
353 using namespace std::chrono;
354
355 // This is a very specialized test to get ledgers to disagree on
356 // the close time. It unfortunately assumes knowledge about current
357 // timing constants. This is a necessary evil to get coverage up
358 // pending more extensive refactorings of timing constants.
359
360 // In order to agree-to-disagree on the close time, there must be no
361 // clear majority of nodes agreeing on a close time. This test
362 // sets a relative offset to the peers internal clocks so that they
363 // send proposals with differing times.
364
365 // However, agreement is on the effective close time, not the
366 // exact close time. The minimum closeTimeResolution is given by
367 // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
368 // the skews need to be at least 10 seconds to have different effective
369 // close times.
370
371 // Complicating this matter is that nodes will ignore proposals
372 // with times more than proposeFRESHNESS =20s in the past. So at
373 // the minimum granularity, we have at most 3 types of skews
374 // (0s,10s,20s).
375
376 // This test therefore has 6 nodes, with 2 nodes having each type of
377 // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
378 // actual close time.
379
380 ConsensusParms const parms{};
381 Sim sim;
382
383 PeerGroup groupA = sim.createGroup(2);
384 PeerGroup groupB = sim.createGroup(2);
385 PeerGroup groupC = sim.createGroup(2);
386 PeerGroup network = groupA + groupB + groupC;
387
388 network.trust(network);
389 network.connect(
390 network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
391
392 // Run consensus without skew until we have a short close time
393 // resolution
394 Peer* firstPeer = *groupA.begin();
395 while (firstPeer->lastClosedLedger.closeTimeResolution() >=
396 parms.proposeFRESHNESS)
397 sim.run(1);
398
399 // Introduce a shift on the time of 2/3 of peers
400 for (Peer* peer : groupA)
401 peer->clockSkew = parms.proposeFRESHNESS / 2;
402 for (Peer* peer : groupB)
403 peer->clockSkew = parms.proposeFRESHNESS;
404
405 sim.run(1);
406
407 // All nodes agreed to disagree on the close time
408 if (BEAST_EXPECT(sim.synchronized()))
409 {
410 for (Peer* peer : network)
411 BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
412 }
413 }
414
// Simulation test: temporary fork where a minority builds on the wrong
// prior ledger, detects the mismatch via validations, acquires the
// correct ledger and rejoins. A second scenario forces the wrong-LCL
// switch to happen during the establish phase (regression for a crash).
// NOTE(review): signature line dropped by the extraction; run() calls
// testWrongLCL() at this position — confirm against upstream.
415 void
417 {
418 using namespace csf;
419 using namespace std::chrono;
420 // Specialized test to exercise a temporary fork in which some peers
421 // are working on an incorrect prior ledger.
422
423 ConsensusParms const parms{};
424
425 // Vary the time it takes to process validations to exercise detecting
426 // the wrong LCL at different phases of consensus
427 for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
428 {
429 // Consider 10 peers:
430 // 0 1 2 3 4 5 6 7 8 9
431 // minority majorityA majorityB
432 //
433 // Nodes 0-1 trust nodes 0-4
434 // Nodes 2-9 trust nodes 2-9
435 //
436 // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
437 // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
438 // nodes will instead accept the ledger with tx 1.
439
440 // Nodes 0-1 will detect this mismatch during a subsequent round
441 // since nodes 2-4 will validate a different ledger.
442
443 // Nodes 0-1 will acquire the proper ledger from the network and
444 // resume consensus and eventually generate the dominant network
445 // ledger.
446
447 // This topology can potentially fork with the above trust relations
448 // but that is intended for this test.
449
450 Sim sim;
451
452 PeerGroup minority = sim.createGroup(2);
453 PeerGroup majorityA = sim.createGroup(3);
454 PeerGroup majorityB = sim.createGroup(5);
455
456 PeerGroup majority = majorityA + majorityB;
457 PeerGroup network = minority + majority;
458
459 SimDuration delay =
460 round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
461 minority.trustAndConnect(minority + majorityA, delay);
462 majority.trustAndConnect(majority, delay);
463
464 CollectByNode<JumpCollector> jumps;
465 sim.collectors.add(jumps);
466
467 BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
468
469 // initial round to set prior state
470 sim.run(1);
471
472 // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
473 // tx 1
474 for (Peer* peer : network)
475 peer->delays.recvValidation = validationDelay;
476 for (Peer* peer : (minority + majorityA))
477 peer->openTxs.insert(Tx{0});
478 for (Peer* peer : majorityB)
479 peer->openTxs.insert(Tx{1});
480
481 // Run for additional rounds
482 // With no validation delay, only 2 more rounds are needed.
483 // 1. Round to generate different ledgers
484 // 2. Round to detect different prior ledgers (but still generate
485 // wrong ones) and recover within that round since wrong LCL
486 // is detected before we close
487 //
488 // With a validation delay of ledgerMIN_CLOSE, we need 3 more
489 // rounds.
490 // 1. Round to generate different ledgers
491 // 2. Round to detect different prior ledgers (but still generate
492 // wrong ones) but end up declaring consensus on wrong LCL (but
493 // with the right transaction set!). This is because we detect
494 // the wrong LCL after we have closed the ledger, so we declare
495 // consensus based solely on our peer proposals. But we haven't
496 // had time to acquire the right ledger.
497 // 3. Round to correct
498 sim.run(3);
499
500 // The network never actually forks, since node 0-1 never see a
501 // quorum of validations to fully validate the incorrect chain.
502
503 // However, for a non zero-validation delay, the network is not
504 // synchronized because nodes 0 and 1 are running one ledger behind
505 if (BEAST_EXPECT(sim.branches() == 1))
506 {
507 for (Peer const* peer : majority)
508 {
509 // No jumps for majority nodes
510 BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
511 BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
512 }
513 for (Peer const* peer : minority)
514 {
515 auto& peerJumps = jumps[peer->id];
516 // last closed ledger jump between chains
517 {
518 if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
519 {
520 JumpCollector::Jump const& jump =
521 peerJumps.closeJumps.front();
522 // Jump is to a different chain
523 BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
524 BEAST_EXPECT(!jump.to.isAncestor(jump.from));
525 }
526 }
527 // fully validated jump forward in same chain
528 {
529 if (BEAST_EXPECT(
530 peerJumps.fullyValidatedJumps.size() == 1))
531 {
532 JumpCollector::Jump const& jump =
533 peerJumps.fullyValidatedJumps.front();
534 // Jump is forward within the same chain: seq strictly
534 // increases and the destination descends from the source
535 BEAST_EXPECT(jump.from.seq() < jump.to.seq());
536 BEAST_EXPECT(jump.to.isAncestor(jump.from));
537 }
538 }
539 }
540 }
541 }
542
543 {
544 // Additional test engineered to switch LCL during the establish
545 // phase. This was added to trigger a scenario that previously
546 // crashed, in which switchLCL switched from establish to open
547 // phase, but still processed the establish phase logic.
548
549 // Loner node will accept an initial ledger A, but all other nodes
550 // accept ledger B a bit later. By delaying the time it takes
551 // to process a validation, loner node will detect the wrongLCL
552 // after it is already in the establish phase of the next round.
553
554 Sim sim;
555 PeerGroup loner = sim.createGroup(1);
556 PeerGroup friends = sim.createGroup(3);
557 loner.trust(loner + friends);
558
559 PeerGroup others = sim.createGroup(6);
560 PeerGroup clique = friends + others;
561 clique.trust(clique);
562
563 PeerGroup network = loner + clique;
564 network.connect(
565 network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
566
567 // initial round to set prior state
568 sim.run(1);
569 for (Peer* peer : (loner + friends))
570 peer->openTxs.insert(Tx(0));
571 for (Peer* peer : others)
572 peer->openTxs.insert(Tx(1));
573
574 // Delay validation processing
575 for (Peer* peer : network)
576 peer->delays.recvValidation = parms.ledgerGRANULARITY;
577
578 // additional rounds to generate wrongLCL and recover
579 sim.run(2);
580
581 // Check all peers recovered
582 for (Peer* p : network)
583 BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
584 }
585 }
586
// Simulation test: contrive close times near a resolution decrease so
// that the effective close time is not a fixed point of effCloseTime(),
// yielding ledgers whose recorded close times differ even though peers
// believed they agreed.
// NOTE(review): signature line dropped by the extraction; run() calls
// testConsensusCloseTimeRounding() at this position — confirm upstream.
587 void
589 {
590 using namespace csf;
591 using namespace std::chrono;
592
593 // This is a specialized test engineered to yield ledgers with different
594 // close times even though the peers believe they had close time
595 // consensus on the ledger.
596 ConsensusParms parms;
597
598 Sim sim;
599
600 // This requires a group of 4 fast and 2 slow peers to create a
601 // situation in which a subset of peers requires seeing additional
602 // proposals to declare consensus.
603 PeerGroup slow = sim.createGroup(2);
604 PeerGroup fast = sim.createGroup(4);
605 PeerGroup network = fast + slow;
606
607 for (Peer* peer : network)
608 peer->consensusParms = parms;
609
610 // Connected trust graph
611 network.trust(network);
612
613 // Fast and slow network connections
614 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
615 slow.connect(
616 network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
617
618 // Run to the ledger *prior* to decreasing the resolution
// NOTE(review): the extraction dropped listing line 619 here — upstream
// this runs the sim for increaseLedgerTimeResolutionEvery - 2 rounds
// (see LedgerTiming.h) — confirm against upstream.
620
621 // In order to create the discrepancy, we want a case where if
622 // X = effCloseTime(closeTime, resolution, parentCloseTime)
623 // X != effCloseTime(X, resolution, parentCloseTime)
624 //
625 // That is, the effective close time is not a fixed point. This can
626 // happen if X = parentCloseTime + 1, but a subsequent rounding goes
627 // to the next highest multiple of resolution.
628
629 // So we want to find an offset (now + offset) % 30s = 15
630 // (now + offset) % 20s = 15
631 // This way, the next ledger will close and round up Due to the
632 // network delay settings, the round of consensus will take 5s, so
633 // the next ledger's close time will
634
635 NetClock::duration when = network[0]->now().time_since_epoch();
636
637 // Check we are before the 30s to 20s transition
638 NetClock::duration resolution =
639 network[0]->lastClosedLedger.closeTimeResolution();
640 BEAST_EXPECT(resolution == NetClock::duration{30s});
641
642 while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
643 ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
644 when += 1s;
645 // Advance the clock without consensus running (IS THIS WHAT
646 // PREVENTS IT IN PRACTICE?)
647 sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
648
649 // Run one more ledger with 30s resolution
650 sim.run(1);
651 if (BEAST_EXPECT(sim.synchronized()))
652 {
653 // close time should be ahead of clock time since we engineered
654 // the close time to round up
655 for (Peer* peer : network)
656 {
657 BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
658 BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
659 }
660 }
661
662 // All peers submit their own ID as a transaction
663 for (Peer* peer : network)
664 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
665
666 // Run 1 more round, this time it will have a decreased
667 // resolution of 20 seconds.
668
669 // The network delays are engineered so that the slow peers
670 // initially have the wrong tx hash, but they see a majority
671 // of agreement from their peers and declare consensus
672 //
673 // The trick is that everyone starts with a raw close time of
674 // 86481s
675 // Which has
676 // effCloseTime(86481s, 20s, 86490s) = 86491s
677 // However, when the slow peers update their position, they change
678 // the close time to 86451s. The fast peers declare consensus with
679 // the 86481s as their position still.
680 //
681 // When accepted the ledger
682 // - fast peers use eff(86481s) -> 86491s as the close time
683 // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
684
685 sim.run(1);
686
687 BEAST_EXPECT(sim.synchronized());
688 }
689
// Simulation test: sweep the overlap between two UNL cliques from 0 to
// all peers; overlap above 40% must not fork, smaller overlap may fork
// into at most 3 branches (cliqueA, cliqueB, and the common nodes).
// NOTE(review): signature line dropped by the extraction; run() calls
// testFork() at this position — confirm against upstream.
690 void
692 {
693 using namespace csf;
694 using namespace std::chrono;
695
696 std::uint32_t numPeers = 10;
697 // Vary overlap between two UNLs
698 for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
699 {
700 ConsensusParms const parms{};
701 Sim sim;
702
703 std::uint32_t numA = (numPeers - overlap) / 2;
704 std::uint32_t numB = numPeers - numA - overlap;
705
706 PeerGroup aOnly = sim.createGroup(numA);
707 PeerGroup bOnly = sim.createGroup(numB);
708 PeerGroup commonOnly = sim.createGroup(overlap);
709
710 PeerGroup a = aOnly + commonOnly;
711 PeerGroup b = bOnly + commonOnly;
712
713 PeerGroup network = a + b;
714
715 SimDuration delay =
716 round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
717 a.trustAndConnect(a, delay);
718 b.trustAndConnect(b, delay);
719
720 // Initial round to set prior state
721 sim.run(1);
722 for (Peer* peer : network)
723 {
724 // Nodes have only seen transactions from their neighbors
725 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
726 for (Peer* to : sim.trustGraph.trustedPeers(peer))
727 peer->openTxs.insert(
728 Tx{static_cast<std::uint32_t>(to->id)});
729 }
730 sim.run(1);
731
732 // Fork should not happen for 40% or greater overlap
733 // Since the overlapped nodes have a UNL that is the union of the
734 // two cliques, the maximum sized UNL list is the number of peers
735 if (overlap > 0.4 * numPeers)
736 BEAST_EXPECT(sim.synchronized());
737 else
738 {
739 // Even if we do fork, there shouldn't be more than 3 ledgers
740 // One for cliqueA, one for cliqueB and one for nodes in both
741 BEAST_EXPECT(sim.branches() <= 3);
742 }
743 }
744 }
745
// Simulation test: 5 validators that communicate only through a single
// non-validating hub node should still reach consensus.
// NOTE(review): signature line dropped by the extraction; run() calls
// testHubNetwork() at this position — confirm against upstream.
746 void
748 {
749 using namespace csf;
750 using namespace std::chrono;
751
752 // Simulate a set of 5 validators that aren't directly connected but
753 // rely on a single hub node for communication
754
755 ConsensusParms const parms{};
756 Sim sim;
757 PeerGroup validators = sim.createGroup(5);
758 PeerGroup center = sim.createGroup(1);
759 validators.trust(validators);
760 center.trust(validators);
761
762 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
763 validators.connect(center, delay);
764
765 center[0]->runAsValidator = false;
766
767 // prep round to set initial state.
768 sim.run(1);
769
770 // everyone submits their own ID as a TX and relay it to peers
771 for (Peer* p : validators)
772 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
773
774 sim.run(1);
775
776 // All peers are in sync
777 BEAST_EXPECT(sim.synchronized());
778 }
779
780 // Helper collector for testPreferredByBranch
781 // Invasively disconnects network at bad times to cause splits
// NOTE(review): the extraction dropped several lines of this struct:
// the "struct Disruptor" header (listing line 782), the reference data
// members network/groupCfast/groupCsplit/delay (784-787), the
// constructor name "Disruptor(" (790), the parameters "csf::PeerGroup& c,"
// (792) and "csf::SimDuration d)" (794), and the three on(...) event
// handler signatures (801, 806, 821 — a catch-all template handler, a
// FullyValidateLedger handler, and an AcceptLedger handler, inferred
// from the parameters 'who' and 'e' used below). Confirm upstream.
783 {
788 bool reconnected = false;
789
791 csf::PeerGroup& net,
793 csf::PeerGroup& split,
795 : network(net), groupCfast(c), groupCsplit(split), delay(d)
796 {
797 }
798
799 template <class E>
800 void
802 {
803 }
804
805 void
807 {
808 using namespace std::chrono;
809 // As soon as the fastC node fully validates C, disconnect
810 // ALL c nodes from the network. The fast C node needs to disconnect
811 // as well to prevent it from relaying the validations it did see
812 if (who == groupCfast[0]->id &&
813 e.ledger.seq() == csf::Ledger::Seq{2})
814 {
815 network.disconnect(groupCsplit);
816 network.disconnect(groupCfast);
817 }
818 }
819
820 void
822 {
823 // As soon as anyone generates a child of B or C, reconnect the
824 // network so those validations make it through
825 if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
826 {
827 reconnected = true;
828 network.connect(groupCsplit, delay);
829 }
830 }
831 };
832
// Simulation test: contrived network splits showing that the
// preferred-ledger-by-trie selection avoids a fork that the older
// count-based approach would have produced.
// NOTE(review): signature line dropped by the extraction; run() calls
// testPreferredByBranch() at this position — confirm against upstream.
833 void
835 {
836 using namespace csf;
837 using namespace std::chrono;
838
839 // Simulate network splits that are prevented from forking when using
840 // preferred ledger by trie. This is a contrived example that involves
841 // excessive network splits, but demonstrates the safety improvement
842 // from the preferred ledger by trie approach.
843
844 // Consider 10 validating nodes that comprise a single common UNL
845 // Ledger history:
846 // 1: A
847 // _/ \_
848 // 2: B C
849 // _/ _/ \_
850 // 3: D C' |||||||| (8 different ledgers)
851
852 // - All nodes generate the common ledger A
853 // - 2 nodes generate B and 8 nodes generate C
854 // - Only 1 of the C nodes sees all the C validations and fully
855 // validates C. The rest of the C nodes split at just the right time
856 // such that they never see any C validations but their own.
857 // - The C nodes continue and generate 8 different child ledgers.
858 // - Meanwhile, the D nodes only saw 1 validation for C and 2
859 // validations
860 // for B.
861 // - The network reconnects and the validations for generation 3 ledgers
862 // are observed (D and the 8 C's)
863 // - In the old approach, 2 votes for D outweighs 1 vote for each C'
864 // so the network would avalanche towards D and fully validate it
865 // EVEN though C was fully validated by one node
866 // - In the new approach, 2 votes for D are not enough to outweigh the
867 // 8 implicit votes for C, so nodes will avalanche to C instead
868
869 ConsensusParms const parms{};
870 Sim sim;
871
872 // Goes A->B->D
873 PeerGroup groupABD = sim.createGroup(2);
874 // Single node that initially fully validates C before the split
875 PeerGroup groupCfast = sim.createGroup(1);
876 // Generates C, but fails to fully validate before the split
877 PeerGroup groupCsplit = sim.createGroup(7);
878
879 PeerGroup groupNotFastC = groupABD + groupCsplit;
880 PeerGroup network = groupABD + groupCsplit + groupCfast;
881
882 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
883 SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
884
885 network.trust(network);
886 // C must have a shorter delay to see all the validations before the
887 // other nodes
888 network.connect(groupCfast, fDelay);
889 // The rest of the network is connected at the same speed
890 groupNotFastC.connect(groupNotFastC, delay);
891
892 Disruptor dc(network, groupCfast, groupCsplit, delay);
893 sim.collectors.add(dc);
894
895 // Consensus round to generate ledger A
896 sim.run(1);
897 BEAST_EXPECT(sim.synchronized());
898
899 // Next round generates B and C
900 // To force B, we inject an extra transaction into those nodes
901 for (Peer* peer : groupABD)
902 {
903 peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
904 }
905 // The Disruptor will ensure that nodes disconnect before the C
906 // validations make it to all but the fastC node
907 sim.run(1);
908
909 // We are no longer in sync, but have not yet forked:
910 // 9 nodes consider A the last fully validated ledger and fastC sees C
911 BEAST_EXPECT(!sim.synchronized());
912 BEAST_EXPECT(sim.branches() == 1);
913
914 // Run another round to generate the 8 different C' ledgers
915 for (Peer* p : network)
916 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
917 sim.run(1);
918
919 // Still not forked
920 BEAST_EXPECT(!sim.synchronized());
921 BEAST_EXPECT(sim.branches() == 1);
922
923 // Disruptor will reconnect all but the fastC node
924 sim.run(1);
925
926 if (BEAST_EXPECT(sim.branches() == 1))
927 {
928 BEAST_EXPECT(sim.synchronized());
929 }
930 else // old approach caused a fork
931 {
932 BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
933 BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
934 }
935 }
936
937 // Helper collector for testPauseForLaggards
938 // This will remove the ledgerAccept delay used to
939 // initially create the slow vs. fast validator groups.
// NOTE(review): the extraction dropped lines of this struct: the
// "struct UndoDelay" header (listing line 940), the PeerGroup reference
// member 'g' (942, used in the loop below), the constructor signature
// (944), and the two on(...) handler signatures (950 and 955 — a
// catch-all template handler and an AcceptLedger handler taking the
// 'who' parameter used below). Confirm against upstream.
941 {
943
945 {
946 }
947
948 template <class E>
949 void
951 {
952 }
953
954 void
956 {
957 for (csf::Peer* p : g)
958 {
// Once the matching peer accepts a ledger, drop its artificial
// ledgerAccept delay back to zero.
959 if (p->id == who)
960 p->delays.ledgerAccept = std::chrono::seconds{0};
961 }
962 }
963 };
964
// Simulation test: validators that run ahead of a slowed-down majority
// must briefly pause consensus so the "behind" group can catch up to the
// same sequence number, after which the whole network resynchronizes.
// NOTE(review): signature line dropped by the extraction; run() calls
// testPauseForLaggards() at this position — confirm against upstream.
965 void
967 {
968 using namespace csf;
969 using namespace std::chrono;
970
971 // Test that validators that jump ahead of the network slow
972 // down.
973
974 // We engineer the following validated ledger history scenario:
975 //
976 // / --> B1 --> C1 --> ... -> G1 "ahead"
977 // A
978 // \ --> B2 --> C2 "behind"
979 //
980 // After validating a common ledger A, a set of "behind" validators
981 // briefly run slower and validate the lower chain of ledgers.
982 // The "ahead" validators run normal speed and run ahead validating the
983 // upper chain of ledgers.
984 //
985 // Due to the uncommitted support definition of the preferred branch
986 // protocol, even if the "behind" validators are a majority, the "ahead"
987 // validators cannot jump to the proper branch until the "behind"
988 // validators catch up to the same sequence number. For this test to
989 // succeed, the ahead validators need to briefly slow down consensus.
990
991 ConsensusParms const parms{};
992 Sim sim;
993 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
994
995 PeerGroup behind = sim.createGroup(3);
996 PeerGroup ahead = sim.createGroup(2);
997 PeerGroup network = ahead + behind;
998
999 hash_set<Peer::NodeKey_t> trustedKeys;
1000 for (Peer* p : network)
1001 trustedKeys.insert(p->key);
1002 for (Peer* p : network)
1003 p->trustedKeys = trustedKeys;
1004
1005 network.trustAndConnect(network, delay);
1006
1007 // Initial seed round to set prior state
1008 sim.run(1);
1009
1010 // Have the "behind" group initially take a really long time to
1011 // accept a ledger after ending deliberation
1012 for (Peer* p : behind)
1013 p->delays.ledgerAccept = 20s;
1014
1015 // Use the collector to revert the delay after the single
1016 // slow ledger is generated
1017 UndoDelay undoDelay{behind};
1018 sim.collectors.add(undoDelay);
1019
1020#if 0
1021 // Have all beast::journal output printed to stdout
1022 for (Peer* p : network)
1023 p->sink.threshold(beast::severities::kAll);
1024
1025 // Print ledger accept and fully validated events to stdout
1026 StreamCollector sc{std::cout};
1027 sim.collectors.add(sc);
1028#endif
1029 // Run the simulation for 100 seconds of simulation time
1030 std::chrono::nanoseconds const simDuration = 100s;
1031
1032 // Simulate clients submitting 1 tx every 5 seconds to a random
1033 // validator
1034 Rate const rate{1, 5s};
1035 auto peerSelector = makeSelector(
1036 network.begin(),
1037 network.end(),
1038 std::vector<double>(network.size(), 1.),
1039 sim.rng);
1040 auto txSubmitter = makeSubmitter(
1041 ConstantDistribution{rate.inv()},
1042 sim.scheduler.now(),
1043 sim.scheduler.now() + simDuration,
1044 peerSelector,
1045 sim.scheduler,
1046 sim.rng);
1047
1048 // Run simulation
1049 sim.run(simDuration);
1050
1051 // Verify that the network recovered
1052 BEAST_EXPECT(sim.synchronized());
1053 }
1054
// Test-suite entry point: runs the two unit tests of the free functions
// first, then the csf simulation scenarios in increasing complexity.
1055 void
1056 run() override
1057 {
1058 testShouldCloseLedger();
1059 testCheckConsensus();
1060
1061 testStandalone();
1062 testPeersAgree();
1063 testSlowPeers();
1064 testCloseTimeDisagree();
1065 testWrongLCL();
1066 testConsensusCloseTimeRounding();
1067 testFork();
1068 testHubNetwork();
1069 testPreferredByBranch();
1070 testPauseForLaggards();
1071 }
1072};
1073
1074BEAST_DEFINE_TESTSUITE(Consensus, consensus, ripple);
1075} // namespace test
1076} // namespace ripple
A testsuite class.
Definition: suite.h:53
Generic implementation of consensus algorithm.
Definition: Consensus.h:292
Represents a peer connection in the overlay.
virtual id_t id() const =0
void run() override
Runs the suite.
A group of simulation Peers.
Definition: PeerGroup.h:40
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition: PeerGroup.h:184
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition: PeerGroup.h:164
T insert(T... args)
typename SimClock::duration SimDuration
Definition: SimTime.h:35
typename SimClock::time_point SimTime
Definition: SimTime.h:36
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, ConsensusParms const &parms, bool proposing, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determine whether the network reached consensus and whether we joined.
Definition: Consensus.cpp:163
@ MovedOn
The network has consensus without us.
@ Yes
We have consensus along with the network.
@ No
We do not have consensus.
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determines whether the current ledger should close at this time.
Definition: Consensus.cpp:26
auto constexpr increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
Definition: LedgerTiming.h:50
Consensus algorithm parameters.
std::chrono::milliseconds ledgerGRANULARITY
How often we check state or change positions.
Represents a transfer rate.
Definition: Rate.h:38
void on(csf::PeerID, csf::SimTime, E const &)
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Peer accepted consensus results.
Definition: events.h:119
Peer fully validated a new ledger.
Definition: events.h:138
Ledger ledger
The new fully validated ledger.
Definition: events.h:140
A single peer in the simulation.
Definition: test/csf/Peer.h:55