rippled
Consensus_test.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012-2016 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 #include <ripple/beast/clock/manual_clock.h>
20 #include <ripple/beast/unit_test.h>
21 #include <ripple/consensus/Consensus.h>
22 #include <ripple/consensus/ConsensusProposal.h>
23 #include <test/csf.h>
24 #include <test/unit_test/SuiteJournal.h>
25 #include <utility>
26 
27 namespace ripple {
28 namespace test {
29 
30 class Consensus_test : public beast::unit_test::suite
31 {
33 
34 public:
35  Consensus_test() : journal_("Consensus_test", *this)
36  {
37  }
38 
39  void
41  {
42  using namespace std::chrono_literals;
43 
44  // Use default parameters
45  ConsensusParms const p{};
46 
48  // Bizarre times forcibly close
49  BEAST_EXPECT(shouldCloseLedger(
50  true, 10, 10, 10, -10s, 10s, 1s, delay, 1s, p, journal_));
51  BEAST_EXPECT(shouldCloseLedger(
52  true, 10, 10, 10, 100h, 10s, 1s, delay, 1s, p, journal_));
53  BEAST_EXPECT(shouldCloseLedger(
54  true, 10, 10, 10, 10s, 100h, 1s, delay, 1s, p, journal_));
55 
56  // Rest of network has closed
57  BEAST_EXPECT(shouldCloseLedger(
58  true, 10, 3, 5, 10s, 10s, 10s, delay, 10s, p, journal_));
59 
60  // No transactions means wait until end of internval
61  BEAST_EXPECT(!shouldCloseLedger(
62  false, 10, 0, 0, 1s, 1s, 1s, delay, 10s, p, journal_));
63  BEAST_EXPECT(shouldCloseLedger(
64  false, 10, 0, 0, 1s, 10s, 1s, delay, 10s, p, journal_));
65 
66  // Enforce minimum ledger open time
67  BEAST_EXPECT(!shouldCloseLedger(
68  true, 10, 0, 0, 10s, 10s, 1s, delay, 10s, p, journal_));
69 
70  // Don't go too much faster than last time
71  BEAST_EXPECT(!shouldCloseLedger(
72  true, 10, 0, 0, 10s, 10s, 3s, delay, 10s, p, journal_));
73 
74  BEAST_EXPECT(shouldCloseLedger(
75  true, 10, 0, 0, 10s, 10s, 10s, delay, 10s, p, journal_));
76  }
77 
78  void
80  {
81  using namespace std::chrono_literals;
82 
83  // Use default parameterss
84  ConsensusParms const p{};
85 
86  // Not enough time has elapsed
87  BEAST_EXPECT(
89  checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
90 
91  // If not enough peers have propsed, ensure
92  // more time for proposals
93  BEAST_EXPECT(
95  checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
96 
97  // Enough time has elapsed and we all agree
98  BEAST_EXPECT(
100  checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
101 
102  // Enough time has elapsed and we don't yet agree
103  BEAST_EXPECT(
105  checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
106 
107  // Our peers have moved on
108  // Enough time has elapsed and we all agree
109  BEAST_EXPECT(
111  checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
112 
113  // No peers makes it easy to agree
114  BEAST_EXPECT(
116  checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
117  }
118 
119  void
121  {
122  using namespace std::chrono_literals;
123  using namespace csf;
124 
125  Sim s;
126  PeerGroup peers = s.createGroup(1);
127  Peer* peer = peers[0];
128  peer->targetLedgers = 1;
129  peer->start();
130  peer->submit(Tx{1});
131 
132  s.scheduler.step();
133 
134  // Inspect that the proper ledger was created
135  auto const& lcl = peer->lastClosedLedger;
136  BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
137  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
138  BEAST_EXPECT(lcl.txs().size() == 1);
139  BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
140  BEAST_EXPECT(peer->prevProposers == 0);
141  }
142 
143  void
145  {
146  using namespace csf;
147  using namespace std::chrono;
148 
149  ConsensusParms const parms{};
150  Sim sim;
151  PeerGroup peers = sim.createGroup(5);
152 
153  // Connected trust and network graphs with single fixed delay
154  peers.trustAndConnect(
155  peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
156 
157  // everyone submits their own ID as a TX
158  for (Peer* p : peers)
159  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
160 
161  sim.run(1);
162 
163  // All peers are in sync
164  if (BEAST_EXPECT(sim.synchronized()))
165  {
166  for (Peer const* peer : peers)
167  {
168  auto const& lcl = peer->lastClosedLedger;
169  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
170  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
171  // All peers proposed
172  BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
173  // All transactions were accepted
174  for (std::uint32_t i = 0; i < peers.size(); ++i)
175  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
176  }
177  }
178  }
179 
180  void
182  {
183  using namespace csf;
184  using namespace std::chrono;
185 
186  // Several tests of a complete trust graph with a subset of peers
187  // that have significantly longer network delays to the rest of the
188  // network
189 
190  // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
191  {
192  ConsensusParms const parms{};
193  Sim sim;
194  PeerGroup slow = sim.createGroup(1);
195  PeerGroup fast = sim.createGroup(4);
196  PeerGroup network = fast + slow;
197 
198  // Fully connected trust graph
199  network.trust(network);
200 
201  // Fast and slow network connections
202  fast.connect(
203  fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
204 
205  slow.connect(
206  network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
207 
208  // All peers submit their own ID as a transaction
209  for (Peer* peer : network)
210  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
211 
212  sim.run(1);
213 
214  // Verify all peers have same LCL but are missing transaction 0
215  // All peers are in sync even with a slower peer 0
216  if (BEAST_EXPECT(sim.synchronized()))
217  {
218  for (Peer* peer : network)
219  {
220  auto const& lcl = peer->lastClosedLedger;
221  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
222  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
223 
224  BEAST_EXPECT(peer->prevProposers == network.size() - 1);
225  BEAST_EXPECT(
226  peer->prevRoundTime == network[0]->prevRoundTime);
227 
228  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
229  for (std::uint32_t i = 2; i < network.size(); ++i)
230  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
231 
232  // Tx 0 didn't make it
233  BEAST_EXPECT(
234  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
235  }
236  }
237  }
238 
239  // Test when the slow peers delay a consensus quorum (4/6 agree)
240  {
241  // Run two tests
242  // 1. The slow peers are participating in consensus
243  // 2. The slow peers are just observing
244 
245  for (auto isParticipant : {true, false})
246  {
247  ConsensusParms const parms{};
248 
249  Sim sim;
250  PeerGroup slow = sim.createGroup(2);
251  PeerGroup fast = sim.createGroup(4);
252  PeerGroup network = fast + slow;
253 
254  // Connected trust graph
255  network.trust(network);
256 
257  // Fast and slow network connections
258  fast.connect(
259  fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
260 
261  slow.connect(
262  network,
263  round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
264 
265  for (Peer* peer : slow)
266  peer->runAsValidator = isParticipant;
267 
268  // All peers submit their own ID as a transaction and relay it
269  // to peers
270  for (Peer* peer : network)
271  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
272 
273  sim.run(1);
274 
275  if (BEAST_EXPECT(sim.synchronized()))
276  {
277  // Verify all peers have same LCL but are missing
278  // transaction 0,1 which was not received by all peers
279  // before the ledger closed
280  for (Peer* peer : network)
281  {
282  // Closed ledger has all but transaction 0,1
283  auto const& lcl = peer->lastClosedLedger;
284  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
285  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
286  BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
287  for (std::uint32_t i = slow.size(); i < network.size();
288  ++i)
289  BEAST_EXPECT(
290  lcl.txs().find(Tx{i}) != lcl.txs().end());
291 
292  // Tx 0-1 didn't make it
293  BEAST_EXPECT(
294  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
295  BEAST_EXPECT(
296  peer->openTxs.find(Tx{1}) != peer->openTxs.end());
297  }
298 
299  Peer const* slowPeer = slow[0];
300  if (isParticipant)
301  BEAST_EXPECT(
302  slowPeer->prevProposers == network.size() - 1);
303  else
304  BEAST_EXPECT(slowPeer->prevProposers == fast.size());
305 
306  for (Peer* peer : fast)
307  {
308  // Due to the network link delay settings
309  // Peer 0 initially proposes {0}
310  // Peer 1 initially proposes {1}
311  // Peers 2-5 initially propose {2,3,4,5}
312  // Since peers 2-5 agree, 4/6 > the initial 50% needed
313  // to include a disputed transaction, so Peer 0/1 switch
314  // to agree with those peers. Peer 0/1 then closes with
315  // an 80% quorum of agreeing positions (5/6) match.
316  //
317  // Peers 2-5 do not change position, since tx 0 or tx 1
318  // have less than the 50% initial threshold. They also
319  // cannot declare consensus, since 4/6 agreeing
320  // positions are < 80% threshold. They therefore need an
321  // additional timerEntry call to see the updated
322  // positions from Peer 0 & 1.
323 
324  if (isParticipant)
325  {
326  BEAST_EXPECT(
327  peer->prevProposers == network.size() - 1);
328  BEAST_EXPECT(
329  peer->prevRoundTime > slowPeer->prevRoundTime);
330  }
331  else
332  {
333  BEAST_EXPECT(
334  peer->prevProposers == fast.size() - 1);
335  // so all peers should have closed together
336  BEAST_EXPECT(
337  peer->prevRoundTime == slowPeer->prevRoundTime);
338  }
339  }
340  }
341  }
342  }
343  }
344 
345  void
347  {
348  using namespace csf;
349  using namespace std::chrono;
350 
351  // This is a very specialized test to get ledgers to disagree on
352  // the close time. It unfortunately assumes knowledge about current
353  // timing constants. This is a necessary evil to get coverage up
354  // pending more extensive refactorings of timing constants.
355 
356  // In order to agree-to-disagree on the close time, there must be no
357  // clear majority of nodes agreeing on a close time. This test
358  // sets a relative offset to the peers internal clocks so that they
359  // send proposals with differing times.
360 
361  // However, agreement is on the effective close time, not the
362  // exact close time. The minimum closeTimeResolution is given by
363  // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
364  // the skews need to be at least 10 seconds to have different effective
365  // close times.
366 
367  // Complicating this matter is that nodes will ignore proposals
368  // with times more than proposeFRESHNESS =20s in the past. So at
369  // the minimum granularity, we have at most 3 types of skews
370  // (0s,10s,20s).
371 
372  // This test therefore has 6 nodes, with 2 nodes having each type of
373  // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
374  // actual close time.
375 
376  ConsensusParms const parms{};
377  Sim sim;
378 
379  PeerGroup groupA = sim.createGroup(2);
380  PeerGroup groupB = sim.createGroup(2);
381  PeerGroup groupC = sim.createGroup(2);
382  PeerGroup network = groupA + groupB + groupC;
383 
384  network.trust(network);
385  network.connect(
386  network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
387 
388  // Run consensus without skew until we have a short close time
389  // resolution
390  Peer* firstPeer = *groupA.begin();
391  while (firstPeer->lastClosedLedger.closeTimeResolution() >=
392  parms.proposeFRESHNESS)
393  sim.run(1);
394 
395  // Introduce a shift on the time of 2/3 of peers
396  for (Peer* peer : groupA)
397  peer->clockSkew = parms.proposeFRESHNESS / 2;
398  for (Peer* peer : groupB)
399  peer->clockSkew = parms.proposeFRESHNESS;
400 
401  sim.run(1);
402 
403  // All nodes agreed to disagree on the close time
404  if (BEAST_EXPECT(sim.synchronized()))
405  {
406  for (Peer* peer : network)
407  BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
408  }
409  }
410 
411  void
413  {
414  using namespace csf;
415  using namespace std::chrono;
416  // Specialized test to exercise a temporary fork in which some peers
417  // are working on an incorrect prior ledger.
418 
419  ConsensusParms const parms{};
420 
421  // Vary the time it takes to process validations to exercise detecting
422  // the wrong LCL at different phases of consensus
423  for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
424  {
425  // Consider 10 peers:
426  // 0 1 2 3 4 5 6 7 8 9
427  // minority majorityA majorityB
428  //
429  // Nodes 0-1 trust nodes 0-4
430  // Nodes 2-9 trust nodes 2-9
431  //
432  // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
433  // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
434  // nodes will instead accept the ledger with tx 1.
435 
436  // Nodes 0-1 will detect this mismatch during a subsequent round
437  // since nodes 2-4 will validate a different ledger.
438 
439  // Nodes 0-1 will acquire the proper ledger from the network and
440  // resume consensus and eventually generate the dominant network
441  // ledger.
442 
443  // This topology can potentially fork with the above trust relations
444  // but that is intended for this test.
445 
446  Sim sim;
447 
448  PeerGroup minority = sim.createGroup(2);
449  PeerGroup majorityA = sim.createGroup(3);
450  PeerGroup majorityB = sim.createGroup(5);
451 
452  PeerGroup majority = majorityA + majorityB;
453  PeerGroup network = minority + majority;
454 
455  SimDuration delay =
456  round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
457  minority.trustAndConnect(minority + majorityA, delay);
458  majority.trustAndConnect(majority, delay);
459 
460  CollectByNode<JumpCollector> jumps;
461  sim.collectors.add(jumps);
462 
463  BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
464 
465  // initial round to set prior state
466  sim.run(1);
467 
468  // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
469  // tx 1
470  for (Peer* peer : network)
471  peer->delays.recvValidation = validationDelay;
472  for (Peer* peer : (minority + majorityA))
473  peer->openTxs.insert(Tx{0});
474  for (Peer* peer : majorityB)
475  peer->openTxs.insert(Tx{1});
476 
477  // Run for additional rounds
478  // With no validation delay, only 2 more rounds are needed.
479  // 1. Round to generate different ledgers
480  // 2. Round to detect different prior ledgers (but still generate
481  // wrong ones) and recover within that round since wrong LCL
482  // is detected before we close
483  //
484  // With a validation delay of ledgerMIN_CLOSE, we need 3 more
485  // rounds.
486  // 1. Round to generate different ledgers
487  // 2. Round to detect different prior ledgers (but still generate
488  // wrong ones) but end up declaring consensus on wrong LCL (but
489  // with the right transaction set!). This is because we detect
490  // the wrong LCL after we have closed the ledger, so we declare
491  // consensus based solely on our peer proposals. But we haven't
492  // had time to acquire the right ledger.
493  // 3. Round to correct
494  sim.run(3);
495 
496  // The network never actually forks, since node 0-1 never see a
497  // quorum of validations to fully validate the incorrect chain.
498 
499  // However, for a non zero-validation delay, the network is not
500  // synchronized because nodes 0 and 1 are running one ledger behind
501  if (BEAST_EXPECT(sim.branches() == 1))
502  {
503  for (Peer const* peer : majority)
504  {
505  // No jumps for majority nodes
506  BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
507  BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
508  }
509  for (Peer const* peer : minority)
510  {
511  auto& peerJumps = jumps[peer->id];
512  // last closed ledger jump between chains
513  {
514  if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
515  {
516  JumpCollector::Jump const& jump =
517  peerJumps.closeJumps.front();
518  // Jump is to a different chain
519  BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
520  BEAST_EXPECT(!jump.to.isAncestor(jump.from));
521  }
522  }
523  // fully validated jump forward in same chain
524  {
525  if (BEAST_EXPECT(
526  peerJumps.fullyValidatedJumps.size() == 1))
527  {
528  JumpCollector::Jump const& jump =
529  peerJumps.fullyValidatedJumps.front();
530  // Jump is to a different chain with same seq
531  BEAST_EXPECT(jump.from.seq() < jump.to.seq());
532  BEAST_EXPECT(jump.to.isAncestor(jump.from));
533  }
534  }
535  }
536  }
537  }
538 
539  {
540  // Additional test engineered to switch LCL during the establish
541  // phase. This was added to trigger a scenario that previously
542  // crashed, in which switchLCL switched from establish to open
543  // phase, but still processed the establish phase logic.
544 
545  // Loner node will accept an initial ledger A, but all other nodes
546  // accept ledger B a bit later. By delaying the time it takes
547  // to process a validation, loner node will detect the wrongLCL
548  // after it is already in the establish phase of the next round.
549 
550  Sim sim;
551  PeerGroup loner = sim.createGroup(1);
552  PeerGroup friends = sim.createGroup(3);
553  loner.trust(loner + friends);
554 
555  PeerGroup others = sim.createGroup(6);
556  PeerGroup clique = friends + others;
557  clique.trust(clique);
558 
559  PeerGroup network = loner + clique;
560  network.connect(
561  network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
562 
563  // initial round to set prior state
564  sim.run(1);
565  for (Peer* peer : (loner + friends))
566  peer->openTxs.insert(Tx(0));
567  for (Peer* peer : others)
568  peer->openTxs.insert(Tx(1));
569 
570  // Delay validation processing
571  for (Peer* peer : network)
572  peer->delays.recvValidation = parms.ledgerGRANULARITY;
573 
574  // additional rounds to generate wrongLCL and recover
575  sim.run(2);
576 
577  // Check all peers recovered
578  for (Peer* p : network)
579  BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
580  }
581  }
582 
583  void
585  {
586  using namespace csf;
587  using namespace std::chrono;
588 
589  // This is a specialized test engineered to yield ledgers with different
590  // close times even though the peers believe they had close time
591  // consensus on the ledger.
592  ConsensusParms parms;
593 
594  Sim sim;
595 
596  // This requires a group of 4 fast and 2 slow peers to create a
597  // situation in which a subset of peers requires seeing additional
598  // proposals to declare consensus.
599  PeerGroup slow = sim.createGroup(2);
600  PeerGroup fast = sim.createGroup(4);
601  PeerGroup network = fast + slow;
602 
603  for (Peer* peer : network)
604  peer->consensusParms = parms;
605 
606  // Connected trust graph
607  network.trust(network);
608 
609  // Fast and slow network connections
610  fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
611  slow.connect(
612  network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
613 
614  // Run to the ledger *prior* to decreasing the resolution
616 
617  // In order to create the discrepency, we want a case where if
618  // X = effCloseTime(closeTime, resolution, parentCloseTime)
619  // X != effCloseTime(X, resolution, parentCloseTime)
620  //
621  // That is, the effective close time is not a fixed point. This can
622  // happen if X = parentCloseTime + 1, but a subsequent rounding goes
623  // to the next highest multiple of resolution.
624 
625  // So we want to find an offset (now + offset) % 30s = 15
626  // (now + offset) % 20s = 15
627  // This way, the next ledger will close and round up Due to the
628  // network delay settings, the round of consensus will take 5s, so
629  // the next ledger's close time will
630 
631  NetClock::duration when = network[0]->now().time_since_epoch();
632 
633  // Check we are before the 30s to 20s transition
634  NetClock::duration resolution =
635  network[0]->lastClosedLedger.closeTimeResolution();
636  BEAST_EXPECT(resolution == NetClock::duration{30s});
637 
638  while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
639  ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
640  when += 1s;
641  // Advance the clock without consensus running (IS THIS WHAT
642  // PREVENTS IT IN PRACTICE?)
643  sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
644 
645  // Run one more ledger with 30s resolution
646  sim.run(1);
647  if (BEAST_EXPECT(sim.synchronized()))
648  {
649  // close time should be ahead of clock time since we engineered
650  // the close time to round up
651  for (Peer* peer : network)
652  {
653  BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
654  BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
655  }
656  }
657 
658  // All peers submit their own ID as a transaction
659  for (Peer* peer : network)
660  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
661 
662  // Run 1 more round, this time it will have a decreased
663  // resolution of 20 seconds.
664 
665  // The network delays are engineered so that the slow peers
666  // initially have the wrong tx hash, but they see a majority
667  // of agreement from their peers and declare consensus
668  //
669  // The trick is that everyone starts with a raw close time of
670  // 84681s
671  // Which has
672  // effCloseTime(86481s, 20s, 86490s) = 86491s
673  // However, when the slow peers update their position, they change
674  // the close time to 86451s. The fast peers declare consensus with
675  // the 86481s as their position still.
676  //
677  // When accepted the ledger
678  // - fast peers use eff(86481s) -> 86491s as the close time
679  // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
680 
681  sim.run(1);
682 
683  BEAST_EXPECT(sim.synchronized());
684  }
685 
686  void
688  {
689  using namespace csf;
690  using namespace std::chrono;
691 
692  std::uint32_t numPeers = 10;
693  // Vary overlap between two UNLs
694  for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
695  {
696  ConsensusParms const parms{};
697  Sim sim;
698 
699  std::uint32_t numA = (numPeers - overlap) / 2;
700  std::uint32_t numB = numPeers - numA - overlap;
701 
702  PeerGroup aOnly = sim.createGroup(numA);
703  PeerGroup bOnly = sim.createGroup(numB);
704  PeerGroup commonOnly = sim.createGroup(overlap);
705 
706  PeerGroup a = aOnly + commonOnly;
707  PeerGroup b = bOnly + commonOnly;
708 
709  PeerGroup network = a + b;
710 
711  SimDuration delay =
712  round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
713  a.trustAndConnect(a, delay);
714  b.trustAndConnect(b, delay);
715 
716  // Initial round to set prior state
717  sim.run(1);
718  for (Peer* peer : network)
719  {
720  // Nodes have only seen transactions from their neighbors
721  peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
722  for (Peer* to : sim.trustGraph.trustedPeers(peer))
723  peer->openTxs.insert(
724  Tx{static_cast<std::uint32_t>(to->id)});
725  }
726  sim.run(1);
727 
728  // Fork should not happen for 40% or greater overlap
729  // Since the overlapped nodes have a UNL that is the union of the
730  // two cliques, the maximum sized UNL list is the number of peers
731  if (overlap > 0.4 * numPeers)
732  BEAST_EXPECT(sim.synchronized());
733  else
734  {
735  // Even if we do fork, there shouldn't be more than 3 ledgers
736  // One for cliqueA, one for cliqueB and one for nodes in both
737  BEAST_EXPECT(sim.branches() <= 3);
738  }
739  }
740  }
741 
742  void
744  {
745  using namespace csf;
746  using namespace std::chrono;
747 
748  // Simulate a set of 5 validators that aren't directly connected but
749  // rely on a single hub node for communication
750 
751  ConsensusParms const parms{};
752  Sim sim;
753  PeerGroup validators = sim.createGroup(5);
754  PeerGroup center = sim.createGroup(1);
755  validators.trust(validators);
756  center.trust(validators);
757 
758  SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
759  validators.connect(center, delay);
760 
761  center[0]->runAsValidator = false;
762 
763  // prep round to set initial state.
764  sim.run(1);
765 
766  // everyone submits their own ID as a TX and relay it to peers
767  for (Peer* p : validators)
768  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
769 
770  sim.run(1);
771 
772  // All peers are in sync
773  BEAST_EXPECT(sim.synchronized());
774  }
775 
776  // Helper collector for testPreferredByBranch
777  // Invasively disconnects network at bad times to cause splits
778  struct Disruptor
779  {
784  bool reconnected = false;
785 
787  csf::PeerGroup& net,
788  csf::PeerGroup& c,
789  csf::PeerGroup& split,
791  : network(net), groupCfast(c), groupCsplit(split), delay(d)
792  {
793  }
794 
795  template <class E>
796  void
798  {
799  }
800 
801  void
803  {
804  using namespace std::chrono;
805  // As soon as the the fastC node fully validates C, disconnect
806  // ALL c nodes from the network. The fast C node needs to disconnect
807  // as well to prevent it from relaying the validations it did see
808  if (who == groupCfast[0]->id &&
809  e.ledger.seq() == csf::Ledger::Seq{2})
810  {
811  network.disconnect(groupCsplit);
812  network.disconnect(groupCfast);
813  }
814  }
815 
816  void
818  {
819  // As soon as anyone generates a child of B or C, reconnect the
820  // network so those validations make it through
821  if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
822  {
823  reconnected = true;
824  network.connect(groupCsplit, delay);
825  }
826  }
827  };
828 
829  void
831  {
832  using namespace csf;
833  using namespace std::chrono;
834 
835  // Simulate network splits that are prevented from forking when using
836  // preferred ledger by trie. This is a contrived example that involves
837  // excessive network splits, but demonstrates the safety improvement
838  // from the preferred ledger by trie approach.
839 
840  // Consider 10 validating nodes that comprise a single common UNL
841  // Ledger history:
842  // 1: A
843  // _/ \_
844  // 2: B C
845  // _/ _/ \_
846  // 3: D C' |||||||| (8 different ledgers)
847 
848  // - All nodes generate the common ledger A
849  // - 2 nodes generate B and 8 nodes generate C
850  // - Only 1 of the C nodes sees all the C validations and fully
851  // validates C. The rest of the C nodes split at just the right time
852  // such that they never see any C validations but their own.
853  // - The C nodes continue and generate 8 different child ledgers.
854  // - Meanwhile, the D nodes only saw 1 validation for C and 2
855  // validations
856  // for B.
857  // - The network reconnects and the validations for generation 3 ledgers
858  // are observed (D and the 8 C's)
859  // - In the old approach, 2 votes for D outweights 1 vote for each C'
860  // so the network would avalanche towards D and fully validate it
861  // EVEN though C was fully validated by one node
862  // - In the new approach, 2 votes for D are not enough to outweight the
863  // 8 implicit votes for C, so nodes will avalanche to C instead
864 
865  ConsensusParms const parms{};
866  Sim sim;
867 
868  // Goes A->B->D
869  PeerGroup groupABD = sim.createGroup(2);
870  // Single node that initially fully validates C before the split
871  PeerGroup groupCfast = sim.createGroup(1);
872  // Generates C, but fails to fully validate before the split
873  PeerGroup groupCsplit = sim.createGroup(7);
874 
875  PeerGroup groupNotFastC = groupABD + groupCsplit;
876  PeerGroup network = groupABD + groupCsplit + groupCfast;
877 
878  SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
879  SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
880 
881  network.trust(network);
882  // C must have a shorter delay to see all the validations before the
883  // other nodes
884  network.connect(groupCfast, fDelay);
885  // The rest of the network is connected at the same speed
886  groupNotFastC.connect(groupNotFastC, delay);
887 
888  Disruptor dc(network, groupCfast, groupCsplit, delay);
889  sim.collectors.add(dc);
890 
891  // Consensus round to generate ledger A
892  sim.run(1);
893  BEAST_EXPECT(sim.synchronized());
894 
895  // Next round generates B and C
896  // To force B, we inject an extra transaction in to those nodes
897  for (Peer* peer : groupABD)
898  {
899  peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
900  }
901  // The Disruptor will ensure that nodes disconnect before the C
902  // validations make it to all but the fastC node
903  sim.run(1);
904 
905  // We are no longer in sync, but have not yet forked:
906  // 9 nodes consider A the last fully validated ledger and fastC sees C
907  BEAST_EXPECT(!sim.synchronized());
908  BEAST_EXPECT(sim.branches() == 1);
909 
910  // Run another round to generate the 8 different C' ledgers
911  for (Peer* p : network)
912  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
913  sim.run(1);
914 
915  // Still not forked
916  BEAST_EXPECT(!sim.synchronized());
917  BEAST_EXPECT(sim.branches() == 1);
918 
919  // Disruptor will reconnect all but the fastC node
920  sim.run(1);
921 
922  if (BEAST_EXPECT(sim.branches() == 1))
923  {
924  BEAST_EXPECT(sim.synchronized());
925  }
926  else // old approach caused a fork
927  {
928  BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
929  BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
930  }
931  }
932 
933  // Helper collector for testPauseForLaggards
934  // This will remove the ledgerAccept delay used to
935  // initially create the slow vs. fast validator groups.
936  struct UndoDelay
937  {
939 
941  {
942  }
943 
944  template <class E>
945  void
947  {
948  }
949 
950  void
952  {
953  for (csf::Peer* p : g)
954  {
955  if (p->id == who)
956  p->delays.ledgerAccept = std::chrono::seconds{0};
957  }
958  }
959  };
960 
961  void
963  {
964  using namespace csf;
965  using namespace std::chrono;
966 
967  // Test that validators that jump ahead of the network slow
968  // down.
969 
970  // We engineer the following validated ledger history scenario:
971  //
972  // / --> B1 --> C1 --> ... -> G1 "ahead"
973  // A
974  // \ --> B2 --> C2 "behind"
975  //
976  // After validating a common ledger A, a set of "behind" validators
977  // briefly run slower and validate the lower chain of ledgers.
978  // The "ahead" validators run normal speed and run ahead validating the
979  // upper chain of ledgers.
980  //
981  // Due to the uncommited support definition of the preferred branch
982  // protocol, even if the "behind" validators are a majority, the "ahead"
983  // validators cannot jump to the proper branch until the "behind"
984  // validators catch up to the same sequence number. For this test to
985  // succeed, the ahead validators need to briefly slow down consensus.
986 
987  ConsensusParms const parms{};
988  Sim sim;
989  SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
990 
991  PeerGroup behind = sim.createGroup(3);
992  PeerGroup ahead = sim.createGroup(2);
993  PeerGroup network = ahead + behind;
994 
995  hash_set<Peer::NodeKey_t> trustedKeys;
996  for (Peer* p : network)
997  trustedKeys.insert(p->key);
998  for (Peer* p : network)
999  p->trustedKeys = trustedKeys;
1000 
1001  network.trustAndConnect(network, delay);
1002 
1003  // Initial seed round to set prior state
1004  sim.run(1);
1005 
1006  // Have the "behind" group initially take a really long time to
1007  // accept a ledger after ending deliberation
1008  for (Peer* p : behind)
1009  p->delays.ledgerAccept = 20s;
1010 
1011  // Use the collector to revert the delay after the single
1012  // slow ledger is generated
1013  UndoDelay undoDelay{behind};
1014  sim.collectors.add(undoDelay);
1015 
1016 #if 0
1017  // Have all beast::journal output printed to stdout
1018  for (Peer* p : network)
1019  p->sink.threshold(beast::severities::kAll);
1020 
1021  // Print ledger accept and fully validated events to stdout
1022  StreamCollector sc{std::cout};
1023  sim.collectors.add(sc);
1024 #endif
1025  // Run the simulation for 100 seconds of simulation time with
1026  std::chrono::nanoseconds const simDuration = 100s;
1027 
1028  // Simulate clients submitting 1 tx every 5 seconds to a random
1029  // validator
1030  Rate const rate{1, 5s};
1031  auto peerSelector = makeSelector(
1032  network.begin(),
1033  network.end(),
1034  std::vector<double>(network.size(), 1.),
1035  sim.rng);
1036  auto txSubmitter = makeSubmitter(
1037  ConstantDistribution{rate.inv()},
1038  sim.scheduler.now(),
1039  sim.scheduler.now() + simDuration,
1040  peerSelector,
1041  sim.scheduler,
1042  sim.rng);
1043 
1044  // Run simulation
1045  sim.run(simDuration);
1046 
1047  // Verify that the network recovered
1048  BEAST_EXPECT(sim.synchronized());
1049  }
1050 
1051  void
1052  run() override
1053  {
1056 
1057  testStandalone();
1058  testPeersAgree();
1059  testSlowPeers();
1061  testWrongLCL();
1063  testFork();
1064  testHubNetwork();
1067  }
1068 };
1069 
1071 } // namespace test
1072 } // namespace ripple
ripple::test::csf::SimTime
typename SimClock::time_point SimTime
Definition: SimTime.h:36
ripple::test::Consensus_test::testHubNetwork
void testHubNetwork()
Definition: Consensus_test.cpp:743
ripple::checkConsensus
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, ConsensusParms const &parms, bool proposing, beast::Journal j)
Determine whether the network reached consensus and whether we joined.
Definition: Consensus.cpp:115
ripple::test::Consensus_test::testSlowPeers
void testSlowPeers()
Definition: Consensus_test.cpp:181
ripple::test::Consensus_test::Disruptor
Definition: Consensus_test.cpp:778
utility
ripple::test::Consensus_test::testPeersAgree
void testPeersAgree()
Definition: Consensus_test.cpp:144
ripple::Rate
Represents a transfer rate.
Definition: Rate.h:37
std::unordered_set
STL class.
ripple::test::Consensus_test::testConsensusCloseTimeRounding
void testConsensusCloseTimeRounding()
Definition: Consensus_test.cpp:584
std::vector
STL class.
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:817
ripple::test::Consensus_test::Disruptor::Disruptor
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
Definition: Consensus_test.cpp:786
ripple::Consensus
Generic implementation of consensus algorithm.
Definition: Consensus.h:314
ripple::ConsensusState::Yes
@ Yes
We have consensus along with the network.
ripple::test::Consensus_test::Disruptor::delay
csf::SimDuration delay
Definition: Consensus_test.cpp:783
std::chrono::duration
beast::severities::kAll
@ kAll
Definition: Journal.h:32
ripple::test::csf::FullyValidateLedger
Peer fully validated a new ledger.
Definition: events.h:137
ripple::ConsensusParms::ledgerGRANULARITY
std::chrono::milliseconds ledgerGRANULARITY
How often we check state or change positions.
Definition: ConsensusParms.h:103
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
Definition: Consensus_test.cpp:802
ripple::test::csf::Ledger::seq
Seq seq() const
Definition: ledgers.h:173
ripple::test::Consensus_test::Disruptor::groupCfast
csf::PeerGroup & groupCfast
Definition: Consensus_test.cpp:781
ripple::test::csf::FullyValidateLedger::ledger
Ledger ledger
The new fully validated ledger.
Definition: events.h:140
ripple::test::csf::AcceptLedger
Peer accepted consensus results.
Definition: events.h:118
ripple::test::csf::PeerGroup::connect
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition: PeerGroup.h:164
std::cout
ripple::test::Consensus_test::testCheckConsensus
void testCheckConsensus()
Definition: Consensus_test.cpp:79
ripple::test::csf::AcceptLedger::ledger
Ledger ledger
Definition: events.h:121
std::chrono::time_point
ripple::test::Consensus_test::testStandalone
void testStandalone()
Definition: Consensus_test.cpp:120
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:797
std::uint32_t
ripple::ConsensusState::No
@ No
We do not have consensus.
ripple::ConsensusState::MovedOn
@ MovedOn
The network has consensus without us.
ripple::test::Consensus_test::run
void run() override
Definition: Consensus_test.cpp:1052
ripple::test::Consensus_test::testShouldCloseLedger
void testShouldCloseLedger()
Definition: Consensus_test.cpp:40
ripple::test::SuiteJournal
Definition: SuiteJournal.h:88
ripple::test::csf::PeerGroup::disconnect
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition: PeerGroup.h:184
ripple::test::Consensus_test::testPreferredByBranch
void testPreferredByBranch()
Definition: Consensus_test.cpp:830
ripple::test::csf::PeerGroup
A group of simulation Peers.
Definition: PeerGroup.h:39
ripple::test::Consensus_test::testFork
void testFork()
Definition: Consensus_test.cpp:687
ripple::test::csf::Peer
A single peer in the simulation.
Definition: test/csf/Peer.h:62
ripple::test::Consensus_test::journal_
SuiteJournal journal_
Definition: Consensus_test.cpp:32
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::test::Consensus_test::UndoDelay::g
csf::PeerGroup & g
Definition: Consensus_test.cpp:938
ripple::test::Consensus_test::UndoDelay
Definition: Consensus_test.cpp:936
ripple::ConsensusParms
Consensus algorithm parameters.
Definition: ConsensusParms.h:33
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:946
std::unordered_set::insert
T insert(T... args)
ripple::test::Consensus_test::testWrongLCL
void testWrongLCL()
Definition: Consensus_test.cpp:412
ripple::increaseLedgerTimeResolutionEvery
constexpr auto increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
Definition: LedgerTiming.h:50
ripple::test::Consensus_test::Disruptor::network
csf::PeerGroup & network
Definition: Consensus_test.cpp:780
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:951
std::optional< std::chrono::milliseconds >
ripple::Peer::id
virtual id_t id() const =0
ripple::tagged_integer< std::uint32_t, PeerIDTag >
ripple::test::csf::SimDuration
typename SimClock::duration SimDuration
Definition: SimTime.h:35
ripple::test::Consensus_test::UndoDelay::UndoDelay
UndoDelay(csf::PeerGroup &a)
Definition: Consensus_test.cpp:940
ripple::test::Consensus_test::Disruptor::groupCsplit
csf::PeerGroup & groupCsplit
Definition: Consensus_test.cpp:782
ripple::test::Consensus_test::testPauseForLaggards
void testPauseForLaggards()
Definition: Consensus_test.cpp:962
ripple::test::Consensus_test::Consensus_test
Consensus_test()
Definition: Consensus_test.cpp:35
ripple::test::Consensus_test
Definition: Consensus_test.cpp:30
ripple::shouldCloseLedger
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::optional< std::chrono::milliseconds > validationDelay, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j)
Determines whether the current ledger should close at this time.
Definition: Consensus.cpp:26
ripple::test::Consensus_test::testCloseTimeDisagree
void testCloseTimeDisagree()
Definition: Consensus_test.cpp:346
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:45
ripple::test::jtx::rate
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
ripple::test::BEAST_DEFINE_TESTSUITE
BEAST_DEFINE_TESTSUITE(DeliverMin, app, ripple)
std::chrono