// rippled — Consensus_test.cpp (unit tests for the generic consensus algorithm)
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012-2016 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 #include <ripple/beast/clock/manual_clock.h>
20 #include <ripple/beast/unit_test.h>
21 #include <ripple/consensus/Consensus.h>
22 #include <ripple/consensus/ConsensusProposal.h>
23 #include <test/csf.h>
24 #include <test/unit_test/SuiteJournal.h>
25 #include <utility>
26 
27 namespace ripple {
28 namespace test {
29 
30 class Consensus_test : public beast::unit_test::suite
31 {
33 
34 public:
35  Consensus_test() : journal_("Consensus_test", *this)
36  {
37  }
38 
39  void
41  {
42  using namespace std::chrono_literals;
43 
44  // Use default parameters
45  ConsensusParms const p{};
46 
47  // Bizarre times forcibly close
48  BEAST_EXPECT(shouldCloseLedger(
49  true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
50  BEAST_EXPECT(shouldCloseLedger(
51  true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
52  BEAST_EXPECT(shouldCloseLedger(
53  true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
54 
55  // Rest of network has closed
56  BEAST_EXPECT(
57  shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
58 
59  // No transactions means wait until end of internval
60  BEAST_EXPECT(
61  !shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
62  BEAST_EXPECT(
63  shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
64 
65  // Enforce minimum ledger open time
66  BEAST_EXPECT(
67  !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
68 
69  // Don't go too much faster than last time
70  BEAST_EXPECT(
71  !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
72 
73  BEAST_EXPECT(
74  shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
75  }
76 
77  void
79  {
80  using namespace std::chrono_literals;
81 
82  // Use default parameterss
83  ConsensusParms const p{};
84 
85  // Not enough time has elapsed
86  BEAST_EXPECT(
88  checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
89 
90  // If not enough peers have propsed, ensure
91  // more time for proposals
92  BEAST_EXPECT(
94  checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
95 
96  // Enough time has elapsed and we all agree
97  BEAST_EXPECT(
99  checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
100 
101  // Enough time has elapsed and we don't yet agree
102  BEAST_EXPECT(
104  checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
105 
106  // Our peers have moved on
107  // Enough time has elapsed and we all agree
108  BEAST_EXPECT(
110  checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
111 
112  // No peers makes it easy to agree
113  BEAST_EXPECT(
115  checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
116  }
117 
118  void
120  {
121  using namespace std::chrono_literals;
122  using namespace csf;
123 
124  Sim s;
125  PeerGroup peers = s.createGroup(1);
126  Peer* peer = peers[0];
127  peer->targetLedgers = 1;
128  peer->start();
129  peer->submit(Tx{1});
130 
131  s.scheduler.step();
132 
133  // Inspect that the proper ledger was created
134  auto const& lcl = peer->lastClosedLedger;
135  BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
136  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
137  BEAST_EXPECT(lcl.txs().size() == 1);
138  BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
139  BEAST_EXPECT(peer->prevProposers == 0);
140  }
141 
142  void
144  {
145  using namespace csf;
146  using namespace std::chrono;
147 
148  ConsensusParms const parms{};
149  Sim sim;
150  PeerGroup peers = sim.createGroup(5);
151 
152  // Connected trust and network graphs with single fixed delay
153  peers.trustAndConnect(
154  peers, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
155 
156  // everyone submits their own ID as a TX
157  for (Peer* p : peers)
158  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
159 
160  sim.run(1);
161 
162  // All peers are in sync
163  if (BEAST_EXPECT(sim.synchronized()))
164  {
165  for (Peer const* peer : peers)
166  {
167  auto const& lcl = peer->lastClosedLedger;
168  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
169  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
170  // All peers proposed
171  BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
172  // All transactions were accepted
173  for (std::uint32_t i = 0; i < peers.size(); ++i)
174  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
175  }
176  }
177  }
178 
179  void
181  {
182  using namespace csf;
183  using namespace std::chrono;
184 
185  // Several tests of a complete trust graph with a subset of peers
186  // that have significantly longer network delays to the rest of the
187  // network
188 
189  // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
190  {
191  ConsensusParms const parms{};
192  Sim sim;
193  PeerGroup slow = sim.createGroup(1);
194  PeerGroup fast = sim.createGroup(4);
195  PeerGroup network = fast + slow;
196 
197  // Fully connected trust graph
198  network.trust(network);
199 
200  // Fast and slow network connections
201  fast.connect(
202  fast, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
203 
204  slow.connect(
205  network,
206  date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
207 
208  // All peers submit their own ID as a transaction
209  for (Peer* peer : network)
210  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
211 
212  sim.run(1);
213 
214  // Verify all peers have same LCL but are missing transaction 0
215  // All peers are in sync even with a slower peer 0
216  if (BEAST_EXPECT(sim.synchronized()))
217  {
218  for (Peer* peer : network)
219  {
220  auto const& lcl = peer->lastClosedLedger;
221  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
222  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
223 
224  BEAST_EXPECT(peer->prevProposers == network.size() - 1);
225  BEAST_EXPECT(
226  peer->prevRoundTime == network[0]->prevRoundTime);
227 
228  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
229  for (std::uint32_t i = 2; i < network.size(); ++i)
230  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
231 
232  // Tx 0 didn't make it
233  BEAST_EXPECT(
234  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
235  }
236  }
237  }
238 
239  // Test when the slow peers delay a consensus quorum (4/6 agree)
240  {
241  // Run two tests
242  // 1. The slow peers are participating in consensus
243  // 2. The slow peers are just observing
244 
245  for (auto isParticipant : {true, false})
246  {
247  ConsensusParms const parms{};
248 
249  Sim sim;
250  PeerGroup slow = sim.createGroup(2);
251  PeerGroup fast = sim.createGroup(4);
252  PeerGroup network = fast + slow;
253 
254  // Connected trust graph
255  network.trust(network);
256 
257  // Fast and slow network connections
258  fast.connect(
259  fast,
260  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
261 
262  slow.connect(
263  network,
264  date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
265 
266  for (Peer* peer : slow)
267  peer->runAsValidator = isParticipant;
268 
269  // All peers submit their own ID as a transaction and relay it
270  // to peers
271  for (Peer* peer : network)
272  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
273 
274  sim.run(1);
275 
276  if (BEAST_EXPECT(sim.synchronized()))
277  {
278  // Verify all peers have same LCL but are missing
279  // transaction 0,1 which was not received by all peers
280  // before the ledger closed
281  for (Peer* peer : network)
282  {
283  // Closed ledger has all but transaction 0,1
284  auto const& lcl = peer->lastClosedLedger;
285  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
286  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
287  BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
288  for (std::uint32_t i = slow.size(); i < network.size();
289  ++i)
290  BEAST_EXPECT(
291  lcl.txs().find(Tx{i}) != lcl.txs().end());
292 
293  // Tx 0-1 didn't make it
294  BEAST_EXPECT(
295  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
296  BEAST_EXPECT(
297  peer->openTxs.find(Tx{1}) != peer->openTxs.end());
298  }
299 
300  Peer const* slowPeer = slow[0];
301  if (isParticipant)
302  BEAST_EXPECT(
303  slowPeer->prevProposers == network.size() - 1);
304  else
305  BEAST_EXPECT(slowPeer->prevProposers == fast.size());
306 
307  for (Peer* peer : fast)
308  {
309  // Due to the network link delay settings
310  // Peer 0 initially proposes {0}
311  // Peer 1 initially proposes {1}
312  // Peers 2-5 initially propose {2,3,4,5}
313  // Since peers 2-5 agree, 4/6 > the initial 50% needed
314  // to include a disputed transaction, so Peer 0/1 switch
315  // to agree with those peers. Peer 0/1 then closes with
316  // an 80% quorum of agreeing positions (5/6) match.
317  //
318  // Peers 2-5 do not change position, since tx 0 or tx 1
319  // have less than the 50% initial threshold. They also
320  // cannot declare consensus, since 4/6 agreeing
321  // positions are < 80% threshold. They therefore need an
322  // additional timerEntry call to see the updated
323  // positions from Peer 0 & 1.
324 
325  if (isParticipant)
326  {
327  BEAST_EXPECT(
328  peer->prevProposers == network.size() - 1);
329  BEAST_EXPECT(
330  peer->prevRoundTime > slowPeer->prevRoundTime);
331  }
332  else
333  {
334  BEAST_EXPECT(
335  peer->prevProposers == fast.size() - 1);
336  // so all peers should have closed together
337  BEAST_EXPECT(
338  peer->prevRoundTime == slowPeer->prevRoundTime);
339  }
340  }
341  }
342  }
343  }
344  }
345 
346  void
348  {
349  using namespace csf;
350  using namespace std::chrono;
351 
352  // This is a very specialized test to get ledgers to disagree on
353  // the close time. It unfortunately assumes knowledge about current
354  // timing constants. This is a necessary evil to get coverage up
355  // pending more extensive refactorings of timing constants.
356 
357  // In order to agree-to-disagree on the close time, there must be no
358  // clear majority of nodes agreeing on a close time. This test
359  // sets a relative offset to the peers internal clocks so that they
360  // send proposals with differing times.
361 
362  // However, agreement is on the effective close time, not the
363  // exact close time. The minimum closeTimeResolution is given by
364  // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
365  // the skews need to be at least 10 seconds to have different effective
366  // close times.
367 
368  // Complicating this matter is that nodes will ignore proposals
369  // with times more than proposeFRESHNESS =20s in the past. So at
370  // the minimum granularity, we have at most 3 types of skews
371  // (0s,10s,20s).
372 
373  // This test therefore has 6 nodes, with 2 nodes having each type of
374  // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
375  // actual close time.
376 
377  ConsensusParms const parms{};
378  Sim sim;
379 
380  PeerGroup groupA = sim.createGroup(2);
381  PeerGroup groupB = sim.createGroup(2);
382  PeerGroup groupC = sim.createGroup(2);
383  PeerGroup network = groupA + groupB + groupC;
384 
385  network.trust(network);
386  network.connect(
387  network, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
388 
389  // Run consensus without skew until we have a short close time
390  // resolution
391  Peer* firstPeer = *groupA.begin();
392  while (firstPeer->lastClosedLedger.closeTimeResolution() >=
393  parms.proposeFRESHNESS)
394  sim.run(1);
395 
396  // Introduce a shift on the time of 2/3 of peers
397  for (Peer* peer : groupA)
398  peer->clockSkew = parms.proposeFRESHNESS / 2;
399  for (Peer* peer : groupB)
400  peer->clockSkew = parms.proposeFRESHNESS;
401 
402  sim.run(1);
403 
404  // All nodes agreed to disagree on the close time
405  if (BEAST_EXPECT(sim.synchronized()))
406  {
407  for (Peer* peer : network)
408  BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
409  }
410  }
411 
412  void
414  {
415  using namespace csf;
416  using namespace std::chrono;
417  // Specialized test to exercise a temporary fork in which some peers
418  // are working on an incorrect prior ledger.
419 
420  ConsensusParms const parms{};
421 
422  // Vary the time it takes to process validations to exercise detecting
423  // the wrong LCL at different phases of consensus
424  for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
425  {
426  // Consider 10 peers:
427  // 0 1 2 3 4 5 6 7 8 9
428  // minority majorityA majorityB
429  //
430  // Nodes 0-1 trust nodes 0-4
431  // Nodes 2-9 trust nodes 2-9
432  //
433  // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
434  // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
435  // nodes will instead accept the ledger with tx 1.
436 
437  // Nodes 0-1 will detect this mismatch during a subsequent round
438  // since nodes 2-4 will validate a different ledger.
439 
440  // Nodes 0-1 will acquire the proper ledger from the network and
441  // resume consensus and eventually generate the dominant network
442  // ledger.
443 
444  // This topology can potentially fork with the above trust relations
445  // but that is intended for this test.
446 
447  Sim sim;
448 
449  PeerGroup minority = sim.createGroup(2);
450  PeerGroup majorityA = sim.createGroup(3);
451  PeerGroup majorityB = sim.createGroup(5);
452 
453  PeerGroup majority = majorityA + majorityB;
454  PeerGroup network = minority + majority;
455 
456  SimDuration delay =
457  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
458  minority.trustAndConnect(minority + majorityA, delay);
459  majority.trustAndConnect(majority, delay);
460 
461  CollectByNode<JumpCollector> jumps;
462  sim.collectors.add(jumps);
463 
464  BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
465 
466  // initial round to set prior state
467  sim.run(1);
468 
469  // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
470  // tx 1
471  for (Peer* peer : network)
472  peer->delays.recvValidation = validationDelay;
473  for (Peer* peer : (minority + majorityA))
474  peer->openTxs.insert(Tx{0});
475  for (Peer* peer : majorityB)
476  peer->openTxs.insert(Tx{1});
477 
478  // Run for additional rounds
479  // With no validation delay, only 2 more rounds are needed.
480  // 1. Round to generate different ledgers
481  // 2. Round to detect different prior ledgers (but still generate
482  // wrong ones) and recover within that round since wrong LCL
483  // is detected before we close
484  //
485  // With a validation delay of ledgerMIN_CLOSE, we need 3 more
486  // rounds.
487  // 1. Round to generate different ledgers
488  // 2. Round to detect different prior ledgers (but still generate
489  // wrong ones) but end up declaring consensus on wrong LCL (but
490  // with the right transaction set!). This is because we detect
491  // the wrong LCL after we have closed the ledger, so we declare
492  // consensus based solely on our peer proposals. But we haven't
493  // had time to acquire the right ledger.
494  // 3. Round to correct
495  sim.run(3);
496 
497  // The network never actually forks, since node 0-1 never see a
498  // quorum of validations to fully validate the incorrect chain.
499 
500  // However, for a non zero-validation delay, the network is not
501  // synchronized because nodes 0 and 1 are running one ledger behind
502  if (BEAST_EXPECT(sim.branches() == 1))
503  {
504  for (Peer const* peer : majority)
505  {
506  // No jumps for majority nodes
507  BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
508  BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
509  }
510  for (Peer const* peer : minority)
511  {
512  auto& peerJumps = jumps[peer->id];
513  // last closed ledger jump between chains
514  {
515  if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
516  {
517  JumpCollector::Jump const& jump =
518  peerJumps.closeJumps.front();
519  // Jump is to a different chain
520  BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
521  BEAST_EXPECT(!jump.to.isAncestor(jump.from));
522  }
523  }
524  // fully validated jump forward in same chain
525  {
526  if (BEAST_EXPECT(
527  peerJumps.fullyValidatedJumps.size() == 1))
528  {
529  JumpCollector::Jump const& jump =
530  peerJumps.fullyValidatedJumps.front();
531  // Jump is to a different chain with same seq
532  BEAST_EXPECT(jump.from.seq() < jump.to.seq());
533  BEAST_EXPECT(jump.to.isAncestor(jump.from));
534  }
535  }
536  }
537  }
538  }
539 
540  {
541  // Additional test engineered to switch LCL during the establish
542  // phase. This was added to trigger a scenario that previously
543  // crashed, in which switchLCL switched from establish to open
544  // phase, but still processed the establish phase logic.
545 
546  // Loner node will accept an initial ledger A, but all other nodes
547  // accept ledger B a bit later. By delaying the time it takes
548  // to process a validation, loner node will detect the wrongLCL
549  // after it is already in the establish phase of the next round.
550 
551  Sim sim;
552  PeerGroup loner = sim.createGroup(1);
553  PeerGroup friends = sim.createGroup(3);
554  loner.trust(loner + friends);
555 
556  PeerGroup others = sim.createGroup(6);
557  PeerGroup clique = friends + others;
558  clique.trust(clique);
559 
560  PeerGroup network = loner + clique;
561  network.connect(
562  network,
563  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
564 
565  // initial round to set prior state
566  sim.run(1);
567  for (Peer* peer : (loner + friends))
568  peer->openTxs.insert(Tx(0));
569  for (Peer* peer : others)
570  peer->openTxs.insert(Tx(1));
571 
572  // Delay validation processing
573  for (Peer* peer : network)
574  peer->delays.recvValidation = parms.ledgerGRANULARITY;
575 
576  // additional rounds to generate wrongLCL and recover
577  sim.run(2);
578 
579  // Check all peers recovered
580  for (Peer* p : network)
581  BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
582  }
583  }
584 
585  void
587  {
588  using namespace csf;
589  using namespace std::chrono;
590 
591  // This is a specialized test engineered to yield ledgers with different
592  // close times even though the peers believe they had close time
593  // consensus on the ledger.
594  ConsensusParms parms;
595 
596  Sim sim;
597 
598  // This requires a group of 4 fast and 2 slow peers to create a
599  // situation in which a subset of peers requires seeing additional
600  // proposals to declare consensus.
601  PeerGroup slow = sim.createGroup(2);
602  PeerGroup fast = sim.createGroup(4);
603  PeerGroup network = fast + slow;
604 
605  for (Peer* peer : network)
606  peer->consensusParms = parms;
607 
608  // Connected trust graph
609  network.trust(network);
610 
611  // Fast and slow network connections
612  fast.connect(
613  fast, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
614  slow.connect(
615  network, date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
616 
617  // Run to the ledger *prior* to decreasing the resolution
619 
620  // In order to create the discrepency, we want a case where if
621  // X = effCloseTime(closeTime, resolution, parentCloseTime)
622  // X != effCloseTime(X, resolution, parentCloseTime)
623  //
624  // That is, the effective close time is not a fixed point. This can
625  // happen if X = parentCloseTime + 1, but a subsequent rounding goes
626  // to the next highest multiple of resolution.
627 
628  // So we want to find an offset (now + offset) % 30s = 15
629  // (now + offset) % 20s = 15
630  // This way, the next ledger will close and round up Due to the
631  // network delay settings, the round of consensus will take 5s, so
632  // the next ledger's close time will
633 
634  NetClock::duration when = network[0]->now().time_since_epoch();
635 
636  // Check we are before the 30s to 20s transition
637  NetClock::duration resolution =
638  network[0]->lastClosedLedger.closeTimeResolution();
639  BEAST_EXPECT(resolution == NetClock::duration{30s});
640 
641  while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
642  ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
643  when += 1s;
644  // Advance the clock without consensus running (IS THIS WHAT
645  // PREVENTS IT IN PRACTICE?)
646  sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
647 
648  // Run one more ledger with 30s resolution
649  sim.run(1);
650  if (BEAST_EXPECT(sim.synchronized()))
651  {
652  // close time should be ahead of clock time since we engineered
653  // the close time to round up
654  for (Peer* peer : network)
655  {
656  BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
657  BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
658  }
659  }
660 
661  // All peers submit their own ID as a transaction
662  for (Peer* peer : network)
663  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
664 
665  // Run 1 more round, this time it will have a decreased
666  // resolution of 20 seconds.
667 
668  // The network delays are engineered so that the slow peers
669  // initially have the wrong tx hash, but they see a majority
670  // of agreement from their peers and declare consensus
671  //
672  // The trick is that everyone starts with a raw close time of
673  // 84681s
674  // Which has
675  // effCloseTime(86481s, 20s, 86490s) = 86491s
676  // However, when the slow peers update their position, they change
677  // the close time to 86451s. The fast peers declare consensus with
678  // the 86481s as their position still.
679  //
680  // When accepted the ledger
681  // - fast peers use eff(86481s) -> 86491s as the close time
682  // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
683 
684  sim.run(1);
685 
686  BEAST_EXPECT(sim.synchronized());
687  }
688 
689  void
691  {
692  using namespace csf;
693  using namespace std::chrono;
694 
695  std::uint32_t numPeers = 10;
696  // Vary overlap between two UNLs
697  for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
698  {
699  ConsensusParms const parms{};
700  Sim sim;
701 
702  std::uint32_t numA = (numPeers - overlap) / 2;
703  std::uint32_t numB = numPeers - numA - overlap;
704 
705  PeerGroup aOnly = sim.createGroup(numA);
706  PeerGroup bOnly = sim.createGroup(numB);
707  PeerGroup commonOnly = sim.createGroup(overlap);
708 
709  PeerGroup a = aOnly + commonOnly;
710  PeerGroup b = bOnly + commonOnly;
711 
712  PeerGroup network = a + b;
713 
714  SimDuration delay =
715  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
716  a.trustAndConnect(a, delay);
717  b.trustAndConnect(b, delay);
718 
719  // Initial round to set prior state
720  sim.run(1);
721  for (Peer* peer : network)
722  {
723  // Nodes have only seen transactions from their neighbors
724  peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
725  for (Peer* to : sim.trustGraph.trustedPeers(peer))
726  peer->openTxs.insert(
727  Tx{static_cast<std::uint32_t>(to->id)});
728  }
729  sim.run(1);
730 
731  // Fork should not happen for 40% or greater overlap
732  // Since the overlapped nodes have a UNL that is the union of the
733  // two cliques, the maximum sized UNL list is the number of peers
734  if (overlap > 0.4 * numPeers)
735  BEAST_EXPECT(sim.synchronized());
736  else
737  {
738  // Even if we do fork, there shouldn't be more than 3 ledgers
739  // One for cliqueA, one for cliqueB and one for nodes in both
740  BEAST_EXPECT(sim.branches() <= 3);
741  }
742  }
743  }
744 
745  void
747  {
748  using namespace csf;
749  using namespace std::chrono;
750 
751  // Simulate a set of 5 validators that aren't directly connected but
752  // rely on a single hub node for communication
753 
754  ConsensusParms const parms{};
755  Sim sim;
756  PeerGroup validators = sim.createGroup(5);
757  PeerGroup center = sim.createGroup(1);
758  validators.trust(validators);
759  center.trust(validators);
760 
761  SimDuration delay =
762  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
763  validators.connect(center, delay);
764 
765  center[0]->runAsValidator = false;
766 
767  // prep round to set initial state.
768  sim.run(1);
769 
770  // everyone submits their own ID as a TX and relay it to peers
771  for (Peer* p : validators)
772  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
773 
774  sim.run(1);
775 
776  // All peers are in sync
777  BEAST_EXPECT(sim.synchronized());
778  }
779 
780  // Helper collector for testPreferredByBranch
781  // Invasively disconnects network at bad times to cause splits
782  struct Disruptor
783  {
788  bool reconnected = false;
789 
791  csf::PeerGroup& net,
792  csf::PeerGroup& c,
793  csf::PeerGroup& split,
795  : network(net), groupCfast(c), groupCsplit(split), delay(d)
796  {
797  }
798 
799  template <class E>
800  void
802  {
803  }
804 
805  void
807  {
808  using namespace std::chrono;
809  // As soon as the the fastC node fully validates C, disconnect
810  // ALL c nodes from the network. The fast C node needs to disconnect
811  // as well to prevent it from relaying the validations it did see
812  if (who == groupCfast[0]->id &&
813  e.ledger.seq() == csf::Ledger::Seq{2})
814  {
815  network.disconnect(groupCsplit);
816  network.disconnect(groupCfast);
817  }
818  }
819 
820  void
822  {
823  // As soon as anyone generates a child of B or C, reconnect the
824  // network so those validations make it through
825  if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
826  {
827  reconnected = true;
828  network.connect(groupCsplit, delay);
829  }
830  }
831  };
832 
833  void
835  {
836  using namespace csf;
837  using namespace std::chrono;
838 
839  // Simulate network splits that are prevented from forking when using
840  // preferred ledger by trie. This is a contrived example that involves
841  // excessive network splits, but demonstrates the safety improvement
842  // from the preferred ledger by trie approach.
843 
844  // Consider 10 validating nodes that comprise a single common UNL
845  // Ledger history:
846  // 1: A
847  // _/ \_
848  // 2: B C
849  // _/ _/ \_
850  // 3: D C' |||||||| (8 different ledgers)
851 
852  // - All nodes generate the common ledger A
853  // - 2 nodes generate B and 8 nodes generate C
854  // - Only 1 of the C nodes sees all the C validations and fully
855  // validates C. The rest of the C nodes split at just the right time
856  // such that they never see any C validations but their own.
857  // - The C nodes continue and generate 8 different child ledgers.
858  // - Meanwhile, the D nodes only saw 1 validation for C and 2
859  // validations
860  // for B.
861  // - The network reconnects and the validations for generation 3 ledgers
862  // are observed (D and the 8 C's)
863  // - In the old approach, 2 votes for D outweights 1 vote for each C'
864  // so the network would avalanche towards D and fully validate it
865  // EVEN though C was fully validated by one node
866  // - In the new approach, 2 votes for D are not enough to outweight the
867  // 8 implicit votes for C, so nodes will avalanche to C instead
868 
869  ConsensusParms const parms{};
870  Sim sim;
871 
872  // Goes A->B->D
873  PeerGroup groupABD = sim.createGroup(2);
874  // Single node that initially fully validates C before the split
875  PeerGroup groupCfast = sim.createGroup(1);
876  // Generates C, but fails to fully validate before the split
877  PeerGroup groupCsplit = sim.createGroup(7);
878 
879  PeerGroup groupNotFastC = groupABD + groupCsplit;
880  PeerGroup network = groupABD + groupCsplit + groupCfast;
881 
882  SimDuration delay =
883  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
884  SimDuration fDelay =
885  date::round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
886 
887  network.trust(network);
888  // C must have a shorter delay to see all the validations before the
889  // other nodes
890  network.connect(groupCfast, fDelay);
891  // The rest of the network is connected at the same speed
892  groupNotFastC.connect(groupNotFastC, delay);
893 
894  Disruptor dc(network, groupCfast, groupCsplit, delay);
895  sim.collectors.add(dc);
896 
897  // Consensus round to generate ledger A
898  sim.run(1);
899  BEAST_EXPECT(sim.synchronized());
900 
901  // Next round generates B and C
902  // To force B, we inject an extra transaction in to those nodes
903  for (Peer* peer : groupABD)
904  {
905  peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
906  }
907  // The Disruptor will ensure that nodes disconnect before the C
908  // validations make it to all but the fastC node
909  sim.run(1);
910 
911  // We are no longer in sync, but have not yet forked:
912  // 9 nodes consider A the last fully validated ledger and fastC sees C
913  BEAST_EXPECT(!sim.synchronized());
914  BEAST_EXPECT(sim.branches() == 1);
915 
916  // Run another round to generate the 8 different C' ledgers
917  for (Peer* p : network)
918  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
919  sim.run(1);
920 
921  // Still not forked
922  BEAST_EXPECT(!sim.synchronized());
923  BEAST_EXPECT(sim.branches() == 1);
924 
925  // Disruptor will reconnect all but the fastC node
926  sim.run(1);
927 
928  if (BEAST_EXPECT(sim.branches() == 1))
929  {
930  BEAST_EXPECT(sim.synchronized());
931  }
932  else // old approach caused a fork
933  {
934  BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
935  BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
936  }
937  }
938 
939  // Helper collector for testPauseForLaggards
940  // This will remove the ledgerAccept delay used to
941  // initially create the slow vs. fast validator groups.
942  struct UndoDelay
943  {
945 
947  {
948  }
949 
950  template <class E>
951  void
953  {
954  }
955 
956  void
958  {
959  for (csf::Peer* p : g)
960  {
961  if (p->id == who)
962  p->delays.ledgerAccept = std::chrono::seconds{0};
963  }
964  }
965  };
966 
967  void
969  {
970  using namespace csf;
971  using namespace std::chrono;
972 
973  // Test that validators that jump ahead of the network slow
974  // down.
975 
976  // We engineer the following validated ledger history scenario:
977  //
978  // / --> B1 --> C1 --> ... -> G1 "ahead"
979  // A
980  // \ --> B2 --> C2 "behind"
981  //
982  // After validating a common ledger A, a set of "behind" validators
983  // briefly run slower and validate the lower chain of ledgers.
984  // The "ahead" validators run normal speed and run ahead validating the
985  // upper chain of ledgers.
986  //
987  // Due to the uncommited support definition of the preferred branch
988  // protocol, even if the "behind" validators are a majority, the "ahead"
989  // validators cannot jump to the proper branch until the "behind"
990  // validators catch up to the same sequence number. For this test to
991  // succeed, the ahead validators need to briefly slow down consensus.
992 
993  ConsensusParms const parms{};
994  Sim sim;
995  SimDuration delay =
996  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
997 
998  PeerGroup behind = sim.createGroup(3);
999  PeerGroup ahead = sim.createGroup(2);
1000  PeerGroup network = ahead + behind;
1001 
1002  hash_set<Peer::NodeKey_t> trustedKeys;
1003  for (Peer* p : network)
1004  trustedKeys.insert(p->key);
1005  for (Peer* p : network)
1006  p->trustedKeys = trustedKeys;
1007 
1008  network.trustAndConnect(network, delay);
1009 
1010  // Initial seed round to set prior state
1011  sim.run(1);
1012 
1013  // Have the "behind" group initially take a really long time to
1014  // accept a ledger after ending deliberation
1015  for (Peer* p : behind)
1016  p->delays.ledgerAccept = 20s;
1017 
1018  // Use the collector to revert the delay after the single
1019  // slow ledger is generated
1020  UndoDelay undoDelay{behind};
1021  sim.collectors.add(undoDelay);
1022 
1023 #if 0
1024  // Have all beast::journal output printed to stdout
1025  for (Peer* p : network)
1026  p->sink.threshold(beast::severities::kAll);
1027 
1028  // Print ledger accept and fully validated events to stdout
1029  StreamCollector sc{std::cout};
1030  sim.collectors.add(sc);
1031 #endif
1032  // Run the simulation for 100 seconds of simulation time with
1033  std::chrono::nanoseconds const simDuration = 100s;
1034 
1035  // Simulate clients submitting 1 tx every 5 seconds to a random
1036  // validator
1037  Rate const rate{1, 5s};
1038  auto peerSelector = makeSelector(
1039  network.begin(),
1040  network.end(),
1041  std::vector<double>(network.size(), 1.),
1042  sim.rng);
1043  auto txSubmitter = makeSubmitter(
1044  ConstantDistribution{rate.inv()},
1045  sim.scheduler.now(),
1046  sim.scheduler.now() + simDuration,
1047  peerSelector,
1048  sim.scheduler,
1049  sim.rng);
1050 
1051  // Run simulation
1052  sim.run(simDuration);
1053 
1054  // Verify that the network recovered
1055  BEAST_EXPECT(sim.synchronized());
1056  }
1057 
1058  void
1059  run() override
1060  {
1063 
1064  testStandalone();
1065  testPeersAgree();
1066  testSlowPeers();
1068  testWrongLCL();
1070  testFork();
1071  testHubNetwork();
1074  }
1075 };
1076 
1078 } // namespace test
1079 } // namespace ripple
ripple::test::csf::SimTime
typename SimClock::time_point SimTime
Definition: SimTime.h:36
ripple::test::Consensus_test::testHubNetwork
void testHubNetwork()
Definition: Consensus_test.cpp:746
ripple::checkConsensus
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, ConsensusParms const &parms, bool proposing, beast::Journal j)
Determine whether the network reached consensus and whether we joined.
Definition: Consensus.cpp:108
ripple::test::BEAST_DEFINE_TESTSUITE
BEAST_DEFINE_TESTSUITE(AccountDelete, app, ripple)
ripple::test::Consensus_test::testSlowPeers
void testSlowPeers()
Definition: Consensus_test.cpp:180
ripple::test::Consensus_test::Disruptor
Definition: Consensus_test.cpp:782
utility
ripple::test::Consensus_test::testPeersAgree
void testPeersAgree()
Definition: Consensus_test.cpp:143
ripple::Rate
Represents a transfer rate.
Definition: Rate.h:37
ripple::shouldCloseLedger
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j)
Determines whether the current ledger should close at this time.
Definition: Consensus.cpp:26
std::unordered_set
STL class.
ripple::test::Consensus_test::testConsensusCloseTimeRounding
void testConsensusCloseTimeRounding()
Definition: Consensus_test.cpp:586
std::vector
STL class.
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:821
ripple::test::Consensus_test::Disruptor::Disruptor
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
Definition: Consensus_test.cpp:790
ripple::Consensus
Generic implementation of consensus algorithm.
Definition: Consensus.h:283
ripple::ConsensusState::Yes
@ Yes
We have consensus along with the network.
ripple::test::Consensus_test::Disruptor::delay
csf::SimDuration delay
Definition: Consensus_test.cpp:787
std::chrono::duration
beast::severities::kAll
@ kAll
Definition: Journal.h:32
ripple::test::csf::FullyValidateLedger
Peer fully validated a new ledger.
Definition: events.h:137
ripple::ConsensusParms::ledgerGRANULARITY
std::chrono::milliseconds ledgerGRANULARITY
How often we check state or change positions.
Definition: ConsensusParms.h:95
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
Definition: Consensus_test.cpp:806
ripple::test::csf::Ledger::seq
Seq seq() const
Definition: ledgers.h:172
ripple::test::Consensus_test::Disruptor::groupCfast
csf::PeerGroup & groupCfast
Definition: Consensus_test.cpp:785
ripple::test::csf::FullyValidateLedger::ledger
Ledger ledger
The new fully validated ledger.
Definition: events.h:140
ripple::test::csf::AcceptLedger
Peer accepted consensus results.
Definition: events.h:118
ripple::test::csf::PeerGroup::connect
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition: PeerGroup.h:164
std::cout
ripple::test::Consensus_test::testCheckConsensus
void testCheckConsensus()
Definition: Consensus_test.cpp:78
ripple::test::csf::AcceptLedger::ledger
Ledger ledger
Definition: events.h:121
std::chrono::time_point
ripple::test::Consensus_test::testStandalone
void testStandalone()
Definition: Consensus_test.cpp:119
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:801
std::uint32_t
ripple::ConsensusState::No
@ No
We do not have consensus.
ripple::ConsensusState::MovedOn
@ MovedOn
The network has consensus without us.
ripple::test::Consensus_test::run
void run() override
Definition: Consensus_test.cpp:1059
ripple::test::Consensus_test::testShouldCloseLedger
void testShouldCloseLedger()
Definition: Consensus_test.cpp:40
ripple::test::SuiteJournal
Definition: SuiteJournal.h:88
ripple::test::csf::PeerGroup::disconnect
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition: PeerGroup.h:184
ripple::test::Consensus_test::testPreferredByBranch
void testPreferredByBranch()
Definition: Consensus_test.cpp:834
ripple::test::csf::PeerGroup
A group of simulation Peers.
Definition: PeerGroup.h:39
ripple::test::Consensus_test::testFork
void testFork()
Definition: Consensus_test.cpp:690
ripple::test::csf::Peer
A single peer in the simulation.
Definition: test/csf/Peer.h:54
ripple::test::Consensus_test::journal_
SuiteJournal journal_
Definition: Consensus_test.cpp:32
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::test::Consensus_test::UndoDelay::g
csf::PeerGroup & g
Definition: Consensus_test.cpp:944
ripple::test::Consensus_test::UndoDelay
Definition: Consensus_test.cpp:942
ripple::ConsensusParms
Consensus algorithm parameters.
Definition: ConsensusParms.h:33
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:952
std::unordered_set::insert
T insert(T... args)
ripple::test::Consensus_test::testWrongLCL
void testWrongLCL()
Definition: Consensus_test.cpp:413
ripple::increaseLedgerTimeResolutionEvery
constexpr auto increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
Definition: LedgerTiming.h:50
ripple::test::Consensus_test::Disruptor::network
csf::PeerGroup & network
Definition: Consensus_test.cpp:784
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:957
ripple::Peer::id
virtual id_t id() const =0
ripple::tagged_integer< std::uint32_t, PeerIDTag >
ripple::test::csf::SimDuration
typename SimClock::duration SimDuration
Definition: SimTime.h:35
ripple::test::Consensus_test::UndoDelay::UndoDelay
UndoDelay(csf::PeerGroup &a)
Definition: Consensus_test.cpp:946
ripple::test::Consensus_test::Disruptor::groupCsplit
csf::PeerGroup & groupCsplit
Definition: Consensus_test.cpp:786
ripple::test::Consensus_test::testPauseForLaggards
void testPauseForLaggards()
Definition: Consensus_test.cpp:968
ripple::test::Consensus_test::Consensus_test
Consensus_test()
Definition: Consensus_test.cpp:35
ripple::test::Consensus_test
Definition: Consensus_test.cpp:30
ripple::test::Consensus_test::testCloseTimeDisagree
void testCloseTimeDisagree()
Definition: Consensus_test.cpp:347
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:44
ripple::test::jtx::rate
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
std::chrono