// rippled
// Consensus_test.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012-2016 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
#include <ripple/beast/clock/manual_clock.h>
#include <ripple/beast/unit_test.h>
#include <ripple/consensus/Consensus.h>
#include <ripple/consensus/ConsensusProposal.h>
#include <test/csf.h>
#include <test/unit_test/SuiteJournal.h>

#include <chrono>
#include <utility>
26 
27 namespace ripple {
28 namespace test {
29 
30 class Consensus_test : public beast::unit_test::suite
31 {
33 
34 public:
36  : journal_ ("Consensus_test", *this)
37  { }
38 
39  void
41  {
42  using namespace std::chrono_literals;
43 
44  // Use default parameters
45  ConsensusParms const p{};
46 
47  // Bizarre times forcibly close
48  BEAST_EXPECT(
49  shouldCloseLedger(true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
50  BEAST_EXPECT(
51  shouldCloseLedger(true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
52  BEAST_EXPECT(
53  shouldCloseLedger(true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
54 
55  // Rest of network has closed
56  BEAST_EXPECT(
57  shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
58 
59  // No transactions means wait until end of internval
60  BEAST_EXPECT(
61  !shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
62  BEAST_EXPECT(
63  shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
64 
65  // Enforce minimum ledger open time
66  BEAST_EXPECT(
67  !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
68 
69  // Don't go too much faster than last time
70  BEAST_EXPECT(
71  !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
72 
73  BEAST_EXPECT(
74  shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
75  }
76 
77  void
79  {
80  using namespace std::chrono_literals;
81 
82  // Use default parameterss
83  ConsensusParms const p{};
84 
85  // Not enough time has elapsed
86  BEAST_EXPECT(
88  checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
89 
90  // If not enough peers have propsed, ensure
91  // more time for proposals
92  BEAST_EXPECT(
94  checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
95 
96  // Enough time has elapsed and we all agree
97  BEAST_EXPECT(
99  checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
100 
101  // Enough time has elapsed and we don't yet agree
102  BEAST_EXPECT(
104  checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
105 
106  // Our peers have moved on
107  // Enough time has elapsed and we all agree
108  BEAST_EXPECT(
110  checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
111 
112  // No peers makes it easy to agree
113  BEAST_EXPECT(
115  checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
116  }
117 
118  void
120  {
121  using namespace std::chrono_literals;
122  using namespace csf;
123 
124  Sim s;
125  PeerGroup peers = s.createGroup(1);
126  Peer * peer = peers[0];
127  peer->targetLedgers = 1;
128  peer->start();
129  peer->submit(Tx{1});
130 
131  s.scheduler.step();
132 
133  // Inspect that the proper ledger was created
134  auto const& lcl = peer->lastClosedLedger;
135  BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
136  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
137  BEAST_EXPECT(lcl.txs().size() == 1);
138  BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
139  BEAST_EXPECT(peer->prevProposers == 0);
140  }
141 
142  void
144  {
145  using namespace csf;
146  using namespace std::chrono;
147 
148  ConsensusParms const parms{};
149  Sim sim;
150  PeerGroup peers = sim.createGroup(5);
151 
152  // Connected trust and network graphs with single fixed delay
153  peers.trustAndConnect(
154  peers, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
155 
156  // everyone submits their own ID as a TX
157  for (Peer * p : peers)
158  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
159 
160  sim.run(1);
161 
162  // All peers are in sync
163  if (BEAST_EXPECT(sim.synchronized()))
164  {
165  for (Peer const* peer : peers)
166  {
167  auto const& lcl = peer->lastClosedLedger;
168  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
169  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
170  // All peers proposed
171  BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
172  // All transactions were accepted
173  for (std::uint32_t i = 0; i < peers.size(); ++i)
174  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
175  }
176  }
177  }
178 
179  void
181  {
182  using namespace csf;
183  using namespace std::chrono;
184 
185  // Several tests of a complete trust graph with a subset of peers
186  // that have significantly longer network delays to the rest of the
187  // network
188 
189  // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
190  {
191  ConsensusParms const parms{};
192  Sim sim;
193  PeerGroup slow = sim.createGroup(1);
194  PeerGroup fast = sim.createGroup(4);
195  PeerGroup network = fast + slow;
196 
197  // Fully connected trust graph
198  network.trust(network);
199 
200  // Fast and slow network connections
201  fast.connect(
202  fast, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
203 
204  slow.connect(
205  network,
206  date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
207 
208  // All peers submit their own ID as a transaction
209  for (Peer* peer : network)
210  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
211 
212  sim.run(1);
213 
214  // Verify all peers have same LCL but are missing transaction 0
215  // All peers are in sync even with a slower peer 0
216  if (BEAST_EXPECT(sim.synchronized()))
217  {
218  for (Peer* peer : network)
219  {
220  auto const& lcl = peer->lastClosedLedger;
221  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
222  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
223 
224  BEAST_EXPECT(peer->prevProposers == network.size() - 1);
225  BEAST_EXPECT(
226  peer->prevRoundTime == network[0]->prevRoundTime);
227 
228  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
229  for (std::uint32_t i = 2; i < network.size(); ++i)
230  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
231 
232  // Tx 0 didn't make it
233  BEAST_EXPECT(
234  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
235  }
236  }
237  }
238 
239  // Test when the slow peers delay a consensus quorum (4/6 agree)
240  {
241  // Run two tests
242  // 1. The slow peers are participating in consensus
243  // 2. The slow peers are just observing
244 
245  for (auto isParticipant : {true, false})
246  {
247  ConsensusParms const parms{};
248 
249  Sim sim;
250  PeerGroup slow = sim.createGroup(2);
251  PeerGroup fast = sim.createGroup(4);
252  PeerGroup network = fast + slow;
253 
254  // Connected trust graph
255  network.trust(network);
256 
257  // Fast and slow network connections
258  fast.connect(
259  fast,
260  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
261 
262  slow.connect(
263  network,
264  date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
265 
266  for (Peer* peer : slow)
267  peer->runAsValidator = isParticipant;
268 
269  // All peers submit their own ID as a transaction and relay it
270  // to peers
271  for (Peer* peer : network)
272  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
273 
274  sim.run(1);
275 
276  if (BEAST_EXPECT(sim.synchronized()))
277  {
278  // Verify all peers have same LCL but are missing
279  // transaction 0,1 which was not received by all peers before
280  // the ledger closed
281  for (Peer* peer : network)
282  {
283  // Closed ledger has all but transaction 0,1
284  auto const& lcl = peer->lastClosedLedger;
285  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
286  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
287  BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
288  for (std::uint32_t i = slow.size(); i < network.size();
289  ++i)
290  BEAST_EXPECT(
291  lcl.txs().find(Tx{i}) != lcl.txs().end());
292 
293  // Tx 0-1 didn't make it
294  BEAST_EXPECT(
295  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
296  BEAST_EXPECT(
297  peer->openTxs.find(Tx{1}) != peer->openTxs.end());
298  }
299 
300  Peer const* slowPeer = slow[0];
301  if (isParticipant)
302  BEAST_EXPECT(
303  slowPeer->prevProposers == network.size() - 1);
304  else
305  BEAST_EXPECT(slowPeer->prevProposers == fast.size());
306 
307  for (Peer* peer : fast)
308  {
309  // Due to the network link delay settings
310  // Peer 0 initially proposes {0}
311  // Peer 1 initially proposes {1}
312  // Peers 2-5 initially propose {2,3,4,5}
313  // Since peers 2-5 agree, 4/6 > the initial 50% needed
314  // to include a disputed transaction, so Peer 0/1 switch
315  // to agree with those peers. Peer 0/1 then closes with
316  // an 80% quorum of agreeing positions (5/6) match.
317  //
318  // Peers 2-5 do not change position, since tx 0 or tx 1
319  // have less than the 50% initial threshold. They also
320  // cannot declare consensus, since 4/6 agreeing
321  // positions are < 80% threshold. They therefore need an
322  // additional timerEntry call to see the updated
323  // positions from Peer 0 & 1.
324 
325  if (isParticipant)
326  {
327  BEAST_EXPECT(
328  peer->prevProposers == network.size() - 1);
329  BEAST_EXPECT(
330  peer->prevRoundTime > slowPeer->prevRoundTime);
331  }
332  else
333  {
334  BEAST_EXPECT(
335  peer->prevProposers == fast.size() - 1);
336  // so all peers should have closed together
337  BEAST_EXPECT(
338  peer->prevRoundTime == slowPeer->prevRoundTime);
339  }
340  }
341  }
342 
343  }
344  }
345  }
346 
347  void
349  {
350  using namespace csf;
351  using namespace std::chrono;
352 
353  // This is a very specialized test to get ledgers to disagree on
354  // the close time. It unfortunately assumes knowledge about current
355  // timing constants. This is a necessary evil to get coverage up
356  // pending more extensive refactorings of timing constants.
357 
358  // In order to agree-to-disagree on the close time, there must be no
359  // clear majority of nodes agreeing on a close time. This test
360  // sets a relative offset to the peers internal clocks so that they
361  // send proposals with differing times.
362 
363  // However, agreement is on the effective close time, not the
364  // exact close time. The minimum closeTimeResolution is given by
365  // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
366  // the skews need to be at least 10 seconds to have different effective
367  // close times.
368 
369  // Complicating this matter is that nodes will ignore proposals
370  // with times more than proposeFRESHNESS =20s in the past. So at
371  // the minimum granularity, we have at most 3 types of skews
372  // (0s,10s,20s).
373 
374  // This test therefore has 6 nodes, with 2 nodes having each type of
375  // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
376  // actual close time.
377 
378  ConsensusParms const parms{};
379  Sim sim;
380 
381  PeerGroup groupA = sim.createGroup(2);
382  PeerGroup groupB = sim.createGroup(2);
383  PeerGroup groupC = sim.createGroup(2);
384  PeerGroup network = groupA + groupB + groupC;
385 
386  network.trust(network);
387  network.connect(
388  network, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
389 
390  // Run consensus without skew until we have a short close time
391  // resolution
392  Peer* firstPeer = *groupA.begin();
393  while (firstPeer->lastClosedLedger.closeTimeResolution() >=
394  parms.proposeFRESHNESS)
395  sim.run(1);
396 
397  // Introduce a shift on the time of 2/3 of peers
398  for (Peer* peer : groupA)
399  peer->clockSkew = parms.proposeFRESHNESS / 2;
400  for (Peer* peer : groupB)
401  peer->clockSkew = parms.proposeFRESHNESS;
402 
403  sim.run(1);
404 
405  // All nodes agreed to disagree on the close time
406  if (BEAST_EXPECT(sim.synchronized()))
407  {
408  for (Peer* peer : network)
409  BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
410  }
411  }
412 
413  void
415  {
416  using namespace csf;
417  using namespace std::chrono;
418  // Specialized test to exercise a temporary fork in which some peers
419  // are working on an incorrect prior ledger.
420 
421  ConsensusParms const parms{};
422 
423  // Vary the time it takes to process validations to exercise detecting
424  // the wrong LCL at different phases of consensus
425  for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
426  {
427  // Consider 10 peers:
428  // 0 1 2 3 4 5 6 7 8 9
429  // minority majorityA majorityB
430  //
431  // Nodes 0-1 trust nodes 0-4
432  // Nodes 2-9 trust nodes 2-9
433  //
434  // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
435  // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
436  // nodes will instead accept the ledger with tx 1.
437 
438  // Nodes 0-1 will detect this mismatch during a subsequent round
439  // since nodes 2-4 will validate a different ledger.
440 
441  // Nodes 0-1 will acquire the proper ledger from the network and
442  // resume consensus and eventually generate the dominant network
443  // ledger.
444 
445  // This topology can potentially fork with the above trust relations
446  // but that is intended for this test.
447 
448  Sim sim;
449 
450  PeerGroup minority = sim.createGroup(2);
451  PeerGroup majorityA = sim.createGroup(3);
452  PeerGroup majorityB = sim.createGroup(5);
453 
454  PeerGroup majority = majorityA + majorityB;
455  PeerGroup network = minority + majority;
456 
457  SimDuration delay =
458  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
459  minority.trustAndConnect(minority + majorityA, delay);
460  majority.trustAndConnect(majority, delay);
461 
462  CollectByNode<JumpCollector> jumps;
463  sim.collectors.add(jumps);
464 
465  BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
466 
467  // initial round to set prior state
468  sim.run(1);
469 
470  // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
471  // tx 1
472  for (Peer* peer : network)
473  peer->delays.recvValidation = validationDelay;
474  for (Peer* peer : (minority + majorityA))
475  peer->openTxs.insert(Tx{0});
476  for (Peer* peer : majorityB)
477  peer->openTxs.insert(Tx{1});
478 
479  // Run for additional rounds
480  // With no validation delay, only 2 more rounds are needed.
481  // 1. Round to generate different ledgers
482  // 2. Round to detect different prior ledgers (but still generate
483  // wrong ones) and recover within that round since wrong LCL
484  // is detected before we close
485  //
486  // With a validation delay of ledgerMIN_CLOSE, we need 3 more
487  // rounds.
488  // 1. Round to generate different ledgers
489  // 2. Round to detect different prior ledgers (but still generate
490  // wrong ones) but end up declaring consensus on wrong LCL (but
491  // with the right transaction set!). This is because we detect
492  // the wrong LCL after we have closed the ledger, so we declare
493  // consensus based solely on our peer proposals. But we haven't
494  // had time to acquire the right ledger.
495  // 3. Round to correct
496  sim.run(3);
497 
498  // The network never actually forks, since node 0-1 never see a
499  // quorum of validations to fully validate the incorrect chain.
500 
501  // However, for a non zero-validation delay, the network is not
502  // synchronized because nodes 0 and 1 are running one ledger behind
503  if (BEAST_EXPECT(sim.branches() == 1))
504  {
505  for(Peer const* peer : majority)
506  {
507  // No jumps for majority nodes
508  BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
509  BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
510  }
511  for(Peer const* peer : minority)
512  {
513  auto & peerJumps = jumps[peer->id];
514  // last closed ledger jump between chains
515  {
516  if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
517  {
518  JumpCollector::Jump const& jump =
519  peerJumps.closeJumps.front();
520  // Jump is to a different chain
521  BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
522  BEAST_EXPECT(!jump.to.isAncestor(jump.from));
523  }
524  }
525  // fully validated jump forward in same chain
526  {
527  if (BEAST_EXPECT(
528  peerJumps.fullyValidatedJumps.size() == 1))
529  {
530  JumpCollector::Jump const& jump =
531  peerJumps.fullyValidatedJumps.front();
532  // Jump is to a different chain with same seq
533  BEAST_EXPECT(jump.from.seq() < jump.to.seq());
534  BEAST_EXPECT(jump.to.isAncestor(jump.from));
535  }
536  }
537  }
538  }
539  }
540 
541  {
542  // Additional test engineered to switch LCL during the establish
543  // phase. This was added to trigger a scenario that previously
544  // crashed, in which switchLCL switched from establish to open
545  // phase, but still processed the establish phase logic.
546 
547  // Loner node will accept an initial ledger A, but all other nodes
548  // accept ledger B a bit later. By delaying the time it takes
549  // to process a validation, loner node will detect the wrongLCL
550  // after it is already in the establish phase of the next round.
551 
552  Sim sim;
553  PeerGroup loner = sim.createGroup(1);
554  PeerGroup friends = sim.createGroup(3);
555  loner.trust(loner + friends);
556 
557  PeerGroup others = sim.createGroup(6);
558  PeerGroup clique = friends + others;
559  clique.trust(clique);
560 
561  PeerGroup network = loner + clique;
562  network.connect(
563  network,
564  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
565 
566  // initial round to set prior state
567  sim.run(1);
568  for (Peer* peer : (loner + friends))
569  peer->openTxs.insert(Tx(0));
570  for (Peer* peer : others)
571  peer->openTxs.insert(Tx(1));
572 
573  // Delay validation processing
574  for (Peer* peer : network)
575  peer->delays.recvValidation = parms.ledgerGRANULARITY;
576 
577  // additional rounds to generate wrongLCL and recover
578  sim.run(2);
579 
580  // Check all peers recovered
581  for (Peer * p: network)
582  BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
583  }
584  }
585 
586  void
588  {
589  using namespace csf;
590  using namespace std::chrono;
591 
592  // This is a specialized test engineered to yield ledgers with different
593  // close times even though the peers believe they had close time
594  // consensus on the ledger.
595  ConsensusParms parms;
596 
597  Sim sim;
598 
599  // This requires a group of 4 fast and 2 slow peers to create a
600  // situation in which a subset of peers requires seeing additional
601  // proposals to declare consensus.
602  PeerGroup slow = sim.createGroup(2);
603  PeerGroup fast = sim.createGroup(4);
604  PeerGroup network = fast + slow;
605 
606  for (Peer* peer : network)
607  peer->consensusParms = parms;
608 
609  // Connected trust graph
610  network.trust(network);
611 
612  // Fast and slow network connections
613  fast.connect(
614  fast, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
615  slow.connect(
616  network,
617  date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
618 
619  // Run to the ledger *prior* to decreasing the resolution
621 
622  // In order to create the discrepency, we want a case where if
623  // X = effCloseTime(closeTime, resolution, parentCloseTime)
624  // X != effCloseTime(X, resolution, parentCloseTime)
625  //
626  // That is, the effective close time is not a fixed point. This can
627  // happen if X = parentCloseTime + 1, but a subsequent rounding goes
628  // to the next highest multiple of resolution.
629 
630  // So we want to find an offset (now + offset) % 30s = 15
631  // (now + offset) % 20s = 15
632  // This way, the next ledger will close and round up Due to the
633  // network delay settings, the round of consensus will take 5s, so
634  // the next ledger's close time will
635 
636  NetClock::duration when = network[0]->now().time_since_epoch();
637 
638  // Check we are before the 30s to 20s transition
639  NetClock::duration resolution =
640  network[0]->lastClosedLedger.closeTimeResolution();
641  BEAST_EXPECT(resolution == NetClock::duration{30s});
642 
643  while (
644  ((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
645  ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
646  when += 1s;
647  // Advance the clock without consensus running (IS THIS WHAT
648  // PREVENTS IT IN PRACTICE?)
649  sim.scheduler.step_for(
650  NetClock::time_point{when} - network[0]->now());
651 
652  // Run one more ledger with 30s resolution
653  sim.run(1);
654  if (BEAST_EXPECT(sim.synchronized()))
655  {
656  // close time should be ahead of clock time since we engineered
657  // the close time to round up
658  for (Peer* peer : network)
659  {
660  BEAST_EXPECT(
661  peer->lastClosedLedger.closeTime() > peer->now());
662  BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
663  }
664  }
665 
666  // All peers submit their own ID as a transaction
667  for (Peer* peer : network)
668  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
669 
670  // Run 1 more round, this time it will have a decreased
671  // resolution of 20 seconds.
672 
673  // The network delays are engineered so that the slow peers
674  // initially have the wrong tx hash, but they see a majority
675  // of agreement from their peers and declare consensus
676  //
677  // The trick is that everyone starts with a raw close time of
678  // 84681s
679  // Which has
680  // effCloseTime(86481s, 20s, 86490s) = 86491s
681  // However, when the slow peers update their position, they change
682  // the close time to 86451s. The fast peers declare consensus with
683  // the 86481s as their position still.
684  //
685  // When accepted the ledger
686  // - fast peers use eff(86481s) -> 86491s as the close time
687  // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
688 
689  sim.run(1);
690 
691  BEAST_EXPECT(sim.synchronized());
692  }
693 
694  void
696  {
697  using namespace csf;
698  using namespace std::chrono;
699 
700  std::uint32_t numPeers = 10;
701  // Vary overlap between two UNLs
702  for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
703  {
704  ConsensusParms const parms{};
705  Sim sim;
706 
707  std::uint32_t numA = (numPeers - overlap) / 2;
708  std::uint32_t numB = numPeers - numA - overlap;
709 
710  PeerGroup aOnly = sim.createGroup(numA);
711  PeerGroup bOnly = sim.createGroup(numB);
712  PeerGroup commonOnly = sim.createGroup(overlap);
713 
714  PeerGroup a = aOnly + commonOnly;
715  PeerGroup b = bOnly + commonOnly;
716 
717  PeerGroup network = a + b;
718 
719  SimDuration delay =
720  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
721  a.trustAndConnect(a, delay);
722  b.trustAndConnect(b, delay);
723 
724  // Initial round to set prior state
725  sim.run(1);
726  for (Peer* peer : network)
727  {
728  // Nodes have only seen transactions from their neighbors
729  peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
730  for (Peer* to : sim.trustGraph.trustedPeers(peer))
731  peer->openTxs.insert(
732  Tx{static_cast<std::uint32_t>(to->id)});
733  }
734  sim.run(1);
735 
736  // Fork should not happen for 40% or greater overlap
737  // Since the overlapped nodes have a UNL that is the union of the
738  // two cliques, the maximum sized UNL list is the number of peers
739  if (overlap > 0.4 * numPeers)
740  BEAST_EXPECT(sim.synchronized());
741  else
742  {
743  // Even if we do fork, there shouldn't be more than 3 ledgers
744  // One for cliqueA, one for cliqueB and one for nodes in both
745  BEAST_EXPECT(sim.branches() <= 3);
746  }
747  }
748  }
749 
750  void
752  {
753  using namespace csf;
754  using namespace std::chrono;
755 
756  // Simulate a set of 5 validators that aren't directly connected but
757  // rely on a single hub node for communication
758 
759  ConsensusParms const parms{};
760  Sim sim;
761  PeerGroup validators = sim.createGroup(5);
762  PeerGroup center = sim.createGroup(1);
763  validators.trust(validators);
764  center.trust(validators);
765 
766  SimDuration delay =
767  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
768  validators.connect(center, delay);
769 
770  center[0]->runAsValidator = false;
771 
772  // prep round to set initial state.
773  sim.run(1);
774 
775  // everyone submits their own ID as a TX and relay it to peers
776  for (Peer * p : validators)
777  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
778 
779  sim.run(1);
780 
781  // All peers are in sync
782  BEAST_EXPECT(sim.synchronized());
783  }
784 
785 
786  // Helper collector for testPreferredByBranch
787  // Invasively disconnects network at bad times to cause splits
788  struct Disruptor
789  {
794  bool reconnected = false;
795 
797  csf::PeerGroup& net,
798  csf::PeerGroup& c,
799  csf::PeerGroup& split,
801  : network(net), groupCfast(c), groupCsplit(split), delay(d)
802  {
803  }
804 
805  template <class E>
806  void
808  {
809  }
810 
811 
812  void
814  {
815  using namespace std::chrono;
816  // As soon as the the fastC node fully validates C, disconnect
817  // ALL c nodes from the network. The fast C node needs to disconnect
818  // as well to prevent it from relaying the validations it did see
819  if (who == groupCfast[0]->id &&
820  e.ledger.seq() == csf::Ledger::Seq{2})
821  {
822  network.disconnect(groupCsplit);
823  network.disconnect(groupCfast);
824  }
825  }
826 
827  void
829  {
830  // As soon as anyone generates a child of B or C, reconnect the
831  // network so those validations make it through
832  if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
833  {
834  reconnected = true;
835  network.connect(groupCsplit, delay);
836  }
837  }
838 
839 
840  };
841 
842  void
844  {
845  using namespace csf;
846  using namespace std::chrono;
847 
848  // Simulate network splits that are prevented from forking when using
849  // preferred ledger by trie. This is a contrived example that involves
850  // excessive network splits, but demonstrates the safety improvement
851  // from the preferred ledger by trie approach.
852 
853  // Consider 10 validating nodes that comprise a single common UNL
854  // Ledger history:
855  // 1: A
856  // _/ \_
857  // 2: B C
858  // _/ _/ \_
859  // 3: D C' |||||||| (8 different ledgers)
860 
861  // - All nodes generate the common ledger A
862  // - 2 nodes generate B and 8 nodes generate C
863  // - Only 1 of the C nodes sees all the C validations and fully
864  // validates C. The rest of the C nodes split at just the right time
865  // such that they never see any C validations but their own.
866  // - The C nodes continue and generate 8 different child ledgers.
867  // - Meanwhile, the D nodes only saw 1 validation for C and 2 validations
868  // for B.
869  // - The network reconnects and the validations for generation 3 ledgers
870  // are observed (D and the 8 C's)
871  // - In the old approach, 2 votes for D outweights 1 vote for each C'
872  // so the network would avalanche towards D and fully validate it
873  // EVEN though C was fully validated by one node
874  // - In the new approach, 2 votes for D are not enough to outweight the
875  // 8 implicit votes for C, so nodes will avalanche to C instead
876 
877 
878  ConsensusParms const parms{};
879  Sim sim;
880 
881  // Goes A->B->D
882  PeerGroup groupABD = sim.createGroup(2);
883  // Single node that initially fully validates C before the split
884  PeerGroup groupCfast = sim.createGroup(1);
885  // Generates C, but fails to fully validate before the split
886  PeerGroup groupCsplit = sim.createGroup(7);
887 
888  PeerGroup groupNotFastC = groupABD + groupCsplit;
889  PeerGroup network = groupABD + groupCsplit + groupCfast;
890 
891  SimDuration delay = date::round<milliseconds>(
892  0.2 * parms.ledgerGRANULARITY);
893  SimDuration fDelay = date::round<milliseconds>(
894  0.1 * parms.ledgerGRANULARITY);
895 
896  network.trust(network);
897  // C must have a shorter delay to see all the validations before the
898  // other nodes
899  network.connect(groupCfast, fDelay);
900  // The rest of the network is connected at the same speed
901  groupNotFastC.connect(groupNotFastC, delay);
902 
903  Disruptor dc(network, groupCfast, groupCsplit, delay);
904  sim.collectors.add(dc);
905 
906  // Consensus round to generate ledger A
907  sim.run(1);
908  BEAST_EXPECT(sim.synchronized());
909 
910  // Next round generates B and C
911  // To force B, we inject an extra transaction in to those nodes
912  for(Peer * peer : groupABD)
913  {
914  peer->txInjections.emplace(
915  peer->lastClosedLedger.seq(), Tx{42});
916  }
917  // The Disruptor will ensure that nodes disconnect before the C
918  // validations make it to all but the fastC node
919  sim.run(1);
920 
921  // We are no longer in sync, but have not yet forked:
922  // 9 nodes consider A the last fully validated ledger and fastC sees C
923  BEAST_EXPECT(!sim.synchronized());
924  BEAST_EXPECT(sim.branches() == 1);
925 
926  // Run another round to generate the 8 different C' ledgers
927  for (Peer * p : network)
928  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
929  sim.run(1);
930 
931  // Still not forked
932  BEAST_EXPECT(!sim.synchronized());
933  BEAST_EXPECT(sim.branches() == 1);
934 
935  // Disruptor will reconnect all but the fastC node
936  sim.run(1);
937 
938  if(BEAST_EXPECT(sim.branches() == 1))
939  {
940  BEAST_EXPECT(sim.synchronized());
941  }
942  else // old approach caused a fork
943  {
944  BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
945  BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
946  }
947  }
948 
949  // Helper collector for testPauseForLaggards
950  // This will remove the ledgerAccept delay used to
951  // initially create the slow vs. fast validator groups.
952  struct UndoDelay
953  {
955 
957  {
958  }
959 
960  template <class E>
961  void
963  {
964  }
965 
966  void
968  {
969  for (csf::Peer* p : g)
970  {
971  if (p->id == who)
972  p->delays.ledgerAccept = std::chrono::seconds{0};
973  }
974  }
975  };
976 
977  void
979  {
980  using namespace csf;
981  using namespace std::chrono;
982 
983  // Test that validators that jump ahead of the network slow
984  // down.
985 
986  // We engineer the following validated ledger history scenario:
987  //
988  // / --> B1 --> C1 --> ... -> G1 "ahead"
989  // A
990  // \ --> B2 --> C2 "behind"
991  //
992  // After validating a common ledger A, a set of "behind" validators
993  // briefly run slower and validate the lower chain of ledgers.
994  // The "ahead" validators run normal speed and run ahead validating the
995  // upper chain of ledgers.
996  //
997  // Due to the uncommited support definition of the preferred branch
998  // protocol, even if the "behind" validators are a majority, the "ahead"
999  // validators cannot jump to the proper branch until the "behind"
1000  // validators catch up to the same sequence number. For this test to
1001  // succeed, the ahead validators need to briefly slow down consensus.
1002 
1003  ConsensusParms const parms{};
1004  Sim sim;
1005  SimDuration delay =
1006  date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
1007 
1008  PeerGroup behind = sim.createGroup(3);
1009  PeerGroup ahead = sim.createGroup(2);
1010  PeerGroup network = ahead + behind;
1011 
1012  hash_set<Peer::NodeKey_t> trustedKeys;
1013  for (Peer* p : network)
1014  trustedKeys.insert(p->key);
1015  for (Peer* p : network)
1016  p->trustedKeys = trustedKeys;
1017 
1018  network.trustAndConnect(network, delay);
1019 
1020  // Initial seed round to set prior state
1021  sim.run(1);
1022 
1023  // Have the "behind" group initially take a really long time to
1024  // accept a ledger after ending deliberation
1025  for (Peer* p : behind)
1026  p->delays.ledgerAccept = 20s;
1027 
1028  // Use the collector to revert the delay after the single
1029  // slow ledger is generated
1030  UndoDelay undoDelay{behind};
1031  sim.collectors.add(undoDelay);
1032 
1033 #if 0
1034  // Have all beast::journal output printed to stdout
1035  for (Peer* p : network)
1036  p->sink.threshold(beast::severities::kAll);
1037 
1038  // Print ledger accept and fully validated events to stdout
1039  StreamCollector sc{std::cout};
1040  sim.collectors.add(sc);
1041 #endif
1042  // Run the simulation for 100 seconds of simulation time with
1043  std::chrono::nanoseconds const simDuration = 100s;
1044 
1045  // Simulate clients submitting 1 tx every 5 seconds to a random
1046  // validator
1047  Rate const rate{1, 5s};
1048  auto peerSelector = makeSelector(
1049  network.begin(),
1050  network.end(),
1051  std::vector<double>(network.size(), 1.),
1052  sim.rng);
1053  auto txSubmitter = makeSubmitter(
1054  ConstantDistribution{rate.inv()},
1055  sim.scheduler.now(),
1056  sim.scheduler.now() + simDuration,
1057  peerSelector,
1058  sim.scheduler,
1059  sim.rng);
1060 
1061  // Run simulation
1062  sim.run(simDuration);
1063 
1064  // Verify that the network recovered
1065  BEAST_EXPECT(sim.synchronized());
1066  }
1067 
1068  void
1069  run() override
1070  {
1073 
1074  testStandalone();
1075  testPeersAgree();
1076  testSlowPeers();
1078  testWrongLCL();
1080  testFork();
1081  testHubNetwork();
1084  }
1085 };
1086 
1088 } // namespace test
1089 } // namespace ripple
ripple::test::csf::SimTime
typename SimClock::time_point SimTime
Definition: SimTime.h:36
ripple::test::Consensus_test::testHubNetwork
void testHubNetwork()
Definition: Consensus_test.cpp:751
ripple::test::BEAST_DEFINE_TESTSUITE
BEAST_DEFINE_TESTSUITE(AccountDelete, app, ripple)
ripple::test::Consensus_test::testSlowPeers
void testSlowPeers()
Definition: Consensus_test.cpp:180
ripple::test::Consensus_test::Disruptor
Definition: Consensus_test.cpp:788
utility
ripple::test::Consensus_test::testPeersAgree
void testPeersAgree()
Definition: Consensus_test.cpp:143
ripple::Rate
Represents a transfer rate.
Definition: Rate.h:37
std::unordered_set
STL class.
ripple::test::Consensus_test::testConsensusCloseTimeRounding
void testConsensusCloseTimeRounding()
Definition: Consensus_test.cpp:587
std::vector
STL class.
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:828
ripple::test::Consensus_test::Disruptor::Disruptor
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
Definition: Consensus_test.cpp:796
ripple::Consensus
Generic implementation of consensus algorithm.
Definition: Consensus.h:284
ripple::ConsensusState::Yes
@ Yes
We have consensus along with the network.
ripple::test::Consensus_test::Disruptor::delay
csf::SimDuration delay
Definition: Consensus_test.cpp:793
std::chrono::duration
beast::severities::kAll
@ kAll
Definition: Journal.h:34
ripple::test::csf::FullyValidateLedger
Peer fully validated a new ledger.
Definition: events.h:140
ripple::ConsensusParms::ledgerGRANULARITY
std::chrono::milliseconds ledgerGRANULARITY
How often we check state or change positions.
Definition: ConsensusParms.h:98
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
Definition: Consensus_test.cpp:813
ripple::test::csf::Ledger::seq
Seq seq() const
Definition: ledgers.h:163
ripple::test::Consensus_test::Disruptor::groupCfast
csf::PeerGroup & groupCfast
Definition: Consensus_test.cpp:791
ripple::test::csf::FullyValidateLedger::ledger
Ledger ledger
The new fully validated ledger.
Definition: events.h:143
ripple::test::csf::AcceptLedger
Peer accepted consensus results.
Definition: events.h:121
ripple::test::csf::PeerGroup::connect
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition: PeerGroup.h:163
std::cout
ripple::test::Consensus_test::testCheckConsensus
void testCheckConsensus()
Definition: Consensus_test.cpp:78
ripple::test::csf::AcceptLedger::ledger
Ledger ledger
Definition: events.h:124
std::chrono::time_point
ripple::test::Consensus_test::testStandalone
void testStandalone()
Definition: Consensus_test.cpp:119
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:807
std::uint32_t
ripple::checkConsensus
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, ConsensusParms const &parms, bool proposing, beast::Journal j)
Determine whether the network reached consensus and whether we joined.
Definition: Consensus.cpp:108
ripple::ConsensusState::No
@ No
We do not have consensus.
ripple::ConsensusState::MovedOn
@ MovedOn
The network has consensus without us.
ripple::test::Consensus_test::run
void run() override
Definition: Consensus_test.cpp:1069
ripple::test::Consensus_test::testShouldCloseLedger
void testShouldCloseLedger()
Definition: Consensus_test.cpp:40
ripple::test::SuiteJournal
Definition: SuiteJournal.h:81
ripple::test::csf::PeerGroup::disconnect
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition: PeerGroup.h:183
ripple::test::Consensus_test::testPreferredByBranch
void testPreferredByBranch()
Definition: Consensus_test.cpp:843
ripple::test::csf::PeerGroup
A group of simulation Peers.
Definition: PeerGroup.h:39
ripple::test::Consensus_test::testFork
void testFork()
Definition: Consensus_test.cpp:695
ripple::test::csf::Peer
A single peer in the simulation.
Definition: test/csf/Peer.h:54
ripple::test::Consensus_test::journal_
SuiteJournal journal_
Definition: Consensus_test.cpp:32
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::test::Consensus_test::UndoDelay::g
csf::PeerGroup & g
Definition: Consensus_test.cpp:954
ripple::test::Consensus_test::UndoDelay
Definition: Consensus_test.cpp:952
ripple::ConsensusParms
Consensus algorithm parameters.
Definition: ConsensusParms.h:33
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:962
std::unordered_set::insert
T insert(T... args)
ripple::test::Consensus_test::testWrongLCL
void testWrongLCL()
Definition: Consensus_test.cpp:414
ripple::increaseLedgerTimeResolutionEvery
constexpr auto increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
Definition: LedgerTiming.h:49
ripple::shouldCloseLedger
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j)
Determines whether the current ledger should close at this time.
Definition: Consensus.cpp:26
ripple::test::Consensus_test::Disruptor::network
csf::PeerGroup & network
Definition: Consensus_test.cpp:790
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:967
ripple::Peer::id
virtual id_t id() const =0
ripple::tagged_integer< std::uint32_t, PeerIDTag >
ripple::test::csf::SimDuration
typename SimClock::duration SimDuration
Definition: SimTime.h:35
ripple::test::Consensus_test::UndoDelay::UndoDelay
UndoDelay(csf::PeerGroup &a)
Definition: Consensus_test.cpp:956
ripple::test::Consensus_test::Disruptor::groupCsplit
csf::PeerGroup & groupCsplit
Definition: Consensus_test.cpp:792
ripple::test::Consensus_test::testPauseForLaggards
void testPauseForLaggards()
Definition: Consensus_test.cpp:978
ripple::test::Consensus_test::Consensus_test
Consensus_test()
Definition: Consensus_test.cpp:35
ripple::test::Consensus_test
Definition: Consensus_test.cpp:30
ripple::test::Consensus_test::testCloseTimeDisagree
void testCloseTimeDisagree()
Definition: Consensus_test.cpp:348
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:43
ripple::test::jtx::rate
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
std::chrono