Redesign CSF framework (RIPD-1361):

- Separate `Scheduler` from `BasicNetwork` (see the usage sketch after this list).
- Add an event/collector framework for monitoring invariants and calculating statistics.
- Allow distinct network and trust connections between Peers.
- Add a simple routing strategy to support broadcasting arbitrary messages.
- Add a common directed graph (`Digraph`) class for representing network and trust topologies.
- Add a `PeerGroup` class for simpler specification of the trust and network topologies.
- Add a `LedgerOracle` class to ensure distinct ledger histories and simplify branch checking.
- Add a `Submitter` to send transactions at fixed or random intervals to fixed or random peers.
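
As a quick orientation for the first item, here is a minimal usage sketch (not part of the commit) of how the separated `Scheduler` and `BasicNetwork` are meant to cooperate. It uses only calls that appear in the test diff below (`Scheduler::in`, `Scheduler::cancel`, `Scheduler::step`, `BasicNetwork::connect`/`send`); the free function name, the integer peer ids, and the delays are illustrative.

```cpp
// Sketch only: the Scheduler owns simulated time and the event queue,
// while BasicNetwork keeps topology and per-link delivery delays.
#include <test/csf/BasicNetwork.h>
#include <test/csf/Scheduler.h>

#include <set>

namespace ripple {
namespace test {

void
schedulerSketch()
{
    using namespace std::chrono_literals;

    csf::Scheduler scheduler;
    csf::BasicNetwork<int> net(scheduler);  // the network borrows the scheduler

    // Directed links with per-link delay (ids 0, 1, 2 stand in for peers).
    net.connect(0, 1, 1s);
    net.connect(1, 2, 2s);

    std::set<int> arrived;

    // Timers are scheduled and cancelled on the scheduler ...
    auto timer = scheduler.in(5s, [&] { arrived.insert(-1); });
    scheduler.cancel(timer);

    // ... while message delivery goes through the network.
    net.send(0, 1, [&] { arrived.insert(1); });  // arrives at t = 1s
    net.send(1, 2, [&] { arrived.insert(2); });  // arrives at t = 2s

    // Drain all pending events; step_for()/step() behave as in the tests below.
    scheduler.step();
    // arrived == {1, 2}
}

}  // namespace test
}  // namespace ripple
```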

Co-authored-by: Joseph McGee
Author: Brad Chase
Date: 2017-06-14 11:59:06 -04:00
Committed by: seelabs
Parent: b9fc9f6334
Commit: 2c13d9eb57
51 changed files with 6642 additions and 2473 deletions


@@ -21,6 +21,7 @@
 #include <ripple/beast/unit_test.h>
 #include <set>
 #include <test/csf/BasicNetwork.h>
+#include <test/csf/Scheduler.h>
 #include <vector>

 namespace ripple {
@@ -43,20 +44,20 @@ public:
         template <class Net>
         void
-        start(Net& net)
+        start(csf::Scheduler& scheduler, Net& net)
         {
             using namespace std::chrono_literals;
-            auto t = net.timer(1s, [&] { set.insert(0); });
+            auto t = scheduler.in(1s, [&] { set.insert(0); });
             if (id == 0)
             {
                 for (auto const& link : net.links(this))
-                    net.send(this, link.to, [&, to = link.to ] {
+                    net.send(this, link.target, [&, to = link.target ] {
                         to->receive(net, this, 1);
                     });
             }
             else
             {
-                net.cancel(t);
+                scheduler.cancel(t);
             }
         }
@@ -69,7 +70,7 @@ public:
             if (m < 5)
             {
                 for (auto const& link : net.links(this))
-                    net.send(this, link.to, [&, mm = m, to = link.to ] {
+                    net.send(this, link.target, [&, mm = m, to = link.target ] {
                         to->receive(net, this, mm);
                     });
             }
@@ -77,29 +78,26 @@ public:
     };

     void
-    run() override
+    testNetwork()
     {
         using namespace std::chrono_literals;
         std::vector<Peer> pv;
         pv.emplace_back(0);
         pv.emplace_back(1);
         pv.emplace_back(2);
-        csf::BasicNetwork<Peer*> net;
+        csf::Scheduler scheduler;
+        csf::BasicNetwork<Peer*> net(scheduler);
         BEAST_EXPECT(!net.connect(&pv[0], &pv[0]));
         BEAST_EXPECT(net.connect(&pv[0], &pv[1], 1s));
         BEAST_EXPECT(net.connect(&pv[1], &pv[2], 1s));
         BEAST_EXPECT(!net.connect(&pv[0], &pv[1]));
-        std::size_t diameter = 0;
-        net.bfs(
-            &pv[0], [&](auto d, Peer*) { diameter = std::max(d, diameter); });
-        BEAST_EXPECT(diameter == 2);
         for (auto& peer : pv)
-            peer.start(net);
-        BEAST_EXPECT(net.step_for(0s));
-        BEAST_EXPECT(net.step_for(1s));
-        BEAST_EXPECT(net.step());
-        BEAST_EXPECT(!net.step());
-        BEAST_EXPECT(!net.step_for(1s));
+            peer.start(scheduler, net);
+        BEAST_EXPECT(scheduler.step_for(0s));
+        BEAST_EXPECT(scheduler.step_for(1s));
+        BEAST_EXPECT(scheduler.step());
+        BEAST_EXPECT(!scheduler.step());
+        BEAST_EXPECT(!scheduler.step_for(1s));
         net.send(&pv[0], &pv[1], [] {});
         net.send(&pv[1], &pv[0], [] {});
         BEAST_EXPECT(net.disconnect(&pv[0], &pv[1]));
@@ -109,16 +107,46 @@ public:
             auto const links = net.links(&pv[1]);
             if (links.empty())
                 break;
-            BEAST_EXPECT(links[0].disconnect());
+            BEAST_EXPECT(net.disconnect(&pv[1], links[0].target));
         }
         BEAST_EXPECT(pv[0].set == std::set<int>({0, 2, 4}));
         BEAST_EXPECT(pv[1].set == std::set<int>({1, 3}));
         BEAST_EXPECT(pv[2].set == std::set<int>({2, 4}));
-        net.timer(0s, [] {});
     }
+
+    void
+    testDisconnect()
+    {
+        using namespace std::chrono_literals;
+        csf::Scheduler scheduler;
+        csf::BasicNetwork<int> net(scheduler);
+
+        BEAST_EXPECT(net.connect(0, 1, 1s));
+        BEAST_EXPECT(net.connect(0, 2, 2s));
+
+        std::set<int> delivered;
+        net.send(0, 1, [&]() { delivered.insert(1); });
+        net.send(0, 2, [&]() { delivered.insert(2); });
+
+        scheduler.in(1000ms, [&]() { BEAST_EXPECT(net.disconnect(0, 2)); });
+        scheduler.in(1100ms, [&]() { BEAST_EXPECT(net.connect(0, 2)); });
+
+        scheduler.step();
+
+        // only the first message is delivered because the disconnect at 1 s
+        // purges all pending messages from 0 to 2
+        BEAST_EXPECT(delivered == std::set<int>({1}));
+    }
+
+    void
+    run() override
+    {
+        testNetwork();
+        testDisconnect();
+    }
 };

 BEAST_DEFINE_TESTSUITE(BasicNetwork, test, ripple);

-} // test
-} // ripple
+} // namespace test
+} // namespace ripple