Control query depth based on latency:

This changes the TMGetLedger protocol in a backward-compatible way to
include a "query depth" parameter - the number of extra levels in the
SHAMap tree that a server should return in the corresponding
TMLedgerData. When the field is present, the server honors it (up to a
cap); when it is absent, the server chooses a depth based on the
observed latency of the requestor: higher latencies receive larger data
sets (to compensate for greater request/response turnaround times).
This commit is contained in:
JoelKatz
2015-04-29 13:09:16 -07:00
committed by Vinnie Falco
parent d44230b745
commit b1881e798b
10 changed files with 142 additions and 80 deletions

View File

@@ -52,6 +52,9 @@ enum
// how many timeouts before we get aggressive // how many timeouts before we get aggressive
,ledgerBecomeAggressiveThreshold = 6 ,ledgerBecomeAggressiveThreshold = 6
// How many nodes to consider a fetch "small"
,fetchSmallNodes = 32
}; };
InboundLedger::InboundLedger (uint256 const& hash, std::uint32_t seq, fcReason reason, InboundLedger::InboundLedger (uint256 const& hash, std::uint32_t seq, fcReason reason,
@@ -505,6 +508,12 @@ void InboundLedger::trigger (Peer::ptr const& peer)
if (mLedger) if (mLedger)
tmGL.set_ledgerseq (mLedger->getLedgerSeq ()); tmGL.set_ledgerseq (mLedger->getLedgerSeq ());
// If the peer has high latency, query extra deep
if (peer && peer->isHighLatency ())
tmGL.set_querydepth (2);
else
tmGL.set_querydepth (1);
// Get the state data first because it's the most likely to be useful // Get the state data first because it's the most likely to be useful
// if we wind up abandoning this fetch. // if we wind up abandoning this fetch.
if (mHaveHeader && !mHaveState && !mFailed) if (mHaveHeader && !mHaveState && !mFailed)
@@ -568,6 +577,12 @@ void InboundLedger::trigger (Peer::ptr const& peer)
{ {
* (tmGL.add_nodeids ()) = id.getRawString (); * (tmGL.add_nodeids ()) = id.getRawString ();
} }
// If we're not querying for a lot of entries,
// query extra deep
if (nodeIDs.size() <= fetchSmallNodes)
tmGL.set_querydepth (tmGL.querydepth() + 1);
if (m_journal.trace) m_journal.trace << if (m_journal.trace) m_journal.trace <<
"Sending AS node " << nodeIDs.size () << "Sending AS node " << nodeIDs.size () <<
" request to " << ( " request to " << (

View File

@@ -124,6 +124,7 @@ void TransactionAcquire::trigger (Peer::ptr const& peer)
protocol::TMGetLedger tmGL; protocol::TMGetLedger tmGL;
tmGL.set_ledgerhash (mHash.begin (), mHash.size ()); tmGL.set_ledgerhash (mHash.begin (), mHash.size ());
tmGL.set_itype (protocol::liTS_CANDIDATE); tmGL.set_itype (protocol::liTS_CANDIDATE);
tmGL.set_querydepth (3); // We probably need the whole thing
if (getTimeouts () != 0) if (getTimeouts () != 0)
tmGL.set_querytype (protocol::qtINDIRECT); tmGL.set_querytype (protocol::qtINDIRECT);

View File

@@ -77,6 +77,10 @@ public:
bool bool
cluster() const = 0; cluster() const = 0;
virtual
bool
isHighLatency() const = 0;
virtual virtual
RippleAddress const& RippleAddress const&
getNodePublic() const = 0; getNodePublic() const = 0;

View File

@@ -41,6 +41,7 @@
#include <beast/weak_fn.h> #include <beast/weak_fn.h>
#include <boost/algorithm/string/predicate.hpp> #include <boost/algorithm/string/predicate.hpp>
#include <boost/asio/io_service.hpp> #include <boost/asio/io_service.hpp>
#include <algorithm>
#include <functional> #include <functional>
#include <beast/cxx14/memory.h> // <memory> #include <beast/cxx14/memory.h> // <memory>
#include <sstream> #include <sstream>
@@ -1833,7 +1834,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
protocol::TMGetLedger& packet = *m; protocol::TMGetLedger& packet = *m;
std::shared_ptr<SHAMap> map; std::shared_ptr<SHAMap> map;
protocol::TMLedgerData reply; protocol::TMLedgerData reply;
bool fatLeaves = true, fatRoot = false; bool fatLeaves = true;
if (packet.has_requestcookie ()) if (packet.has_requestcookie ())
reply.set_requestcookie (packet.requestcookie ()); reply.set_requestcookie (packet.requestcookie ());
@@ -1843,13 +1844,14 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
if (packet.itype () == protocol::liTS_CANDIDATE) if (packet.itype () == protocol::liTS_CANDIDATE)
{ {
// Request is for a transaction candidate set // Request is for a transaction candidate set
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLedger: Tx candidate set"; "GetLedger: Tx candidate set";
if ((!packet.has_ledgerhash () || packet.ledgerhash ().size () != 32)) if ((!packet.has_ledgerhash () || packet.ledgerhash ().size () != 32))
{ {
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
p_journal_.warning << "GetLedger: Tx candidate set invalid"; if (p_journal_.warning) p_journal_.warning <<
"GetLedger: Tx candidate set invalid";
return; return;
} }
@@ -1862,14 +1864,14 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
{ {
if (packet.has_querytype () && !packet.has_requestcookie ()) if (packet.has_querytype () && !packet.has_requestcookie ())
{ {
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Routing Tx set request"; "GetLedger: Routing Tx set request";
auto const v = getPeerWithTree( auto const v = getPeerWithTree(
overlay_, txHash, this); overlay_, txHash, this);
if (! v) if (! v)
{ {
p_journal_.info << if (p_journal_.info) p_journal_.info <<
"GetLedger: Route TX set failed"; "GetLedger: Route TX set failed";
return; return;
} }
@@ -1880,7 +1882,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
return; return;
} }
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Can't provide map "; "GetLedger: Can't provide map ";
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
return; return;
@@ -1890,19 +1892,18 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
reply.set_ledgerhash (txHash.begin (), txHash.size ()); reply.set_ledgerhash (txHash.begin (), txHash.size ());
reply.set_type (protocol::liTS_CANDIDATE); reply.set_type (protocol::liTS_CANDIDATE);
fatLeaves = false; // We'll already have most transactions fatLeaves = false; // We'll already have most transactions
fatRoot = true; // Save a pass
} }
else else
{ {
if (getApp().getFeeTrack().isLoadedLocal() && ! cluster()) if (getApp().getFeeTrack().isLoadedLocal() && ! cluster())
{ {
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Too busy"; "GetLedger: Too busy";
return; return;
} }
// Figure out what ledger they want // Figure out what ledger they want
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLedger: Received"; "GetLedger: Received";
Ledger::pointer ledger; Ledger::pointer ledger;
@@ -1913,7 +1914,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
if (packet.ledgerhash ().size () != 32) if (packet.ledgerhash ().size () != 32)
{ {
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
p_journal_.warning << if (p_journal_.warning) p_journal_.warning <<
"GetLedger: Invalid request"; "GetLedger: Invalid request";
return; return;
} }
@@ -1923,8 +1924,8 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
logMe += to_string (ledgerhash); logMe += to_string (ledgerhash);
ledger = getApp().getLedgerMaster ().getLedgerByHash (ledgerhash); ledger = getApp().getLedgerMaster ().getLedgerByHash (ledgerhash);
if (!ledger && p_journal_.trace) if (!ledger)
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLedger: Don't have " << ledgerhash; "GetLedger: Don't have " << ledgerhash;
if (!ledger && (packet.has_querytype () && if (!ledger && (packet.has_querytype () &&
@@ -1939,7 +1940,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
overlay_, ledgerhash, seq, this); overlay_, ledgerhash, seq, this);
if (! v) if (! v)
{ {
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLedger: Cannot route"; "GetLedger: Cannot route";
return; return;
} }
@@ -1947,7 +1948,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
packet.set_requestcookie (id ()); packet.set_requestcookie (id ());
v->send (std::make_shared<Message>( v->send (std::make_shared<Message>(
packet, protocol::mtGET_LEDGER)); packet, protocol::mtGET_LEDGER));
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Request routed"; "GetLedger: Request routed";
return; return;
} }
@@ -1957,14 +1958,14 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
if (packet.ledgerseq() < if (packet.ledgerseq() <
getApp().getLedgerMaster().getEarliestFetch()) getApp().getLedgerMaster().getEarliestFetch())
{ {
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Early ledger request"; "GetLedger: Early ledger request";
return; return;
} }
ledger = getApp().getLedgerMaster ().getLedgerBySeq ( ledger = getApp().getLedgerMaster ().getLedgerBySeq (
packet.ledgerseq ()); packet.ledgerseq ());
if (!ledger && p_journal_.debug) if (! ledger)
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Don't have " << packet.ledgerseq (); "GetLedger: Don't have " << packet.ledgerseq ();
} }
else if (packet.has_ltype () && (packet.ltype () == protocol::ltCURRENT)) else if (packet.has_ltype () && (packet.ltype () == protocol::ltCURRENT))
@@ -1982,7 +1983,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
else else
{ {
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
p_journal_.warning << if (p_journal_.warning) p_journal_.warning <<
"GetLedger: Unknown request"; "GetLedger: Unknown request";
return; return;
} }
@@ -1992,20 +1993,20 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
{ {
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
if (p_journal_.warning && ledger) if (ledger)
p_journal_.warning << if (p_journal_.warning) p_journal_.warning <<
"GetLedger: Invalid sequence"; "GetLedger: Invalid sequence";
return; return;
} }
if (!packet.has_ledgerseq() && (ledger->getLedgerSeq() < if (!packet.has_ledgerseq() && (ledger->getLedgerSeq() <
getApp().getLedgerMaster().getEarliestFetch())) getApp().getLedgerMaster().getEarliestFetch()))
{ {
p_journal_.debug << if (p_journal_.debug) p_journal_.debug <<
"GetLedger: Early ledger request"; "GetLedger: Early ledger request";
return; return;
} }
// Fill out the reply // Fill out the reply
uint256 lHash = ledger->getHash (); uint256 lHash = ledger->getHash ();
@@ -2016,7 +2017,7 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
if (packet.itype () == protocol::liBASE) if (packet.itype () == protocol::liBASE)
{ {
// they want the ledger base data // they want the ledger base data
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLedger: Base data"; "GetLedger: Base data";
Serializer nData (128); Serializer nData (128);
ledger->addRaw (nData); ledger->addRaw (nData);
@@ -2074,22 +2075,27 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
if (!map || (packet.nodeids_size () == 0)) if (!map || (packet.nodeids_size () == 0))
{ {
p_journal_.warning << if (p_journal_.warning) p_journal_.warning <<
"GetLedger: Can't find map or empty request"; "GetLedger: Can't find map or empty request";
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
return; return;
} }
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLeder: " << logMe; "GetLeder: " << logMe;
auto const depth =
packet.has_querydepth() ?
(std::min(packet.querydepth(), 3u)) :
(isHighLatency() ? 2 : 1);
for (int i = 0; i < packet.nodeids ().size (); ++i) for (int i = 0; i < packet.nodeids ().size (); ++i)
{ {
SHAMapNodeID mn (packet.nodeids (i).data (), packet.nodeids (i).size ()); SHAMapNodeID mn (packet.nodeids (i).data (), packet.nodeids (i).size ());
if (!mn.isValid ()) if (!mn.isValid ())
{ {
p_journal_.warning << if (p_journal_.warning) p_journal_.warning <<
"GetLedger: Invalid node " << logMe; "GetLedger: Invalid node " << logMe;
charge (Resource::feeInvalidRequest); charge (Resource::feeInvalidRequest);
return; return;
@@ -2100,10 +2106,10 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
try try
{ {
if (map->getNodeFat (mn, nodeIDs, rawNodes, fatRoot, fatLeaves)) if (map->getNodeFat (mn, nodeIDs, rawNodes, fatLeaves, depth))
{ {
assert (nodeIDs.size () == rawNodes.size ()); assert (nodeIDs.size () == rawNodes.size ());
p_journal_.trace << if (p_journal_.trace) p_journal_.trace <<
"GetLedger: getNodeFat got " << rawNodes.size () << " nodes"; "GetLedger: getNodeFat got " << rawNodes.size () << " nodes";
std::vector<SHAMapNodeID>::iterator nodeIDIterator; std::vector<SHAMapNodeID>::iterator nodeIDIterator;
std::vector< Blob >::iterator rawNodeIterator; std::vector< Blob >::iterator rawNodeIterator;
@@ -2141,11 +2147,15 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
if (!packet.has_ledgerhash ()) if (!packet.has_ledgerhash ())
info += ", no hash specified"; info += ", no hash specified";
p_journal_.warning << if (p_journal_.warning) p_journal_.warning <<
"getNodeFat( " << mn << ") throws exception: " << info; "getNodeFat( " << mn << ") throws exception: " << info;
} }
} }
if (p_journal_.info) p_journal_.info <<
"Got request for " << packet.nodeids().size() << " nodes at depth " <<
depth << ", return " << reply.nodes().size() << " nodes";
Message::pointer oPacket = std::make_shared<Message> ( Message::pointer oPacket = std::make_shared<Message> (
reply, protocol::mtLEDGER_DATA); reply, protocol::mtLEDGER_DATA);
send (oPacket); send (oPacket);
@@ -2190,4 +2200,11 @@ PeerImp::getScore (bool haveItem)
return score; return score;
} }
// Whether this peer's connection is considered high latency.
// Used when sizing TMGetLedger queries: high-latency peers are sent
// deeper queries so fewer request/response round trips are needed.
bool
PeerImp::isHighLatency() const
{
// recentLock_ (declared mutable) serializes access to latency_,
// which is presumably updated elsewhere (ping handling) - confirm.
std::lock_guard<std::mutex> sl (recentLock_);
// NOTE(review): Tuning::peerHighLatency is documented as milliseconds;
// confirm latency_'s duration type counts the same unit, otherwise
// this comparison is off by the unit ratio.
return latency_.count() >= Tuning::peerHighLatency;
}
} // ripple } // ripple

View File

@@ -144,7 +144,7 @@ private:
std::uint64_t lastPingSeq_ = 0; std::uint64_t lastPingSeq_ = 0;
clock_type::time_point lastPingTime_; clock_type::time_point lastPingTime_;
mutable std::mutex recentLock_; std::mutex mutable recentLock_;
protocol::TMStatusChange last_status_; protocol::TMStatusChange last_status_;
protocol::TMHello hello_; protocol::TMHello hello_;
Resource::Consumer usage_; Resource::Consumer usage_;
@@ -307,6 +307,9 @@ public:
int int
getScore (bool haveItem); getScore (bool haveItem);
bool
isHighLatency() const override;
private: private:
void void
close(); close();

View File

@@ -46,6 +46,10 @@ enum
consider it insane */ consider it insane */
insaneLedgerLimit = 128, insaneLedgerLimit = 128,
/** How many milliseconds to consider high latency
on a peer connection */
peerHighLatency = 120,
/** How often we check connections (seconds) */ /** How often we check connections (seconds) */
checkSeconds = 10, checkSeconds = 10,
}; };

View File

@@ -300,6 +300,7 @@ message TMGetLedger
repeated bytes nodeIDs = 5; repeated bytes nodeIDs = 5;
optional uint64 requestCookie = 6; optional uint64 requestCookie = 6;
optional TMQueryType queryType = 7; optional TMQueryType queryType = 7;
optional uint32 queryDepth = 8; // How deep to go, number of extra levels
} }
enum TMReplyError enum TMReplyError

View File

@@ -146,8 +146,12 @@ public:
// comparison/sync functions // comparison/sync functions
void getMissingNodes (std::vector<SHAMapNodeID>& nodeIDs, std::vector<uint256>& hashes, int max, void getMissingNodes (std::vector<SHAMapNodeID>& nodeIDs, std::vector<uint256>& hashes, int max,
SHAMapSyncFilter * filter); SHAMapSyncFilter * filter);
bool getNodeFat (SHAMapNodeID node, std::vector<SHAMapNodeID>& nodeIDs,
std::vector<Blob >& rawNode, bool fatRoot, bool fatLeaves) const; bool getNodeFat (SHAMapNodeID node,
std::vector<SHAMapNodeID>& nodeIDs,
std::vector<Blob>& rawNode,
bool fatLeaves, std::uint32_t depth) const;
bool getRootNode (Serializer & s, SHANodeFormat format) const; bool getRootNode (Serializer & s, SHANodeFormat format) const;
std::vector<uint256> getNeededHashes (int max, SHAMapSyncFilter * filter); std::vector<uint256> getNeededHashes (int max, SHAMapSyncFilter * filter);
SHAMapAddNode addRootNode (uint256 const& hash, Blob const& rootNode, SHANodeFormat format, SHAMapAddNode addRootNode (uint256 const& hash, Blob const& rootNode, SHANodeFormat format,

View File

@@ -302,18 +302,24 @@ std::vector<uint256> SHAMap::getNeededHashes (int max, SHAMapSyncFilter* filter)
return nodeHashes; return nodeHashes;
} }
bool SHAMap::getNodeFat (SHAMapNodeID wanted, std::vector<SHAMapNodeID>& nodeIDs, bool SHAMap::getNodeFat (SHAMapNodeID wanted,
std::vector<Blob >& rawNodes, bool fatRoot, bool fatLeaves) const std::vector<SHAMapNodeID>& nodeIDs,
std::vector<Blob>& rawNodes, bool fatLeaves,
std::uint32_t depth) const
{ {
// Gets a node and some of its children // Gets a node and some of its children
// to a specified depth
SHAMapTreeNode* node = root_.get (); SHAMapTreeNode* node = root_.get ();
SHAMapNodeID nodeID; SHAMapNodeID nodeID;
while (node && node->isInner () && (nodeID.getDepth() < wanted.getDepth())) while (node && node->isInner () && (nodeID.getDepth() < wanted.getDepth()))
{ {
int branch = nodeID.selectBranch (wanted.getNodeID()); int branch = nodeID.selectBranch (wanted.getNodeID());
if (node->isEmptyBranch (branch))
return false;
node = descendThrow (node, branch); node = descendThrow (node, branch);
nodeID = nodeID.getChildNodeID (branch); nodeID = nodeID.getChildNodeID (branch);
} }
@@ -322,7 +328,7 @@ bool SHAMap::getNodeFat (SHAMapNodeID wanted, std::vector<SHAMapNodeID>& nodeIDs
{ {
if (journal_.warning) journal_.warning << if (journal_.warning) journal_.warning <<
"peer requested node that is not in the map: " << wanted; "peer requested node that is not in the map: " << wanted;
throw std::runtime_error ("Peer requested node not in map"); return false;
} }
if (node->isInner () && node->isEmpty ()) if (node->isInner () && node->isEmpty ())
@@ -332,51 +338,56 @@ bool SHAMap::getNodeFat (SHAMapNodeID wanted, std::vector<SHAMapNodeID>& nodeIDs
return false; return false;
} }
int count; std::stack<std::tuple <SHAMapTreeNode*, SHAMapNodeID, int>> stack;
bool skipNode = false; stack.emplace (node, nodeID, depth);
do
while (! stack.empty ())
{ {
std::tie (node, nodeID, depth) = stack.top ();
stack.pop ();
if (skipNode) // Add this node to the reply
skipNode = false; Serializer s;
else node->addRaw (s, snfWIRE);
nodeIDs.push_back (nodeID);
rawNodes.push_back (std::move (s.peekData()));
if (node->isInner())
{ {
Serializer s; // We descend inner nodes with only a single child
node->addRaw (s, snfWIRE); // without decrementing the depth
nodeIDs.push_back (wanted); int bc = node->getBranchCount();
rawNodes.push_back (std::move (s.peekData ())); if ((depth > 0) || (bc == 1))
}
if ((!fatRoot && wanted.isRoot ()) || node->isLeaf ()) // don't get a fat root_, can't get a fat leaf
return true;
SHAMapTreeNode* nextNode = nullptr;
SHAMapNodeID nextNodeID;
count = 0;
for (int i = 0; i < 16; ++i)
{
if (!node->isEmptyBranch (i))
{ {
SHAMapNodeID nextNodeID = wanted.getChildNodeID (i); // We need to process this node's children
nextNode = descendThrow (node, i); for (int i = 0; i < 16; ++i)
++count;
if (fatLeaves || nextNode->isInner ())
{ {
Serializer s; if (! node->isEmptyBranch (i))
nextNode->addRaw (s, snfWIRE); {
nodeIDs.push_back (nextNodeID); SHAMapNodeID childID = nodeID.getChildNodeID (i);
rawNodes.push_back (std::move (s.peekData ())); SHAMapTreeNode* childNode = descendThrow (node, i);
skipNode = true; // Don't add this node again if we loop
if (childNode->isInner () &&
((depth > 1) || (bc == 1)))
{
// If there's more than one child, reduce the depth
// If only one child, follow the chain
stack.emplace (childNode, childID,
(bc > 1) ? (depth - 1) : depth);
}
else if (childNode->isInner() || fatLeaves)
{
// Just include this node
Serializer s;
childNode->addRaw (s, snfWIRE);
nodeIDs.push_back (childID);
rawNodes.push_back (std::move (s.peekData ()));
}
}
} }
} }
} }
}
node = nextNode;
wanted = nextNodeID;
// So long as there's exactly one inner node, we take it
} while ((count == 1) && node->isInner());
return true; return true;
} }

View File

@@ -124,7 +124,8 @@ public:
destination.setSynching (); destination.setSynching ();
unexpected (!source.getNodeFat (SHAMapNodeID (), nodeIDs, gotNodes, (rand () % 2) == 0, (rand () % 2) == 0), unexpected (!source.getNodeFat (SHAMapNodeID (), nodeIDs, gotNodes,
(rand () % 2) == 0, rand () % 3),
"GetNodeFat"); "GetNodeFat");
unexpected (gotNodes.size () < 1, "NodeSize"); unexpected (gotNodes.size () < 1, "NodeSize");
@@ -152,7 +153,8 @@ public:
// get as many nodes as possible based on this information // get as many nodes as possible based on this information
for (nodeIDIterator = nodeIDs.begin (); nodeIDIterator != nodeIDs.end (); ++nodeIDIterator) for (nodeIDIterator = nodeIDs.begin (); nodeIDIterator != nodeIDs.end (); ++nodeIDIterator)
{ {
if (!source.getNodeFat (*nodeIDIterator, gotNodeIDs, gotNodes, (rand () % 2) == 0, (rand () % 2) == 0)) if (!source.getNodeFat (*nodeIDIterator, gotNodeIDs, gotNodes,
(rand () % 2) == 0, rand () % 3))
{ {
fail ("GetNodeFat"); fail ("GetNodeFat");
} }