Some fetch pack optimizations.

This commit is contained in:
JoelKatz
2013-06-11 00:01:43 -07:00
parent b757d9fd72
commit a31e291ecc
3 changed files with 39 additions and 30 deletions

View File

@@ -1988,6 +1988,15 @@ void NetworkOPs::getBookPage(Ledger::pointer lpLedger, const uint160& uTakerPays
// jvResult["nodes"] = Json::Value(Json::arrayValue);
}
// Callback used while assembling a fetch pack for a peer: appends one
// (ledgerSeq, hash, data) entry to the TMGetObjectByHash reply message.
// Bound via BIND_TYPE and handed to SHAMap::getFetchPack, which invokes
// it once per node it wants included.
static void fpAppender(ripple::TMGetObjectByHash* reply, uint32 ledgerSeq,
	const uint256& hash, const Blob& blob)
{
	ripple::TMIndexedObject& newObj = *(reply->add_objects());
	newObj.set_ledgerseq(ledgerSeq);
	newObj.set_hash(hash.begin(), 256 / 8); // uint256 is exactly 32 bytes
	// &blob[0] on an empty vector is undefined behavior; only attach data
	// when there is some. (Serialized nodes are normally non-empty.)
	if (!blob.empty())
		newObj.set_data(&blob[0], blob.size());
}
void NetworkOPs::makeFetchPack(Job&, boost::weak_ptr<Peer> wPeer,
boost::shared_ptr<ripple::TMGetObjectByHash> request,
Ledger::pointer wantLedger, Ledger::pointer haveLedger, uint32 uUptime)
@@ -2028,27 +2037,13 @@ void NetworkOPs::makeFetchPack(Job&, boost::weak_ptr<Peer> wPeer,
newObj.set_data(s.getDataPtr(), s.getLength());
newObj.set_ledgerseq(lSeq);
std::list<SHAMap::fetchPackEntry_t> pack = wantLedger->peekAccountStateMap()->getFetchPack(
haveLedger->peekAccountStateMap().get(), true, 1024);
BOOST_FOREACH(SHAMap::fetchPackEntry_t& node, pack)
{
ripple::TMIndexedObject& newObj = *reply.add_objects();
newObj.set_hash(node.first.begin(), 256 / 8);
newObj.set_data(&node.second[0], node.second.size());
newObj.set_ledgerseq(lSeq);
}
wantLedger->peekAccountStateMap()->getFetchPack(haveLedger->peekAccountStateMap().get(), true, 1024,
BIND_TYPE(fpAppender, &reply, lSeq, P_1, P_2));
if (wantLedger->getAccountHash().isNonZero())
wantLedger->peekTransactionMap()->getFetchPack(NULL, true, 256,
BIND_TYPE(fpAppender, &reply, lSeq, P_1, P_2));
if (wantLedger->getAccountHash().isNonZero() && (pack.size() < 512))
{
pack = wantLedger->peekTransactionMap()->getFetchPack(NULL, true, 256);
BOOST_FOREACH(SHAMap::fetchPackEntry_t& node, pack)
{
ripple::TMIndexedObject& newObj = *reply.add_objects();
newObj.set_hash(node.first.begin(), 256 / 8);
newObj.set_data(&node.second[0], node.second.size());
newObj.set_ledgerseq(lSeq);
}
}
if (reply.objects().size() >= 256)
break;
haveLedger = wantLedger;

View File

@@ -124,6 +124,7 @@ public:
typedef std::pair <uint256, Blob> fetchPackEntry_t;
std::list<fetchPackEntry_t> getFetchPack(SHAMap* have, bool includeLeaves, int max);
void getFetchPack(SHAMap* have, bool includeLeaves, int max, FUNCTION_TYPE<void (const uint256&, const Blob&)>);
static int getFullBelowSize() { return fullBelowCache.getSize(); }
static void sweep() { fullBelowCache.sweep(); }

View File

@@ -419,15 +419,29 @@ bool SHAMap::hasLeafNode(uint256 const& tag, uint256 const& nodeHash)
int branch = node->selectBranch(tag);
if (node->isEmptyBranch(branch))
return false;
node = getNodePointer(node->getChildNodeID(branch), node->getChildHash(branch));
const uint256& nextHash = node->getChildHash(branch);
if (nextHash == nodeHash)
return true;
node = getNodePointer(node->getChildNodeID(branch), nextHash);
}
return node->getNodeHash() == nodeHash;
return false;
}
// Accumulator callback: gathers one fetch-pack node (hash, serialized data)
// into the caller-supplied list. Used by the legacy list-returning overload
// of SHAMap::getFetchPack to adapt the callback-based implementation.
static void addFPtoList(std::list<SHAMap::fetchPackEntry_t>& list, const uint256& hash, const Blob& blob)
{
	SHAMap::fetchPackEntry_t entry(hash, blob);
	list.push_back(entry);
}
// Legacy convenience form: builds the fetch pack as a materialized list.
// Simply forwards to the callback-based overload, collecting each emitted
// node into a local list via addFPtoList.
std::list<SHAMap::fetchPackEntry_t> SHAMap::getFetchPack(SHAMap* have, bool includeLeaves, int max)
{
	std::list<fetchPackEntry_t> nodes;
	getFetchPack(have, includeLeaves, max,
		BIND_TYPE(addFPtoList, boost::ref(nodes), P_1, P_2));
	return nodes;
}
void SHAMap::getFetchPack(SHAMap* have, bool includeLeaves, int max,
FUNCTION_TYPE<void (const uint256&, const Blob&)> func)
{
boost::recursive_mutex::scoped_lock ul1(mLock);
boost::shared_ptr< boost::unique_lock<boost::recursive_mutex> > ul2;
@@ -439,7 +453,7 @@ std::list<SHAMap::fetchPackEntry_t> SHAMap::getFetchPack(SHAMap* have, bool incl
if (!(*ul2))
{
WriteLog (lsINFO, SHAMap) << "Unable to create pack due to lock";
return ret;
return;
}
}
@@ -451,16 +465,16 @@ std::list<SHAMap::fetchPackEntry_t> SHAMap::getFetchPack(SHAMap* have, bool incl
{
Serializer s;
root->addRaw(s, snfPREFIX);
ret.push_back(fetchPackEntry_t(root->getNodeHash(), s.peekData()));
func(root->getNodeHash(), s.peekData());
}
return ret;
return;
}
if (root->getNodeHash().isZero())
return ret;
return;
if (have && (root->getNodeHash() == have->root->getNodeHash()))
return ret;
return;
std::stack<SHAMapTreeNode*> stack; // contains unexplored non-matching inner node entries
stack.push(root.get());
@@ -473,7 +487,7 @@ std::list<SHAMap::fetchPackEntry_t> SHAMap::getFetchPack(SHAMap* have, bool incl
// 1) Add this node to the pack
Serializer s;
node->addRaw(s, snfPREFIX);
ret.push_back(fetchPackEntry_t(node->getNodeHash(), s.peekData()));
func(node->getNodeHash(), s.peekData());
--max;
// 2) push non-matching child inner nodes
@@ -494,7 +508,7 @@ std::list<SHAMap::fetchPackEntry_t> SHAMap::getFetchPack(SHAMap* have, bool incl
{
Serializer s;
node->addRaw(s, snfPREFIX);
ret.push_back(fetchPackEntry_t(node->getNodeHash(), s.peekData()));
func(node->getNodeHash(), s.peekData());
--max;
}
}
@@ -503,7 +517,6 @@ std::list<SHAMap::fetchPackEntry_t> SHAMap::getFetchPack(SHAMap* have, bool incl
if (max <= 0)
break;
}
return ret;
}
#ifdef DEBUG