Merge branch 'develop' of github.com:jedmccaleb/NewCoin into develop

This commit is contained in:
Arthur Britto
2013-04-26 11:14:32 -07:00
15 changed files with 128 additions and 137 deletions

View File

@@ -1,24 +1,16 @@
To use LevelDB, follow these steps: To use LevelDB, follow these steps:
1) Obtain the latest LevelDB package from http://code.google.com/p/leveldb/ 1) At the top of the directory tree, type:
1.9.0 is known to work. Build it. make -C src/cpp/leveldb libleveldb.a
2) In the SConstruct file, change "LevelDB = bool(0)" to 2) In the SConstruct file, change
"LevelDB = bool(1)". LevelDB = bool(0)
to
LevelDB = bool(1)
3) In your main directory, create a 'leveldb' directory with 'include' and 3) Compile with 'scons'.
'lib' subdirectories.
4) In the 'leveldb/lib' file, place the 'libleveldb.a' file you built.
5) Make a 'levedb/include/leveldb' directory. Place the leveldb header files
(leveldb.h, slice.h, and so on) in this directory.
6) Compile with 'scons'.
There is no import or export method, so your server will have to refetch There is no import or export method, so your server will have to refetch
all nodes. For simplicity, we recommend clearing your entire 'db' directory. all nodes. For simplicity, we recommend clearing your entire 'db' directory.
LevelDB will store the hash nodes in a 'db/hashnode' directory. LevelDB will store the hash nodes in a 'db/hashnode' directory.

View File

@@ -7,11 +7,12 @@ import platform
import commands import commands
import re import re
LevelDB = bool(0)
OSX = bool(platform.mac_ver()[0]) OSX = bool(platform.mac_ver()[0])
FreeBSD = bool('FreeBSD' == platform.system()) FreeBSD = bool('FreeBSD' == platform.system())
Linux = bool('Linux' == platform.system()) Linux = bool('Linux' == platform.system())
Ubuntu = bool(Linux and 'Ubuntu' == platform.linux_distribution()[0]) Ubuntu = bool(Linux and 'Ubuntu' == platform.linux_distribution()[0])
LevelDB = bool(0)
if OSX or Ubuntu: if OSX or Ubuntu:
CTAGS = '/usr/bin/ctags' CTAGS = '/usr/bin/ctags'
@@ -115,8 +116,8 @@ if OSX:
env.Append(CXXFLAGS = ['-I/usr/local/opt/openssl/include']) env.Append(CXXFLAGS = ['-I/usr/local/opt/openssl/include'])
if LevelDB: if LevelDB:
env.Append(CXXFLAGS = [ '-Ileveldb/include', '-DUSE_LEVELDB']) env.Append(CXXFLAGS = [ '-Isrc/cpp/leveldb/include', '-DUSE_LEVELDB'])
env.Append(LINKFLAGS = [ '-Lleveldb/lib' ]) env.Append(LINKFLAGS = [ '-Lsrc/cpp/leveldb' ])
env.Append(LIBS = [ '-lleveldb']) env.Append(LIBS = [ '-lleveldb'])
DB_SRCS = glob.glob('src/cpp/database/*.c') + glob.glob('src/cpp/database/*.cpp') DB_SRCS = glob.glob('src/cpp/database/*.c') + glob.glob('src/cpp/database/*.cpp')

View File

@@ -1,5 +1,10 @@
#include "Application.h" #include "Application.h"
#ifdef USE_LEVELDB
#include "leveldb/cache.h"
#endif
#include "AcceptedLedger.h" #include "AcceptedLedger.h"
#include "Config.h" #include "Config.h"
#include "PeerDoor.h" #include "PeerDoor.h"
@@ -79,6 +84,11 @@ void Application::stop()
mAuxService.stop(); mAuxService.stop();
mJobQueue.shutdown(); mJobQueue.shutdown();
#ifdef USE_LEVELDB
	delete mHashNodeDB;
	mHashNodeDB = NULL;
#endif
cLog(lsINFO) << "Stopped: " << mIOService.stopped(); cLog(lsINFO) << "Stopped: " << mIOService.stopped();
Instance::shutdown(); Instance::shutdown();
} }
@@ -157,6 +167,7 @@ void Application::setup()
#ifdef USE_LEVELDB #ifdef USE_LEVELDB
leveldb::Options options; leveldb::Options options;
options.create_if_missing = true; options.create_if_missing = true;
options.block_cache = leveldb::NewLRUCache(theConfig.getSize(siHashNodeDBCache) * 1024 * 1024);
leveldb::Status status = leveldb::DB::Open(options, (theConfig.DATA_DIR / "hashnode").string(), &mHashNodeDB); leveldb::Status status = leveldb::DB::Open(options, (theConfig.DATA_DIR / "hashnode").string(), &mHashNodeDB);
if (!status.ok() || !mHashNodeDB) if (!status.ok() || !mHashNodeDB)
{ {

View File

@@ -37,7 +37,7 @@ bool HashedObjectStore::store(HashedObjectType type, uint32 index,
{ // return: false = already in cache, true = added to cache { // return: false = already in cache, true = added to cache
if (!theApp->getHashNodeDB()) if (!theApp->getHashNodeDB())
{ {
cLog(lsTRACE) << "HOS: no db"; cLog(lsWARNING) << "HOS: no db";
return true; return true;
} }
if (mCache.touch(hash)) if (mCache.touch(hash))
@@ -50,7 +50,7 @@ bool HashedObjectStore::store(HashedObjectType type, uint32 index,
HashedObject::pointer object = boost::make_shared<HashedObject>(type, index, data, hash); HashedObject::pointer object = boost::make_shared<HashedObject>(type, index, data, hash);
if (!mCache.canonicalize(hash, object)) if (!mCache.canonicalize(hash, object))
{ {
Serializer s(1 + (32 / 8) + (32 / 8) + data.size()); Serializer s(9 + data.size());
s.add8(static_cast<unsigned char>(type)); s.add8(static_cast<unsigned char>(type));
s.add32(index); s.add32(index);
s.add32(index); s.add32(index);
@@ -63,6 +63,8 @@ bool HashedObjectStore::store(HashedObjectType type, uint32 index,
assert(false); assert(false);
} }
} }
else
cLog(lsDEBUG) << "HOS: store race";
return true; return true;
} }
@@ -77,12 +79,18 @@ HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
return obj; return obj;
if (!theApp || !theApp->getHashNodeDB()) if (!theApp || !theApp->getHashNodeDB())
{
cLog(lsWARNING) << "HOS: no db";
return obj; return obj;
}
std::string sData; std::string sData;
leveldb::Status st = theApp->getHashNodeDB()->Get(leveldb::ReadOptions(), hash.GetHex(), &sData); leveldb::Status st = theApp->getHashNodeDB()->Get(leveldb::ReadOptions(), hash.GetHex(), &sData);
if (!st.ok()) if (!st.ok())
{
assert(st.IsNotFound());
return obj; return obj;
}
Serializer s(sData); Serializer s(sData);
@@ -132,6 +140,7 @@ bool HashedObjectStore::store(HashedObjectType type, uint32 index,
} }
// else // else
// cLog(lsTRACE) << "HOS: already had " << hash; // cLog(lsTRACE) << "HOS: already had " << hash;
mNegativeCache.del(hash);
return true; return true;
} }

View File

@@ -960,7 +960,10 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
cLog(lsWARNING) << "Included TXbase invalid"; cLog(lsWARNING) << "Included TXbase invalid";
} }
if (!san.isInvalid()) if (!san.isInvalid())
{
ledger->progress();
ledger->trigger(peer); ledger->trigger(peer);
}
else else
cLog(lsDEBUG) << "Peer sends invalid base data"; cLog(lsDEBUG) << "Peer sends invalid base data";
return; return;
@@ -996,7 +999,10 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
else else
ledger->takeAsNode(nodeIDs, nodeData, ret); ledger->takeAsNode(nodeIDs, nodeData, ret);
if (!ret.isInvalid()) if (!ret.isInvalid())
ledger->trigger(peer); {
ledger->progress();
ledger->trigger(peer);
}
else else
cLog(lsDEBUG) << "Peer sends invalid node data"; cLog(lsDEBUG) << "Peer sends invalid node data";
return; return;

View File

@@ -267,6 +267,7 @@ void LedgerConsensus::checkLCL()
void LedgerConsensus::handleLCL(const uint256& lclHash) void LedgerConsensus::handleLCL(const uint256& lclHash)
{ {
assert((lclHash != mPrevLedgerHash) || (mPreviousLedger->getHash() != lclHash));
if (mPrevLedgerHash != lclHash) if (mPrevLedgerHash != lclHash)
{ // first time switching to this ledger { // first time switching to this ledger
mPrevLedgerHash = lclHash; mPrevLedgerHash = lclHash;
@@ -286,30 +287,32 @@ void LedgerConsensus::handleLCL(const uint256& lclHash)
playbackProposals(); playbackProposals();
} }
if (mPreviousLedger->getHash() != mPrevLedgerHash) if (mPreviousLedger->getHash() == mPrevLedgerHash)
{ // we need to switch the ledger we're working from return;
Ledger::pointer newLCL = theApp->getLedgerMaster().getLedgerByHash(lclHash);
if (newLCL)
{
mPreviousLedger = newLCL;
mPrevLedgerHash = newLCL->getHash();
}
else if (!mAcquiringLedger || (mAcquiringLedger->getHash() != mPrevLedgerHash))
{ // need to start acquiring the correct consensus LCL
cLog(lsWARNING) << "Need consensus ledger " << mPrevLedgerHash;
mAcquiringLedger = theApp->getMasterLedgerAcquire().findCreate(mPrevLedgerHash, 0); // we need to switch the ledger we're working from
mHaveCorrectLCL = false; Ledger::pointer newLCL = theApp->getLedgerMaster().getLedgerByHash(lclHash);
return; if (newLCL)
} {
assert(newLCL->isClosed());
assert(newLCL->isImmutable());
assert(newLCL->getHash() == lclHash);
mPreviousLedger = newLCL;
mPrevLedgerHash = lclHash;
}
else if (!mAcquiringLedger || (mAcquiringLedger->getHash() != mPrevLedgerHash))
{ // need to start acquiring the correct consensus LCL
cLog(lsWARNING) << "Need consensus ledger " << mPrevLedgerHash;
if (mAcquiringLedger)
theApp->getMasterLedgerAcquire().dropLedger(mAcquiringLedger->getHash());
mAcquiringLedger = theApp->getMasterLedgerAcquire().findCreate(mPrevLedgerHash, 0);
mHaveCorrectLCL = false;
return;
} }
cLog(lsINFO) << "Have the consensus ledger " << mPrevLedgerHash; cLog(lsINFO) << "Have the consensus ledger " << mPrevLedgerHash;
mHaveCorrectLCL = true; mHaveCorrectLCL = true;
#if 0 // FIXME: can trigger early
if (mAcquiringLedger && mAcquiringLedger->isComplete())
theApp->getOPs().clearNeedNetworkLedger();
#endif
mCloseResolution = ContinuousLedgerTiming::getNextLedgerTimeResolution( mCloseResolution = ContinuousLedgerTiming::getNextLedgerTimeResolution(
mPreviousLedger->getCloseResolution(), mPreviousLedger->getCloseAgree(), mPreviousLedger->getCloseResolution(), mPreviousLedger->getCloseAgree(),
mPreviousLedger->getLedgerSeq() + 1); mPreviousLedger->getLedgerSeq() + 1);

View File

@@ -284,7 +284,7 @@ bool LedgerMaster::acquireMissingLedger(Ledger::ref origLedger, const uint256& l
} }
} }
if (theApp->getOPs().shouldFetchPack() && (ledgerSeq > 40000)) if (theApp->getOPs().shouldFetchPack(ledgerSeq) && (ledgerSeq > 40000))
{ // refill our fetch pack { // refill our fetch pack
Ledger::pointer nextLedger = mLedgerHistory.getLedgerBySeq(ledgerSeq + 1); Ledger::pointer nextLedger = mLedgerHistory.getLedgerBySeq(ledgerSeq + 1);
if (nextLedger) if (nextLedger)

View File

@@ -36,7 +36,7 @@ NetworkOPs::NetworkOPs(boost::asio::io_service& io_service, LedgerMaster* pLedge
mMode(omDISCONNECTED), mNeedNetworkLedger(false), mProposing(false), mValidating(false), mMode(omDISCONNECTED), mNeedNetworkLedger(false), mProposing(false), mValidating(false),
mNetTimer(io_service), mLedgerMaster(pLedgerMaster), mCloseTimeOffset(0), mLastCloseProposers(0), mNetTimer(io_service), mLedgerMaster(pLedgerMaster), mCloseTimeOffset(0), mLastCloseProposers(0),
mLastCloseConvergeTime(1000 * LEDGER_IDLE_INTERVAL), mLastValidationTime(0), mLastCloseConvergeTime(1000 * LEDGER_IDLE_INTERVAL), mLastValidationTime(0),
mFetchPack("FetchPack", 2048, 30), mLastFetchPack(0), mFetchPack("FetchPack", 2048, 20), mLastFetchPack(0), mFetchSeq(static_cast<uint32>(-1)),
mLastLoadBase(256), mLastLoadFactor(256) mLastLoadBase(256), mLastLoadFactor(256)
{ {
} }
@@ -2091,13 +2091,19 @@ bool NetworkOPs::getFetchPack(const uint256& hash, std::vector<unsigned char>& d
return true; return true;
} }
bool NetworkOPs::shouldFetchPack() bool NetworkOPs::shouldFetchPack(uint32 seq)
{ {
uint32 now = getNetworkTimeNC(); uint32 now = getNetworkTimeNC();
if ((mLastFetchPack == now) || ((mLastFetchPack + 1) == now)) if ((mLastFetchPack == now) || ((mLastFetchPack + 1) == now))
return false; return false;
mFetchPack.sweep(); if (seq < mFetchSeq) // fetch pack has only data for ledgers ahead of where we are
if (mFetchPack.getCacheSize() > 384) mFetchPack.clear();
else
mFetchPack.sweep();
int size = mFetchPack.getCacheSize();
if (size == 0)
mFetchSeq = static_cast<uint32>(-1);
else if (mFetchPack.getCacheSize() > 64)
return false; return false;
mLastFetchPack = now; mLastFetchPack = now;
return true; return true;
@@ -2108,9 +2114,10 @@ int NetworkOPs::getFetchSize()
return mFetchPack.getCacheSize(); return mFetchPack.getCacheSize();
} }
void NetworkOPs::gotFetchPack(bool progress) void NetworkOPs::gotFetchPack(bool progress, uint32 seq)
{ {
mLastFetchPack = 0; mLastFetchPack = 0;
mFetchSeq = seq; // earliest pack we have data on
theApp->getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", theApp->getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack",
boost::bind(&LedgerAcquireMaster::gotFetchPack, &theApp->getMasterLedgerAcquire(), _1)); boost::bind(&LedgerAcquireMaster::gotFetchPack, &theApp->getMasterLedgerAcquire(), _1));
} }

View File

@@ -130,6 +130,7 @@ protected:
TaggedCache< uint256, std::vector<unsigned char> > mFetchPack; TaggedCache< uint256, std::vector<unsigned char> > mFetchPack;
uint32 mLastFetchPack; uint32 mLastFetchPack;
uint32 mFetchSeq;
uint32 mLastLoadBase; uint32 mLastLoadBase;
uint32 mLastLoadFactor; uint32 mLastLoadFactor;
@@ -262,8 +263,8 @@ public:
bool stillNeedTXSet(const uint256& hash); bool stillNeedTXSet(const uint256& hash);
void makeFetchPack(Job&, boost::weak_ptr<Peer> peer, boost::shared_ptr<ripple::TMGetObjectByHash> request, void makeFetchPack(Job&, boost::weak_ptr<Peer> peer, boost::shared_ptr<ripple::TMGetObjectByHash> request,
Ledger::pointer wantLedger, Ledger::pointer haveLedger); Ledger::pointer wantLedger, Ledger::pointer haveLedger);
bool shouldFetchPack(); bool shouldFetchPack(uint32 seq);
void gotFetchPack(bool progress); void gotFetchPack(bool progress, uint32 seq);
void addFetchPack(const uint256& hash, boost::shared_ptr< std::vector<unsigned char> >& data); void addFetchPack(const uint256& hash, boost::shared_ptr< std::vector<unsigned char> >& data);
bool getFetchPack(const uint256& hash, std::vector<unsigned char>& data); bool getFetchPack(const uint256& hash, std::vector<unsigned char>& data);
int getFetchSize(); int getFetchSize();

View File

@@ -1,6 +1,8 @@
#ifndef PATHDB__H #ifndef PATHDB__H
#define PATHBD__H #define PATHDB__H
#include <set>
#include "uint256.h" #include "uint256.h"
#include "TaggedCache.h" #include "TaggedCache.h"
@@ -45,7 +47,7 @@ protected:
boost::recursive_mutex mLock; boost::recursive_mutex mLock;
TaggedCache<currencyIssuer_t, PathDBEntry> mFromCache; TaggedCache<currencyIssuer_t, PathDBEntry> mFromCache;
TaggedCache<currencyIssuer_t, PathDBEntry> mToCache; TaggedCache<currencyIssuer_t, PathDBEntry> mToCache;
std::set<PathDBEntry::pointer> mDirtyPaths; // std::set<PathDBEntry::pointer> mDirtyPaths;
public: public:

View File

@@ -1274,7 +1274,7 @@ void Peer::recvGetObjectByHash(const boost::shared_ptr<ripple::TMGetObjectByHash
} }
tLog(pLDo && (pLSeq != 0), lsDEBUG) << "Received partial fetch pack for " << pLSeq; tLog(pLDo && (pLSeq != 0), lsDEBUG) << "Received partial fetch pack for " << pLSeq;
if (packet.type() == ripple::TMGetObjectByHash::otFETCH_PACK) if (packet.type() == ripple::TMGetObjectByHash::otFETCH_PACK)
theApp->getOPs().gotFetchPack(progress); theApp->getOPs().gotFetchPack(progress, pLSeq);
} }
} }

View File

@@ -236,6 +236,31 @@ SHAMapTreeNode* SHAMap::getNodePointer(const SHAMapNode& id, const uint256& hash
return fetchNodeExternal(id, hash).get(); return fetchNodeExternal(id, hash).get();
} }
// Fetch the node identified by (id, hash). If it is missing from the map,
// try to satisfy the request from the sync filter's cache before giving up.
// Returns a raw, non-owning pointer (the node is retained in mTNByID).
// Rethrows SHAMapMissingNode if the filter cannot supply the node either.
SHAMapTreeNode* SHAMap::getNodePointer(const SHAMapNode& id, const uint256& hash, SHAMapSyncFilter* filter)
{
	try
	{
		return getNodePointer(id, hash);
	}
	catch (const SHAMapMissingNode&) // catch by const reference — avoids copying the exception object
	{
		if (filter)
		{
			std::vector<unsigned char> nodeData;
			if (filter->haveNode(id, hash, nodeData))
			{
				// Rebuild the node from the filter's cached data and adopt it into
				// this map. Built with mSeq - 1 — presumably so it is not treated
				// as owned/mutable by the current sequence; confirm against returnNode.
				SHAMapTreeNode::pointer node = boost::make_shared<SHAMapTreeNode>(
					boost::cref(id), boost::cref(nodeData), mSeq - 1, snfPREFIX, boost::cref(hash), true);
				mTNByID[id] = node;
				filter->gotNode(true, id, hash, nodeData, node->getType());
				return node.get();
			}
		}
		throw; // still missing — propagate the original SHAMapMissingNode
	}
}
void SHAMap::returnNode(SHAMapTreeNode::pointer& node, bool modify) void SHAMap::returnNode(SHAMapTreeNode::pointer& node, bool modify)
{ // make sure the node is suitable for the intended operation (copy on write) { // make sure the node is suitable for the intended operation (copy on write)
assert(node->isValid()); assert(node->isValid());

View File

@@ -369,6 +369,7 @@ protected:
SHAMapTreeNode::pointer getNode(const SHAMapNode& id); SHAMapTreeNode::pointer getNode(const SHAMapNode& id);
SHAMapTreeNode::pointer getNode(const SHAMapNode& id, const uint256& hash, bool modify); SHAMapTreeNode::pointer getNode(const SHAMapNode& id, const uint256& hash, bool modify);
SHAMapTreeNode* getNodePointer(const SHAMapNode& id, const uint256& hash); SHAMapTreeNode* getNodePointer(const SHAMapNode& id, const uint256& hash);
SHAMapTreeNode* getNodePointer(const SHAMapNode& id, const uint256& hash, SHAMapSyncFilter* filter);
SHAMapTreeNode* firstBelow(SHAMapTreeNode*); SHAMapTreeNode* firstBelow(SHAMapTreeNode*);
SHAMapTreeNode* lastBelow(SHAMapTreeNode*); SHAMapTreeNode* lastBelow(SHAMapTreeNode*);

View File

@@ -58,38 +58,21 @@ void SHAMap::getMissingNodes(std::vector<SHAMapNode>& nodeIDs, std::vector<uint2
SHAMapTreeNode* d = NULL; SHAMapTreeNode* d = NULL;
try try
{ {
d = getNodePointer(childID, childHash); d = getNodePointer(childID, childHash, filter);
if (d->isInner() && !d->isFullBelow())
{
have_all = false;
stack.push(d);
}
} }
catch (SHAMapMissingNode&) catch (SHAMapMissingNode&)
{ // node is not in the map { // node is not in the map
if (filter != NULL)
{
std::vector<unsigned char> nodeData;
if (filter->haveNode(childID, childHash, nodeData))
{
assert(mSeq >= 1);
SHAMapTreeNode::pointer ptr =
boost::make_shared<SHAMapTreeNode>(childID, nodeData, mSeq - 1,
snfPREFIX, childHash, true);
mTNByID[*ptr] = ptr;
d = ptr.get();
filter->gotNode(true, childID, childHash, nodeData, ptr->getType());
}
}
}
if (!d)
{ // we need this node
nodeIDs.push_back(childID); nodeIDs.push_back(childID);
hashes.push_back(childHash); hashes.push_back(childHash);
if (--max <= 0) if (--max <= 0)
return; return;
have_all = false; have_all = false;
} }
else if (d->isInner() && !d->isFullBelow()) // we might need children of this node
{
have_all = false;
stack.push(d);
}
} }
} }
} }
@@ -142,32 +125,15 @@ std::vector<uint256> SHAMap::getNeededHashes(int max, SHAMapSyncFilter* filter)
SHAMapTreeNode* d = NULL; SHAMapTreeNode* d = NULL;
try try
{ {
d = getNodePointer(childID, childHash); d = getNodePointer(childID, childHash, filter);
assert(d);
}
catch (SHAMapMissingNode&)
{ // node is not in the map
std::vector<unsigned char> nodeData;
if (filter && filter->haveNode(childID, childHash, nodeData))
{
SHAMapTreeNode::pointer ptr =
boost::make_shared<SHAMapTreeNode>(childID, nodeData, mSeq -1,
snfPREFIX, childHash, true);
mTNByID[*ptr] = ptr;
d = ptr.get();
filter->gotNode(true, childID, childHash, nodeData, ptr->getType());
}
}
if (d)
{
if (d->isInner() && !d->isFullBelow()) if (d->isInner() && !d->isFullBelow())
{ {
have_all = false; have_all = false;
stack.push(d); stack.push(d);
} }
} }
else catch (SHAMapMissingNode&)
{ { // node is not in the map
have_all = false; have_all = false;
ret.push_back(childHash); ret.push_back(childHash);
if (--max <= 0) if (--max <= 0)
@@ -191,14 +157,6 @@ std::vector<uint256> SHAMap::getNeededHashes(int max, SHAMapSyncFilter* filter)
return ret; return ret;
} }
std::list< std::pair<uint256, std::vector<unsigned char> > >
getSyncInfo(SHAMap::pointer have, SHAMap::pointer want, int max)
{
std::list< std::pair< uint256, std::vector<unsigned char> > > ret;
// WRITEME
return ret;
}
bool SHAMap::getNodeFat(const SHAMapNode& wanted, std::vector<SHAMapNode>& nodeIDs, bool SHAMap::getNodeFat(const SHAMapNode& wanted, std::vector<SHAMapNode>& nodeIDs,
std::list<std::vector<unsigned char> >& rawNodes, bool fatRoot, bool fatLeaves) std::list<std::vector<unsigned char> >& rawNodes, bool fatRoot, bool fatLeaves)
{ // Gets a node and some of its children { // Gets a node and some of its children
@@ -340,11 +298,8 @@ SMAddNode SHAMap::addKnownNode(const SHAMapNode& node, const std::vector<unsigne
return SMAddNode::okay(); return SMAddNode::okay();
SHAMapTreeNode* iNode = root.get(); SHAMapTreeNode* iNode = root.get();
while (!iNode->isLeaf() && !iNode->isFullBelow()) while (!iNode->isLeaf() && !iNode->isFullBelow() && (iNode->getDepth() < node.getDepth()))
{ {
if (iNode->isLeaf() || iNode->isFullBelow() || (iNode->getDepth() >= node.getDepth()))
return SMAddNode::okay();
int branch = iNode->selectBranch(node.getNodeID()); int branch = iNode->selectBranch(node.getNodeID());
assert(branch >= 0); assert(branch >= 0);
@@ -358,7 +313,7 @@ SMAddNode SHAMap::addKnownNode(const SHAMapNode& node, const std::vector<unsigne
try try
{ {
iNode = getNodePointer(iNode->getChildNodeID(branch), iNode->getChildHash(branch)); iNode = getNodePointer(iNode->getChildNodeID(branch), iNode->getChildHash(branch), filter);
} }
catch (SHAMapMissingNode) catch (SHAMapMissingNode)
{ {
@@ -374,6 +329,7 @@ SMAddNode SHAMap::addKnownNode(const SHAMapNode& node, const std::vector<unsigne
boost::make_shared<SHAMapTreeNode>(node, rawNode, mSeq - 1, snfWIRE, uZero, false); boost::make_shared<SHAMapTreeNode>(node, rawNode, mSeq - 1, snfWIRE, uZero, false);
if (iNode->getChildHash(branch) != newNode->getNodeHash()) if (iNode->getChildHash(branch) != newNode->getNodeHash())
{ {
cLog(lsWARNING) << "Corrupt node recevied";
return SMAddNode::invalid(); return SMAddNode::invalid();
} }
@@ -384,41 +340,11 @@ SMAddNode SHAMap::addKnownNode(const SHAMapNode& node, const std::vector<unsigne
filter->gotNode(false, node, iNode->getChildHash(branch), s.peekData(), newNode->getType()); filter->gotNode(false, node, iNode->getChildHash(branch), s.peekData(), newNode->getType());
} }
mTNByID[node] = newNode; mTNByID[node] = newNode;
if (!newNode->isLeaf()) // only a leaf can fill an inner node
return SMAddNode::useful();
try
{
for (int i = 0; i < 16; ++i)
{ // does the parent still need more nodes
if (!iNode->isEmptyBranch(i) && !fullBelowCache.isPresent(iNode->getChildHash(i)))
{
SHAMapTreeNode* d = getNodePointer(iNode->getChildNodeID(i), iNode->getChildHash(i));
if (d->isInner() && !d->isFullBelow()) // unfilled inner node
return SMAddNode::useful();
}
}
}
catch (SHAMapMissingNode)
{ // still missing something
return SMAddNode::useful();
}
// received leaf fills its parent
iNode->setFullBelow();
if (mType == smtSTATE)
{
fullBelowCache.add(iNode->getNodeHash());
dropBelow(iNode);
}
if (root->isFullBelow())
clearSynching();
return SMAddNode::useful(); return SMAddNode::useful();
} }
} }
cLog(lsTRACE) << "got inner node, already had it (late)"; cLog(lsTRACE) << "got node, already had it (late)";
return SMAddNode::okay(); return SMAddNode::okay();
} }

View File

@@ -77,6 +77,7 @@ public:
void setTargetSize(int size); void setTargetSize(int size);
void setTargetAge(int age); void setTargetAge(int age);
void sweep(); void sweep();
void clear();
bool touch(const key_type& key); bool touch(const key_type& key);
bool del(const key_type& key, bool valid); bool del(const key_type& key, bool valid);
@@ -128,6 +129,12 @@ template<typename c_Key, typename c_Data> int TaggedCache<c_Key, c_Data>::getTra
return mCache.size(); return mCache.size();
} }
// Discard every entry in the cache (target size/age settings are retained).
// Takes mLock, consistent with sweep() and the other mutators — without it,
// clear() racing a concurrent sweep()/touch() would corrupt mCache.
template<typename c_Key, typename c_Data> void TaggedCache<c_Key, c_Data>::clear()
{
	boost::recursive_mutex::scoped_lock sl(mLock);
	mCache.clear();
	mCacheCount = 0;
}
template<typename c_Key, typename c_Data> void TaggedCache<c_Key, c_Data>::sweep() template<typename c_Key, typename c_Data> void TaggedCache<c_Key, c_Data>::sweep()
{ {
boost::recursive_mutex::scoped_lock sl(mLock); boost::recursive_mutex::scoped_lock sl(mLock);