SHAMap improvements:

* Run key SHAMap unit tests on both backed and unbacked maps
* Remove obsolete version of SHAMap::addRootNode
* Our position maps should be unbacked and immutable
* Minimize hash operations for unbacked SHAMaps
  Not setting the sequence numbers to zero in
  SHAMap::walkSubTree causes extra hashes to be
  performed on subsequent operations
This commit is contained in:
JoelKatz
2015-12-23 11:09:28 -08:00
committed by Nik Bougalis
parent 6dab1657b1
commit 44fcab1081
7 changed files with 56 additions and 54 deletions

View File

@@ -1386,6 +1386,7 @@ void LedgerConsensusImp::takeInitialPosition (
{
std::shared_ptr<SHAMap> initialSet = std::make_shared <SHAMap> (
SHAMapType::TRANSACTION, app_.family());
initialSet->setUnbacked ();
// Build SHAMap containing all transactions in our open ledger
for (auto const& tx : initialLedger->txs)
@@ -1629,6 +1630,8 @@ void LedgerConsensusImp::updateOurPositions ()
if (changes)
{
ourPosition = ourPosition->snapShot (false);
auto newHash = ourPosition->getHash ().as_uint256();
JLOG (j_.info)
<< "Position change: CTime "

View File

@@ -179,8 +179,6 @@ public:
std::vector<uint256> getNeededHashes (int max, SHAMapSyncFilter * filter);
SHAMapAddNode addRootNode (SHAMapHash const& hash, Blob const& rootNode,
SHANodeFormat format, SHAMapSyncFilter * filter);
SHAMapAddNode addRootNode (Blob const& rootNode, SHANodeFormat format,
SHAMapSyncFilter * filter);
SHAMapAddNode addKnownNode (SHAMapNodeID const& nodeID, Blob const& rawNode,
SHAMapSyncFilter * filter);

View File

@@ -930,6 +930,7 @@ SHAMap::preFlushNode (std::shared_ptr<Node> node) const
// Make every node in this map private to it: walk the whole tree without
// writing anything (doWrite=false), which resets each reachable node's
// sequence number to zero so it is no longer shared with a parent
// snapshot. Returns the count reported by walkSubTree (number of nodes
// processed).
int SHAMap::unshare ()
{
// Don't share nodes with parent map
return walkSubTree (false, hotUNKNOWN, 0);
}
@@ -952,13 +953,21 @@ SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq)
if (root_->isLeaf())
{ // special case -- root_ is leaf
root_ = preFlushNode (std::move(root_));
root_->updateHash();
if (doWrite && backed_)
root_ = writeNode(t, seq, std::move(root_));
else
root_->setSeq (0);
return 1;
}
auto node = std::static_pointer_cast<SHAMapInnerNode>(root_);
if (node->isEmpty ())
return flushed;
{ // replace empty root with a new empty root
root_ = std::make_shared <SHAMapInnerNode> (0);
return 1;
}
// Stack of {parent,index,child} pointers representing
// inner nodes we are in the process of flushing
@@ -989,10 +998,11 @@ SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq)
{
// This is a node that needs to be flushed
child = preFlushNode(std::move(child));
if (child->isInner ())
{
// save our place and work on this node
child = preFlushNode(std::move(child));
stack.emplace (std::move (node), branch);
@@ -1004,13 +1014,13 @@ SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq)
// flush this leaf
++flushed;
child = preFlushNode(std::move(child));
assert (node->getSeq() == seq_);
child->updateHash();
if (doWrite && backed_)
child = writeNode(t, seq, std::move(child));
else
child->setSeq (0);
node->shareChild (branch, child);
}
@@ -1025,6 +1035,8 @@ SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq)
if (doWrite && backed_)
node = std::static_pointer_cast<SHAMapInnerNode>(writeNode(t, seq,
std::move(node)));
else
node->setSeq (0);
++flushed;

View File

@@ -407,46 +407,6 @@ bool SHAMap::getRootNode (Serializer& s, SHANodeFormat format) const
return true;
}
// Install a received serialized root node into this (synching) map.
// @param rootNode  raw serialized node data
// @param format    wire/prefix serialization format of rootNode
// @param filter    optional sync filter notified of the accepted node
// @return duplicate() if a root is already present, invalid() if the
//         data cannot be deserialized, useful() on success.
// NOTE(review): obsolete overload — the hash-checked overload (taking
// SHAMapHash const&) is the surviving variant; this one accepts the node
// without verifying it against an expected hash.
SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode,
SHANodeFormat format, SHAMapSyncFilter* filter)
{
// we already have a root_ node (non-zero hash) -- nothing to do
if (root_->getNodeHash ().isNonZero ())
{
if (journal_.trace) journal_.trace <<
"got root node, already have one";
return SHAMapAddNode::duplicate ();
}
assert (seq_ >= 1);
// Deserialize; no expected hash is supplied (SHAMapHash{uZero}), so the
// node is accepted based on structural validity alone.
auto node = SHAMapAbstractNode::make(
rootNode, 0, format, SHAMapHash{uZero}, false, f_.journal ());
if (!node || !node->isValid ())
return SHAMapAddNode::invalid ();
#ifdef BEAST_DEBUG
node->dump (SHAMapNodeID (), journal_);
#endif
// Only backed maps keep nodes in the shared tree-node cache.
if (backed_)
canonicalize (node->getNodeHash (), node);
root_ = node;
// A leaf root means the entire map is this one node -- sync complete.
if (root_->isLeaf())
clearSynching ();
// Report the accepted root to the sync filter in prefix format.
if (filter)
{
Serializer s;
root_->addRaw (s, snfPREFIX);
filter->gotNode (false, SHAMapNodeID{}, root_->getNodeHash (),
s.modData (), root_->getType ());
}
return SHAMapAddNode::useful ();
}
SHAMapAddNode SHAMap::addRootNode (SHAMapHash const& hash, Blob const& rootNode, SHANodeFormat format,
SHAMapSyncFilter* filter)
{

View File

@@ -48,7 +48,16 @@ public:
// Run the full suite twice: once with a backed map and once with an
// unbacked one. Each variant registers its own testcase names inside
// run(bool) (e.g. "add/traverse backed"/"add/traverse unbacked"), so no
// generic testcase is registered here -- the old
// testcase("add/traverse") call was immediately superseded and has been
// removed.
void run ()
{
    run (true);   // backed map
    run (false);  // unbacked map (setUnbacked is called inside)
}
void run (bool backed)
{
if (backed)
testcase ("add/traverse backed");
else
testcase ("add/traverse unbacked");
beast::Journal const j; // debug journal
tests::TestFamily f(j);
@@ -62,6 +71,9 @@ public:
h5.SetHex ("a92891fe4ef6cee585fdc6fda0e09eb4d386363158ec3321b8123e5a772c6ca7");
SHAMap sMap (SHAMapType::FREE, f);
if (! backed)
sMap.setUnbacked ();
SHAMapItem i1 (h1, IntToVUC (1)), i2 (h2, IntToVUC (2)), i3 (h3, IntToVUC (3)), i4 (h4, IntToVUC (4)), i5 (h5, IntToVUC (5));
unexpected (!sMap.addItem (i2, true, false), "no add");
unexpected (!sMap.addItem (i1, true, false), "no add");
@@ -86,7 +98,11 @@ public:
++i;
unexpected (i != e, "bad traverse");
testcase ("snapshot");
if (backed)
testcase ("snapshot backed");
else
testcase ("snapshot unbacked");
SHAMapHash mapHash = sMap.getHash ();
std::shared_ptr<SHAMap> map2 = sMap.snapShot (false);
unexpected (sMap.getHash () != mapHash, "bad snapshot");
@@ -95,7 +111,10 @@ public:
unexpected (sMap.getHash () == mapHash, "bad snapshot");
unexpected (map2->getHash () != mapHash, "bad snapshot");
testcase ("build/tear");
if (backed)
testcase ("build/tear backed");
else
testcase ("build/tear unbacked");
{
std::vector<uint256> keys(8);
keys[0].SetHex ("b92891fe4ef6cee585fdc6fda1e09eb4d386363158ec3321b8123e5a772c6ca8");
@@ -118,23 +137,29 @@ public:
hashes[7].SetHex ("DF4220E93ADC6F5569063A01B4DC79F8DB9553B6A3222ADE23DEA02BBE7230E5");
SHAMap map (SHAMapType::FREE, f);
if (! backed)
map.setUnbacked ();
expect (map.getHash() == zero, "bad initial empty map hash");
for (int i = 0; i < keys.size(); ++i)
{
SHAMapItem item (keys[i], IntToVUC (i));
map.addItem (item, true, false);
expect (map.addItem (item, true, false), "unable to add item");
expect (map.getHash().as_uint256() == hashes[i], "bad buildup map hash");
}
for (int i = keys.size() - 1; i >= 0; --i)
{
expect (map.getHash().as_uint256() == hashes[i], "bad teardown hash");
map.delItem (keys[i]);
expect (map.delItem (keys[i]), "unable to remove item");
}
expect (map.getHash() == zero, "bad final empty map hash");
}
testcase ("iterate");
if (backed)
testcase ("iterate backed");
else
testcase ("iterate unbacked");
{
std::vector<uint256> keys(8);
keys[0].SetHex ("f22891fe4ef6cee585fdc6fda1e09eb4d386363158ec3321b8123e5a772c6ca8");
@@ -148,6 +173,8 @@ public:
tests::TestFamily f{beast::Journal{}};
SHAMap map{SHAMapType::FREE, f};
if (! backed)
map.setUnbacked ();
for (auto const& k : keys)
map.addItem(SHAMapItem{k, IntToVUC(0)}, true, false);

View File

@@ -123,7 +123,8 @@ public:
unexpected (gotNodes.size () < 1, "NodeSize");
unexpected (!destination.addRootNode (*gotNodes.begin (), snfWIRE, nullptr).isGood(), "AddRootNode");
unexpected (!destination.addRootNode (source.getHash(),
*gotNodes.begin (), snfWIRE, nullptr).isGood(), "AddRootNode");
nodeIDs.clear ();
gotNodes.clear ();

View File

@@ -50,6 +50,7 @@ public:
TestFamily (beast::Journal j)
: treecache_ ("TreeNodeCache", 65536, 60, clock_, j)
, fullbelow_ ("full_below", clock_)
, j_ (j)
{
Section testSection;
testSection.set("type", "memory");