diff --git a/src/ripple_app/ledger/Ledger.cpp b/src/ripple_app/ledger/Ledger.cpp index 5620a3a33c..dd9cad724d 100644 --- a/src/ripple_app/ledger/Ledger.cpp +++ b/src/ripple_app/ledger/Ledger.cpp @@ -1046,7 +1046,6 @@ Json::Value Ledger::getJson (int options) { Json::Value txns (Json::arrayValue); SHAMapTreeNode::TNType type; - SHAMap::ScopedLockType l (mTransactionMap->peekMutex (), __FILE__, __LINE__); for (SHAMapItem::pointer item = mTransactionMap->peekFirstItem (type); !!item; item = mTransactionMap->peekNextItem (item->getTag (), type)) @@ -1131,10 +1130,8 @@ void Ledger::setCloseTime (boost::posix_time::ptime ptm) mCloseTime = iToSeconds (ptm); } -// XXX Use shared locks where possible? LedgerStateParms Ledger::writeBack (LedgerStateParms parms, SLE::ref entry) { - SHAMap::ScopedLockType l (mAccountStateMap->peekMutex (), __FILE__, __LINE__); bool create = false; if (!mAccountStateMap->hasItem (entry->getIndex ())) @@ -1187,8 +1184,6 @@ SLE::pointer Ledger::getSLEi (uint256 const& uId) { uint256 hash; - SHAMap::ScopedLockType sl (mAccountStateMap->peekMutex (), __FILE__, __LINE__); - SHAMapItem::pointer node = mAccountStateMap->peekItem (uId, hash); if (!node) diff --git a/src/ripple_app/ripple_app.h b/src/ripple_app/ripple_app.h index 65a9790abd..65325e3a71 100644 --- a/src/ripple_app/ripple_app.h +++ b/src/ripple_app/ripple_app.h @@ -44,6 +44,7 @@ #include #include #include +#include //------------------------------------------------------------------------------ diff --git a/src/ripple_app/shamap/SHAMap.cpp b/src/ripple_app/shamap/SHAMap.cpp index a3a84c3e8c..39ecfc2a22 100644 --- a/src/ripple_app/shamap/SHAMap.cpp +++ b/src/ripple_app/shamap/SHAMap.cpp @@ -28,8 +28,7 @@ void SHAMap::DefaultMissingNodeHandler::operator() (uint32 refNUm) SHAMap::SHAMap (SHAMapType t, uint32 seq, MissingNodeHandler missing_node_handler) - : mLock (this, "SHAMap", __FILE__, __LINE__) - , mSeq (seq) + : mSeq (seq) , mLedgerSeq (0) , mState (smsModifying) , mType (t) @@ -41,13 
+40,12 @@ SHAMap::SHAMap (SHAMapType t, uint32 seq, root = boost::make_shared (mSeq, SHAMapNode (0, uint256 ())); root->makeInner (); - mTNByID[*root] = root; + mTNByID.replace(*root, root); } SHAMap::SHAMap (SHAMapType t, uint256 const& hash, MissingNodeHandler missing_node_handler) - : mLock (this, "SHAMap", __FILE__, __LINE__) - , mSeq (1) + : mSeq (1) , mLedgerSeq (0) , mState (smsSynching) , mType (t) @@ -58,7 +56,7 @@ SHAMap::SHAMap (SHAMapType t, uint256 const& hash, root = boost::make_shared (mSeq, SHAMapNode (0, uint256 ())); root->makeInner (); - mTNByID[*root] = root; + mTNByID.replace(*root, root); } TaggedCacheType< SHAMap::TNIndex, SHAMapTreeNode, UptimeTimerAdapter> @@ -109,12 +107,12 @@ std::size_t hash_value (const SHAMapNode& mn) SHAMap::pointer SHAMap::snapShot (bool isMutable) { SHAMap::pointer ret = boost::make_shared (mType); - SHAMap& newMap = *ret; + SHAMap& newMap = *ret; // Return a new SHAMap that is a snapshot of this one // Initially most nodes are shared and CoW is forced where needed { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); newMap.mSeq = mSeq; newMap.mTNByID = mTNByID; newMap.root = root; @@ -122,15 +120,15 @@ SHAMap::pointer SHAMap::snapShot (bool isMutable) if (!isMutable) newMap.mState = smsImmutable; - // If the existing map has any nodes it might modify, unshare them now + // If the existing map has any nodes it might modify, unshare ours now if (mState != smsImmutable) { - BOOST_FOREACH(NodeMap::value_type& nodeIt, mTNByID) + BOOST_FOREACH(NodeMap::value_type& nodeIt, mTNByID.peekMap()) { if (nodeIt.second->getSeq() == mSeq) { // We might modify this node, so duplicate it in the snapShot SHAMapTreeNode::pointer newNode = boost::make_shared (*nodeIt.second, mSeq); - newMap.mTNByID[*newNode] = newNode; + newMap.mTNByID.replace (*newNode, newNode); if (newNode->isRoot ()) newMap.root = newNode; } @@ -212,13 +210,10 @@ void SHAMap::dirtyUp (std::stack& stack, uint256 const& 
SHAMapTreeNode::pointer SHAMap::checkCacheNode (const SHAMapNode& iNode) { - boost::unordered_map::iterator it = mTNByID.find (iNode); - - if (it == mTNByID.end ()) - return SHAMapTreeNode::pointer (); - - it->second->touch (mSeq); - return it->second; + SHAMapTreeNode::pointer ret = mTNByID.retrieve(iNode); + if (ret && (ret->getSeq()!= 0)) + ret->touch (mSeq); + return ret; } SHAMapTreeNode::pointer SHAMap::walkTo (uint256 const& id, bool modify) @@ -311,13 +306,10 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has SHAMapTreeNode* SHAMap::getNodePointerNT (const SHAMapNode& id, uint256 const& hash) { - // fast, but you do not hold a reference - boost::unordered_map::iterator it = mTNByID.find (id); - - if (it != mTNByID.end ()) - return it->second.get (); - - return fetchNodeExternalNT (id, hash).get (); + SHAMapTreeNode::pointer ret = mTNByID.retrieve (id); + if (!ret) + ret = fetchNodeExternalNT (id, hash); + return ret ? ret.get() : nullptr; } SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& hash, SHAMapSyncFilter* filter) @@ -335,7 +327,7 @@ SHAMapTreeNode* SHAMap::getNodePointerNT (const SHAMapNode& id, uint256 const& h SHAMapTreeNode* node = getNodePointerNT (id, hash); if (!node && filter) - { + { // Our regular node store didn't have the node. 
See if the filter does Blob nodeData; if (filter->haveNode (id, hash, nodeData)) @@ -343,8 +335,12 @@ SHAMapTreeNode* SHAMap::getNodePointerNT (const SHAMapNode& id, uint256 const& h SHAMapTreeNode::pointer node = boost::make_shared ( boost::cref (id), boost::cref (nodeData), 0, snfPREFIX, boost::cref (hash), true); canonicalize (hash, node); - mTNByID[id] = node; - filter->gotNode (true, id, hash, nodeData, node->getType ()); + + // Canonicalize the node with mTNByID to make sure all threads gets the same node + // If the node is new, tell the filter + if (mTNByID.canonicalize (id, &node)) + filter->gotNode (true, id, hash, nodeData, node->getType ()); + return node.get (); } } @@ -363,11 +359,12 @@ void SHAMap::returnNode (SHAMapTreeNode::pointer& node, bool modify) { // have a CoW assert (node->getSeq () < mSeq); + assert (mState != smsImmutable); node = boost::make_shared (*node, mSeq); // here's to the new node, same as the old node assert (node->isValid ()); - mTNByID[*node] = node; + mTNByID.replace (*node, node); if (node->isRoot ()) root = node; @@ -501,7 +498,8 @@ static const SHAMapItem::pointer no_item; SHAMapItem::pointer SHAMap::peekFirstItem () { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* node = firstBelow (root.get ()); if (!node) @@ -512,7 +510,8 @@ SHAMapItem::pointer SHAMap::peekFirstItem () SHAMapItem::pointer SHAMap::peekFirstItem (SHAMapTreeNode::TNType& type) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* node = firstBelow (root.get ()); if (!node) @@ -524,7 +523,8 @@ SHAMapItem::pointer SHAMap::peekFirstItem (SHAMapTreeNode::TNType& type) SHAMapItem::pointer SHAMap::peekLastItem () { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* node = lastBelow (root.get ()); if (!node) @@ -543,7 +543,7 @@ SHAMapItem::pointer SHAMap::peekNextItem (uint256 const& id) SHAMapItem::pointer 
SHAMap::peekNextItem (uint256 const& id, SHAMapTreeNode::TNType& type) { // Get a pointer to the next item in the tree after a given item - item need not be in tree - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); std::stack stack = getStack (id, true); @@ -583,7 +583,7 @@ SHAMapItem::pointer SHAMap::peekNextItem (uint256 const& id, SHAMapTreeNode::TNT // Get a pointer to the previous item in the tree after a given item - item need not be in tree SHAMapItem::pointer SHAMap::peekPrevItem (uint256 const& id) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); std::stack stack = getStack (id, true); @@ -621,7 +621,8 @@ SHAMapItem::pointer SHAMap::peekPrevItem (uint256 const& id) SHAMapItem::pointer SHAMap::peekItem (uint256 const& id) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* leaf = walkToPointer (id); if (!leaf) @@ -632,7 +633,8 @@ SHAMapItem::pointer SHAMap::peekItem (uint256 const& id) SHAMapItem::pointer SHAMap::peekItem (uint256 const& id, SHAMapTreeNode::TNType& type) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* leaf = walkToPointer (id); if (!leaf) @@ -644,7 +646,8 @@ SHAMapItem::pointer SHAMap::peekItem (uint256 const& id, SHAMapTreeNode::TNType& SHAMapItem::pointer SHAMap::peekItem (uint256 const& id, uint256& hash) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* leaf = walkToPointer (id); if (!leaf) @@ -658,7 +661,7 @@ SHAMapItem::pointer SHAMap::peekItem (uint256 const& id, uint256& hash) bool SHAMap::hasItem (uint256 const& id) { // does the tree have an item with this ID - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); SHAMapTreeNode* leaf = walkToPointer (id); return (leaf != NULL); @@ -667,7 +670,7 @@ bool SHAMap::hasItem (uint256 const& id) bool SHAMap::delItem (uint256 const& id) { // 
delete the item with this ID - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); assert (mState != smsImmutable); std::stack stack = getStack (id, true); @@ -748,7 +751,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta SHAMapTreeNode::TNType type = !isTransaction ? SHAMapTreeNode::tnACCOUNT_STATE : (hasMeta ? SHAMapTreeNode::tnTRANSACTION_MD : SHAMapTreeNode::tnTRANSACTION_NM); - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); assert (mState != smsImmutable); std::stack stack = getStack (tag, true); @@ -773,7 +776,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta SHAMapTreeNode::pointer newNode = boost::make_shared (node->getChildNodeID (branch), item, type, mSeq); - if (!mTNByID.emplace (SHAMapNode (*newNode), newNode).second) + if (!mTNByID.peekMap().emplace (SHAMapNode (*newNode), newNode).second) { WriteLog (lsFATAL, SHAMap) << "Node: " << *node; WriteLog (lsFATAL, SHAMap) << "NewNode: " << *newNode; @@ -802,7 +805,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta boost::make_shared (mSeq, node->getChildNodeID (b1)); newNode->makeInner (); - if (!mTNByID.emplace (SHAMapNode (*newNode), newNode).second) + if (!mTNByID.peekMap().emplace (SHAMapNode (*newNode), newNode).second) assert (false); stack.push (node); @@ -816,7 +819,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta boost::make_shared (node->getChildNodeID (b1), item, type, mSeq); assert (newNode->isValid () && newNode->isLeaf ()); - if (!mTNByID.emplace (SHAMapNode (*newNode), newNode).second) + if (!mTNByID.peekMap().emplace (SHAMapNode (*newNode), newNode).second) assert (false); node->setChildHash (b1, newNode->getNodeHash ()); // OPTIMIZEME hash op not needed @@ -825,7 +828,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta newNode = boost::make_shared 
(node->getChildNodeID (b2), otherItem, type, mSeq); assert (newNode->isValid () && newNode->isLeaf ()); - if (!mTNByID.emplace (SHAMapNode (*newNode), newNode).second) + if (!mTNByID.peekMap().emplace (SHAMapNode (*newNode), newNode).second) assert (false); node->setChildHash (b2, newNode->getNodeHash ()); @@ -846,7 +849,7 @@ bool SHAMap::updateGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasM // can't change the tag but can change the hash uint256 tag = item->getTag (); - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); assert (mState != smsImmutable); std::stack stack = getStack (tag, true); @@ -891,6 +894,12 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal (const SHAMapNode& id, uint256 return ret; } +/** Look at the cache and back end (things external to this SHAMap) to + find a tree node. Only a read lock is required because mTNByID has its + own, internal synchronization. Every thread calling this function must + get a shared pointer to the same underlying node. + This function does not throw. 
+*/ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint256 const& hash) { SHAMapTreeNode::pointer ret; @@ -898,6 +907,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 if (!getApp().running ()) return ret; + // Check the cache of shared, immutable tree nodes ret = getCache (hash, id); if (ret) { // The node was found in the TreeNodeCache @@ -920,6 +930,8 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 try { + // We make this node immutable (seq == 0) so that it can be shared + // CoW is needed if it is modified ret = boost::make_shared (id, obj->getData (), 0, snfPREFIX, hash, true); if (id != *ret) @@ -936,6 +948,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 return SHAMapTreeNode::pointer (); } + // Share this immutable tree node in the TreeNodeCache canonicalize (hash, ret); } catch (...) @@ -947,11 +960,11 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 if (id.isRoot ()) // it is legal to replace an existing root { - mTNByID[id] = ret; + mTNByID.replace(id, ret); root = ret; } - else if (!mTNByID.emplace (id, ret).second) - assert (false); + else // Make sure other threads get pointers to the same underlying object + mTNByID.canonicalize (id, &ret); return ret; } @@ -985,7 +998,7 @@ bool SHAMap::fetchRoot (uint256 const& hash, SHAMapSyncFilter* filter) root = boost::make_shared (SHAMapNode (), nodeData, mSeq - 1, snfPREFIX, hash, true); - mTNByID[*root] = root; + mTNByID.replace(*root, root); filter->gotNode (true, SHAMapNode (), hash, nodeData, root->getType ()); } @@ -1036,7 +1049,7 @@ int SHAMap::flushDirty (NodeMap& map, int maxNodes, NodeObjectType t, uint32 seq boost::shared_ptr SHAMap::disarmDirty () { // stop saving dirty nodes - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); boost::shared_ptr ret; ret.swap (mDirtyNodes); @@ -1072,17 +1085,21 @@
SHAMapTreeNode::pointer SHAMap::getNode (const SHAMapNode& nodeID) // It throws if the map is incomplete SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& nodeID) { - boost::unordered_map::iterator it = mTNByID.find (nodeID); - if (it != mTNByID.end()) + SHAMapTreeNode::pointer nodeptr = mTNByID.retrieve (nodeID); + if (nodeptr) { - it->second->touch(mSeq); - return it->second.get(); + SHAMapTreeNode* ret = nodeptr.get (); + ret->touch(mSeq); + return ret; } SHAMapTreeNode* node = root.get(); while (nodeID != *node) { + if (node->isLeaf ()) + return NULL; + int branch = node->selectBranch (nodeID.getNodeID ()); assert (branch >= 0); @@ -1101,7 +1118,8 @@ bool SHAMap::getPath (uint256 const& index, std::vector< Blob >& nodes, SHANodeF // Return the path of nodes to the specified index in the specified format // Return value: true = node present, false = node not present - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + SHAMapTreeNode* inNode = root.get (); while (!inNode->isLeaf ()) @@ -1131,13 +1149,13 @@ bool SHAMap::getPath (uint256 const& index, std::vector< Blob >& nodes, SHANodeF void SHAMap::dropCache () { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); assert (mState == smsImmutable); mTNByID.clear (); if (root) - mTNByID[*root] = root; + mTNByID.canonicalize(*root, &root); } void SHAMap::dropBelow (SHAMapTreeNode* d) @@ -1151,10 +1169,10 @@ void SHAMap::dropBelow (SHAMapTreeNode* d) void SHAMap::dump (bool hash) { WriteLog (lsINFO, SHAMap) << " MAP Contains"; - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); - for (boost::unordered_map::iterator it = mTNByID.begin (); - it != mTNByID.end (); ++it) + for (boost::unordered_map::iterator it = mTNByID.peekMap().begin (); + it != mTNByID.peekMap().end (); ++it) { WriteLog (lsINFO, SHAMap) << it->second->getString (); CondLog (hash, lsINFO, SHAMap) << it->second->getNodeHash (); diff --git 
a/src/ripple_app/shamap/SHAMap.h b/src/ripple_app/shamap/SHAMap.h index 0eac5e8f46..20e657f502 100644 --- a/src/ripple_app/shamap/SHAMap.h +++ b/src/ripple_app/shamap/SHAMap.h @@ -57,8 +57,9 @@ public: typedef std::map Delta; typedef boost::unordered_map NodeMap; - typedef RippleRecursiveMutex LockType; - typedef LockType::ScopedLockType ScopedLockType; + typedef boost::shared_mutex LockType; + typedef boost::shared_lock ScopedReadLockType; + typedef boost::unique_lock ScopedWriteLockType; public: // build new map @@ -86,12 +87,6 @@ public: mLedgerSeq = lseq; } - // hold the map stable across operations - LockType const& peekMutex () const - { - return mLock; - } - bool hasNode (const SHAMapNode & id); bool fetchRoot (uint256 const & hash, SHAMapSyncFilter * filter); @@ -148,9 +143,9 @@ public: assert (mState != smsInvalid); mState = smsImmutable; } - void clearImmutable () + bool isImmutable () { - mState = smsModifying; + return mState == smsImmutable; } bool isSynching () const { @@ -192,8 +187,8 @@ public: } // overloads for backed maps - boost::shared_ptr fetchNodeExternal (const SHAMapNode & id, uint256 const & hash); // throws - boost::shared_ptr fetchNodeExternalNT (const SHAMapNode & id, uint256 const & hash); // no throw + SHAMapTreeNode::pointer fetchNodeExternal (const SHAMapNode & id, uint256 const & hash); // throws + SHAMapTreeNode::pointer fetchNodeExternalNT (const SHAMapNode & id, uint256 const & hash); // no throw bool operator== (const SHAMap & s) { @@ -277,15 +272,15 @@ private: void visitLeavesInternal (std::function& function); private: -#if 1 - LockType mLock; -#else + + // This lock protects key SHAMap structures. + // One may change anything with a write lock. 
+ // With a read lock, one may not invalidate pointers to existing members of mTNByID mutable LockType mLock; -#endif uint32 mSeq; uint32 mLedgerSeq; // sequence number of ledger this is part of - NodeMap mTNByID; + SyncUnorderedMapType< SHAMapNode, SHAMapTreeNode::pointer > mTNByID; boost::shared_ptr mDirtyNodes; SHAMapTreeNode::pointer root; SHAMapState mState; diff --git a/src/ripple_app/shamap/SHAMapDelta.cpp b/src/ripple_app/shamap/SHAMapDelta.cpp index 44e8bb929f..3639814453 100644 --- a/src/ripple_app/shamap/SHAMapDelta.cpp +++ b/src/ripple_app/shamap/SHAMapDelta.cpp @@ -138,7 +138,7 @@ bool SHAMap::compare (SHAMap::ref otherMap, Delta& differences, int maxCount) std::stack nodeStack; // track nodes we've pushed - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); if (getHash () == otherMap->getHash ()) return true; @@ -236,7 +236,7 @@ void SHAMap::walkMap (std::vector& missingNodes, int maxMissi { std::stack nodeStack; - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); if (!root->isInner ()) // root is only node, and we have it return; diff --git a/src/ripple_app/shamap/SHAMapSync.cpp b/src/ripple_app/shamap/SHAMapSync.cpp index bf637e1bac..a4d4013975 100644 --- a/src/ripple_app/shamap/SHAMapSync.cpp +++ b/src/ripple_app/shamap/SHAMapSync.cpp @@ -25,12 +25,9 @@ KeyCache SHAMap::fullBelowCache ("fullBelowCache", void SHAMap::visitLeaves (std::function function) { - SHAMap::pointer snap; - { - ScopedLockType sl (mLock, __FILE__, __LINE__); - snap = snapShot (false); - } - snap->visitLeavesInternal(function); + // Make a snapshot of this map so we don't need to hold + // a lock on the map we're visiting + snapShot (false)->visitLeavesInternal (function); } void SHAMap::visitLeavesInternal (std::function& function) @@ -95,10 +92,13 @@ void SHAMap::visitLeavesInternal (std::function& fu } } +/** Get a list of node IDs and hashes for nodes that are part of this SHAMap but not available locally. 
+ The filter can hold alternate sources of nodes that are not permanently stored locally +*/ void SHAMap::getMissingNodes (std::vector& nodeIDs, std::vector& hashes, int max, SHAMapSyncFilter* filter) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); assert (root->isValid ()); assert (root->getNodeHash().isNonZero ()); @@ -192,7 +192,7 @@ bool SHAMap::getNodeFat (const SHAMapNode& wanted, std::vector& node std::list& rawNodes, bool fatRoot, bool fatLeaves) { // Gets a node and some of its children - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); SHAMapTreeNode* node = getNodePointer(wanted); @@ -247,7 +247,7 @@ bool SHAMap::getNodeFat (const SHAMapNode& wanted, std::vector& node bool SHAMap::getRootNode (Serializer& s, SHANodeFormat format) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); root->addRaw (s, format); return true; } @@ -255,7 +255,7 @@ bool SHAMap::getRootNode (Serializer& s, SHANodeFormat format) SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode, SHANodeFormat format, SHAMapSyncFilter* filter) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); // we already have a root node if (root->getNodeHash ().isNonZero ()) @@ -276,7 +276,7 @@ SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode, SHANodeFormat format, #endif root = node; - mTNByID[*root] = root; + mTNByID.replace(*root, root); if (root->isLeaf()) clearSynching (); @@ -294,7 +294,7 @@ SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode, SHANodeFormat format, SHAMapAddNode SHAMap::addRootNode (uint256 const& hash, Blob const& rootNode, SHANodeFormat format, SHAMapSyncFilter* filter) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedWriteLockType sl (mLock); // we already have a root node if (root->getNodeHash ().isNonZero ()) @@ -312,7 +312,7 @@ SHAMapAddNode SHAMap::addRootNode (uint256 const& hash, Blob const& rootNode, SH return 
SHAMapAddNode::invalid (); root = node; - mTNByID[*root] = root; + mTNByID.replace(*root, root); if (root->isLeaf()) clearSynching (); @@ -329,6 +329,8 @@ SHAMapAddNode SHAMap::addRootNode (uint256 const& hash, Blob const& rootNode, SH SHAMapAddNode SHAMap::addKnownNode (const SHAMapNode& node, Blob const& rawNode, SHAMapSyncFilter* filter) { + ScopedWriteLockType sl (mLock); + // return value: true=okay, false=error assert (!node.isRoot ()); @@ -338,8 +340,6 @@ SHAMapAddNode SHAMap::addKnownNode (const SHAMapNode& node, Blob const& rawNode, return SHAMapAddNode::duplicate (); } - ScopedLockType sl (mLock, __FILE__, __LINE__); - if (checkCacheNode (node)) // Do we already have this node? return SHAMapAddNode::duplicate (); @@ -383,14 +383,13 @@ SHAMapAddNode SHAMap::addKnownNode (const SHAMapNode& node, Blob const& rawNode, canonicalize (iNode->getChildHash (branch), newNode); - if (filter) + if (mTNByID.canonicalize(node, &newNode) && filter) { Serializer s; newNode->addRaw (s, snfPREFIX); filter->gotNode (false, node, iNode->getChildHash (branch), s.modData (), newNode->getType ()); } - mTNByID[node] = newNode; return SHAMapAddNode::useful (); } iNode = nextNode; @@ -404,7 +403,7 @@ bool SHAMap::deepCompare (SHAMap& other) { // Intended for debug/test only std::stack stack; - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); stack.push (root); @@ -472,11 +471,14 @@ bool SHAMap::deepCompare (SHAMap& other) return true; } +/** Does this map have this inner node? 
+ You must hold a read lock to call this function +*/ bool SHAMap::hasInnerNode (const SHAMapNode& nodeID, uint256 const& nodeHash) { - boost::unordered_map::iterator it = mTNByID.find (nodeID); - if (it != mTNByID.end()) - return it->second->getNodeHash() == nodeHash; + SHAMapTreeNode::pointer ptr = mTNByID.retrieve (nodeID); + if (ptr) + return ptr->getNodeHash() == nodeHash; SHAMapTreeNode* node = root.get (); @@ -493,6 +495,9 @@ bool SHAMap::hasInnerNode (const SHAMapNode& nodeID, uint256 const& nodeHash) return node->getNodeHash () == nodeHash; } +/** Does this map have this leaf node? + You must hold a read lock to call this function +*/ bool SHAMap::hasLeafNode (uint256 const& tag, uint256 const& nodeHash) { SHAMapTreeNode* node = root.get (); @@ -534,14 +539,14 @@ std::list SHAMap::getFetchPack (SHAMap* have, bool inc void SHAMap::getFetchPack (SHAMap* have, bool includeLeaves, int max, std::function func) { - ScopedLockType ul1 (mLock, __FILE__, __LINE__); + ScopedReadLockType ul1 (mLock); - std::unique_ptr ul2; + std::unique_ptr ul2; if (have) { // VFALCO NOTE This looks like a mess. A dynamically allocated scoped lock? - ul2.reset (new LockType::ScopedTryLockType (have->mLock, __FILE__, __LINE__)); + ul2.reset (new ScopedReadLockType (have->mLock, boost::try_to_lock)); if (! 
ul2->owns_lock ()) { @@ -565,6 +570,7 @@ void SHAMap::getFetchPack (SHAMap* have, bool includeLeaves, int max, Serializer s; root->addRaw (s, snfPREFIX); func (boost::cref(root->getNodeHash ()), boost::cref(s.peekData ())); + --max; } return; @@ -613,7 +619,8 @@ void SHAMap::getFetchPack (SHAMap* have, bool includeLeaves, int max, std::list SHAMap::getTrustedPath (uint256 const& index) { - ScopedLockType sl (mLock, __FILE__, __LINE__); + ScopedReadLockType sl (mLock); + std::stack stack = SHAMap::getStack (index, false); if (stack.empty () || !stack.top ()->isLeaf ()) diff --git a/src/ripple_app/shamap/SHAMapTreeNode.h b/src/ripple_app/shamap/SHAMapTreeNode.h index c07312266e..f9f13cb54e 100644 --- a/src/ripple_app/shamap/SHAMapTreeNode.h +++ b/src/ripple_app/shamap/SHAMapTreeNode.h @@ -74,7 +74,8 @@ public: } void touch (uint32 s) { - mAccessSeq = s; + if (mSeq != 0) + mAccessSeq = s; } uint256 const& getNodeHash () const { diff --git a/src/ripple_basics/containers/SyncUnorderedMap.h b/src/ripple_basics/containers/SyncUnorderedMap.h new file mode 100644 index 0000000000..f33855022b --- /dev/null +++ b/src/ripple_basics/containers/SyncUnorderedMap.h @@ -0,0 +1,166 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_SYNC_UNORDERED_MAP_H +#define RIPPLE_SYNC_UNORDERED_MAP_H + +// Common base +class SyncUnorderedMap +{ +public: + typedef RippleRecursiveMutex LockType; + typedef LockType::ScopedLockType ScopedLockType; +}; + +/** This is a synchronized unordered map. + It is useful for cases where an unordered map contains all + or a subset of an unchanging data set. +*/ + +template +class SyncUnorderedMapType : public SyncUnorderedMap +{ +public: + typedef c_Key key_type; + typedef c_Data data_type; + typedef boost::unordered_map map_type; + + class iterator + { + public: + bool operator== (const iterator& i) { return it == i.it; } + bool operator!= (const iterator& i) { return it != i.it; } + key_type const& key () { return it->first; } + data_type& data () { return it->second; } + + protected: + typename map_type::iterator it; + }; + +public: + typedef SyncUnorderedMap::LockType LockType; + typedef SyncUnorderedMap::ScopedLockType ScopedLockType; + + SyncUnorderedMapType (const SyncUnorderedMapType& m) + { + ScopedLockType sl (m.mLock, __FILE__, __LINE__); + mMap = m.mMap; + } + + SyncUnorderedMapType () + { ; } + + // Operations that are not inherently synchronous safe + // (Usually because they can change the contents of the map or + // invalidate its members.)
+ void operator= (const SyncUnorderedMapType& m) + { + ScopedLockType sl (m.mLock, __FILE__, __LINE__); + mMap = m.mMap; + } + + void clear () + { + mMap.clear(); + } + + int erase (key_type const& key) + { + return mMap.erase (key); + } + + void erase (iterator& iterator) + { + mMap.erase (iterator.it); + } + + void replace (key_type const& key, data_type const& data) + { + mMap[key] = data; + } + + void rehash (int s) + { + mMap.rehash (s); + } + + map_type& peekMap () + { + return mMap; + } + + // Operations that are inherently synchronous safe + + std::size_t size () const + { + ScopedLockType sl (mLock, __FILE__, __LINE__); + return mMap.size (); + } + + // If the key is already in the map, replace the value passed with the existing value. + // Otherwise, store the value passed. + // Returns 'true' if the value was added to the map + bool canonicalize (key_type const& key, data_type* value) + { + ScopedLockType sl (mLock, __FILE__, __LINE__); + + typename std::pair < typename map_type::iterator, bool > it = + mMap.insert (typename map_type::value_type (key, *value)); + + if (!it.second) // Value was not added, take existing value + *value = it.first->second; + + return it.second; + } + + // Retrieve the existing value from the map.
+ // If none, return an 'empty' value + data_type retrieve (key_type const& key) + { + data_type ret; + { + ScopedLockType sl (mLock, __FILE__, __LINE__); + typename map_type::iterator it = mMap.find (key); + if (it != mMap.end ()) + ret = it->second; + } + return ret; + } + +private: + map_type mMap; + mutable LockType mLock; +}; + + +namespace detail +{ + +template +struct Destroyer > +{ + static void destroy (SyncUnorderedMapType & v) + { + v.clear (); + } +}; + +} +#endif diff --git a/src/ripple_basics/ripple_basics.h b/src/ripple_basics/ripple_basics.h index 9991b89058..ba20b96bcd 100644 --- a/src/ripple_basics/ripple_basics.h +++ b/src/ripple_basics/ripple_basics.h @@ -85,6 +85,7 @@ using namespace beast; #include "containers/RangeSet.h" #include "containers/BlackList.h" #include "containers/TaggedCache.h" +#include "containers/SyncUnorderedMap.h" }