Mirror of https://github.com/XRPLF/rippled.git
Rename canonicalize into two functions:

* canonicalize_replace_cache
* canonicalize_replace_client

It is now clear at the call site which copy gets replaced when the cache and the caller hold duplicate copies of the data. Additionally, the data parameter is now const-correct: if the caller's copy is not going to be replaced (canonicalize_replace_cache), the shared_ptr to the client data is passed as const.
Committed by manojsdoshi
Parent: e257a226f3
Commit: f22fcb3b2a
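To make the new names concrete: canonicalize_replace_cache overwrites the cache's copy with the caller's and never modifies the caller's pointer (hence the const parameter), while canonicalize_replace_client redirects the caller's pointer to the copy already held by the cache. Below is a small self-contained sketch of that contract; ToyCache is an invented std::map-based stand-in for illustration only, not the real ripple::TaggedCache.

    // Toy illustration only; ToyCache is a std::map stand-in, not ripple::TaggedCache.
    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    template <class Key, class T>
    struct ToyCache
    {
        std::map<Key, std::shared_ptr<T>> entries;

        // The cache's copy is replaced by the caller's; the caller's pointer is
        // never modified, so it can be taken by const reference.
        bool canonicalize_replace_cache (Key const& key, std::shared_ptr<T> const& data)
        {
            auto [it, inserted] = entries.emplace (key, data);
            if (! inserted)
                it->second = data;      // the cache's copy loses
            return ! inserted;          // true if the key already existed
        }

        // The caller's copy is replaced by the cache's (if one exists), so the
        // pointer is passed by non-const reference.
        bool canonicalize_replace_client (Key const& key, std::shared_ptr<T>& data)
        {
            auto [it, inserted] = entries.emplace (key, data);
            if (! inserted)
                data = it->second;      // the caller's copy loses
            return ! inserted;
        }
    };

    int main()
    {
        ToyCache<int, std::string> cache;
        auto a = std::make_shared<std::string> ("first");
        auto b = std::make_shared<std::string> ("second");

        cache.canonicalize_replace_client (7, a);  // key absent: 'a' becomes the cached copy
        cache.canonicalize_replace_client (7, b);  // key present: 'b' is redirected to "first"
        assert (b == a);

        auto c = std::make_shared<std::string> ("third");
        cache.canonicalize_replace_cache (7, c);   // the cache entry now points at "third"
        assert (cache.entries.at (7) == c);
    }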
@@ -835,7 +835,7 @@ static bool saveValidatedLedger (
         if (! aLedger)
         {
             aLedger = std::make_shared<AcceptedLedger>(ledger, app.accountIDCache(), app.logs());
-            app.getAcceptedLedgerCache().canonicalize(ledger->info().hash, aLedger);
+            app.getAcceptedLedgerCache().canonicalize_replace_client(ledger->info().hash, aLedger);
         }
     }
     catch (std::exception const&)

@@ -62,8 +62,8 @@ LedgerHistory::insert(

     std::unique_lock sl (m_ledgers_by_hash.peekMutex ());

-    const bool alreadyHad = m_ledgers_by_hash.canonicalize (
-        ledger->info().hash, ledger, true);
+    const bool alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
+        ledger->info().hash, ledger);
     if (validated)
         mLedgersByIndex[ledger->info().seq] = ledger->info().hash;

@@ -108,7 +108,7 @@ LedgerHistory::getLedgerBySeq (LedgerIndex index)
         std::unique_lock sl (m_ledgers_by_hash.peekMutex ());

         assert (ret->isImmutable ());
-        m_ledgers_by_hash.canonicalize (ret->info().hash, ret);
+        m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret);
         mLedgersByIndex[ret->info().seq] = ret->info().hash;
         return (ret->info().seq == index) ? ret : nullptr;
     }

@@ -133,7 +133,7 @@ LedgerHistory::getLedgerByHash (LedgerHash const& hash)

         assert (ret->isImmutable ());
         assert (ret->info().hash == hash);
-        m_ledgers_by_hash.canonicalize (ret->info().hash, ret);
+        m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret);
         assert (ret->info().hash == hash);

         return ret;

@@ -432,7 +432,7 @@ void LedgerHistory::builtLedger (
         m_consensus_validated.peekMutex());

     auto entry = std::make_shared<cv_entry>();
-    m_consensus_validated.canonicalize(index, entry, false);
+    m_consensus_validated.canonicalize_replace_client(index, entry);

     if (entry->validated && ! entry->built)
     {

@@ -472,7 +472,7 @@ void LedgerHistory::validatedLedger (
         m_consensus_validated.peekMutex());

     auto entry = std::make_shared<cv_entry>();
-    m_consensus_validated.canonicalize(index, entry, false);
+    m_consensus_validated.canonicalize_replace_client(index, entry);

     if (entry->built && ! entry->validated)
     {

@@ -1976,7 +1976,7 @@ LedgerMaster::addFetchPack (
     uint256 const& hash,
     std::shared_ptr< Blob >& data)
 {
-    fetch_packs_.canonicalize (hash, data);
+    fetch_packs_.canonicalize_replace_client(hash, data);
 }

 boost::optional<Blob>

@@ -62,7 +62,7 @@ TransactionMaster::fetch (uint256 const& txnID, error_code_i& ec)
     if (!txn)
         return txn;

-    mCache.canonicalize (txnID, txn);
+    mCache.canonicalize_replace_client(txnID, txn);

     return txn;
 }

@@ -82,7 +82,7 @@ TransactionMaster::fetch (uint256 const& txnID, ClosedInterval<uint32_t> const&
         txnID, mApp, range, ec);

     if (v.which () == 0 && boost::get<pointer> (v))
-        mCache.canonicalize (txnID, boost::get<pointer> (v));
+        mCache.canonicalize_replace_client(txnID, boost::get<pointer> (v));

     return v;
 }

@@ -127,7 +127,7 @@ TransactionMaster::canonicalize(std::shared_ptr<Transaction>* pTransaction)
     {
         auto txn = *pTransaction;
         // VFALCO NOTE canonicalize can change the value of txn!
-        mCache.canonicalize(tid, txn);
+        mCache.canonicalize_replace_client(tid, txn);
         *pTransaction = txn;
     }
 }

@@ -2665,7 +2665,7 @@ void NetworkOPsImp::pubLedger (
         {
             alpAccepted = std::make_shared<AcceptedLedger> (
                 lpAccepted, app_.accountIDCache(), app_.logs());
-            app_.getAcceptedLedgerCache().canonicalize (
+            app_.getAcceptedLedgerCache().canonicalize_replace_client(
                 lpAccepted->info().hash, alpAccepted);
         }
@@ -288,7 +288,14 @@ public:

         @return `true` If the key already existed.
     */
-    bool canonicalize (const key_type& key, std::shared_ptr<T>& data, bool replace = false)
+private:
+
+    template <bool replace>
+    bool
+    canonicalize(
+        const key_type& key,
+        std::conditional_t<replace, std::shared_ptr<T> const, std::shared_ptr<T>>& data
+    )
     {
         // Return canonical value, store if needed, refresh in cache
         // Return values: true=we had the data already
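The std::conditional_t in the new signature is what enforces the const-correctness described in the commit message: when replace is true the data parameter becomes std::shared_ptr<T> const&, so the caller's pointer cannot be reassigned; when replace is false it remains a mutable reference. A minimal sketch of the type selection follows (the data_arg_t alias is invented for illustration, not part of the header):

    #include <memory>
    #include <type_traits>

    // data_arg_t mirrors the parameter type used by the private canonicalize<replace>.
    template <class T, bool replace>
    using data_arg_t =
        std::conditional_t<replace, std::shared_ptr<T> const, std::shared_ptr<T>>&;

    static_assert (std::is_same_v<data_arg_t<int, true>, std::shared_ptr<int> const&>);
    static_assert (std::is_same_v<data_arg_t<int, false>, std::shared_ptr<int>&>);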
@@ -310,7 +317,7 @@ public:

         if (entry.isCached ())
         {
-            if (replace)
+            if constexpr (replace)
             {
                 entry.ptr = data;
                 entry.weak_ptr = data;

@@ -327,7 +334,7 @@ public:

         if (cachedData)
         {
-            if (replace)
+            if constexpr (replace)
             {
                 entry.ptr = data;
                 entry.weak_ptr = data;
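The change from a runtime if (replace) to if constexpr (replace) is needed once data can be const: with a plain if, both branches are instantiated for every specialization, and the branch that reassigns data would not compile when data is std::shared_ptr<T> const&. With if constexpr the untaken branch is discarded before instantiation. A rough sketch of the pattern (reconcile is a made-up function, not part of TaggedCache):

    #include <memory>
    #include <type_traits>

    // Sketch: replace==true overwrites the cached copy; replace==false overwrites
    // the caller's copy. Only `if constexpr` lets the const instantiation compile,
    // because the discarded branch is never instantiated.
    template <bool replace, class T>
    void reconcile (
        std::conditional_t<replace, std::shared_ptr<T> const, std::shared_ptr<T>>& data,
        std::shared_ptr<T>& cached)
    {
        if constexpr (replace)
            cached = data;   // replace the cache's copy; data is left untouched
        else
            data = cached;   // replace the caller's copy with the canonical one
    }

    // Usage: reconcile<true> (clientPtr, cachePtr);  or  reconcile<false> (clientPtr, cachePtr);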
@@ -349,6 +356,17 @@ public:
         return false;
     }

+public:
+    bool canonicalize_replace_cache(const key_type& key, std::shared_ptr<T> const& data)
+    {
+        return canonicalize<true>(key, data);
+    }
+
+    bool canonicalize_replace_client(const key_type& key, std::shared_ptr<T>& data)
+    {
+        return canonicalize<false>(key, data);
+    }
+
     std::shared_ptr<T> fetch (const key_type& key)
     {
         // fetch us a shared pointer to the stored data object
@@ -393,7 +411,7 @@ public:
     {
         mapped_ptr p (std::make_shared <T> (
             std::cref (value)));
-        return canonicalize (key, p);
+        return canonicalize_replace_client(key, p);
     }

     // VFALCO NOTE It looks like this returns a copy of the data in

@@ -216,7 +216,7 @@ Database::doFetch(uint256 const& hash, std::uint32_t seq,
         else
         {
             // Ensure all threads get the same object
-            pCache.canonicalize(hash, nObj);
+            pCache.canonicalize_replace_client(hash, nObj);

             // Since this was a 'hard' fetch, we will log it.
             JLOG(j_.trace()) <<

@@ -265,7 +265,7 @@ Database::storeLedger(
     {
         for (auto& nObj : batch)
         {
-            dstPCache->canonicalize(nObj->getHash(), nObj, true);
+            dstPCache->canonicalize_replace_cache(nObj->getHash(), nObj);
             dstNCache->erase(nObj->getHash());
             storeStats(nObj->getData().size());
         }

@@ -29,7 +29,7 @@ DatabaseNodeImp::store(NodeObjectType type, Blob&& data,
     uint256 const& hash, std::uint32_t seq)
 {
     auto nObj = NodeObject::createObject(type, std::move(data), hash);
-    pCache_->canonicalize(hash, nObj, true);
+    pCache_->canonicalize_replace_cache(hash, nObj);
     backend_->store(nObj);
     nCache_->erase(hash);
     storeStats(nObj->getData().size());

@@ -64,7 +64,7 @@ DatabaseRotatingImp::store(NodeObjectType type, Blob&& data,
     uint256 const& hash, std::uint32_t seq)
 {
     auto nObj = NodeObject::createObject(type, std::move(data), hash);
-    pCache_->canonicalize(hash, nObj, true);
+    pCache_->canonicalize_replace_cache(hash, nObj);
     getWritableBackend()->store(nObj);
     nCache_->erase(hash);
     storeStats(nObj->getData().size());

@@ -892,7 +892,7 @@ DatabaseShardImp::store(
     auto [backend, pCache, nCache] = shard->getBackendAll();
     auto nObj {NodeObject::createObject(type, std::move(data), hash)};

-    pCache->canonicalize(hash, nObj, true);
+    pCache->canonicalize_replace_cache(hash, nObj);
     backend->store(nObj);
     nCache->erase(hash);

@@ -1072,7 +1072,7 @@ SHAMap::canonicalize(SHAMapHash const& hash, std::shared_ptr<SHAMapAbstractNode>
     assert (node->getSeq() == 0);
     assert (node->getNodeHash() == hash);

-    f_.treecache().canonicalize (hash.as_uint256(), node);
+    f_.treecache().canonicalize_replace_client(hash.as_uint256(), node);
 }

 void

@@ -103,7 +103,7 @@ public:
         {
             Cache::mapped_ptr const p1 (c.fetch (3));
             Cache::mapped_ptr p2 (std::make_shared <Value> ("three"));
-            c.canonicalize (3, p2);
+            c.canonicalize_replace_client(3, p2);
             BEAST_EXPECT(p1.get() == p2.get());
         }
         ++clock;

@@ -134,7 +134,7 @@ public:
         BEAST_EXPECT(c.getTrackSize() == 1);
         // Canonicalize a new object with the same key
         Cache::mapped_ptr p2 (std::make_shared <std::string> ("four"));
-        BEAST_EXPECT(c.canonicalize (4, p2, false));
+        BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
         BEAST_EXPECT(c.getCacheSize() == 1);
         BEAST_EXPECT(c.getTrackSize() == 1);
         // Make sure we get the original object