Add a flag to defer cache reads while the cache is being updated (#97)

CJ Cobb
2022-02-18 20:44:07 -05:00
committed by GitHub
parent db601b587c
commit 199144d092
2 changed files with 32 additions and 19 deletions
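The change guards SimpleCache's std::shared_mutex-protected map with an extra std::atomic_bool: update() raises the flag before queueing for the exclusive lock and lowers it once the lock is released, and every read path returns an empty optional while the flag is up. A minimal, self-contained sketch of that pattern follows; ToyCache and its members are illustrative names, not the project's code.

#include <atomic>
#include <cstdint>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>

class ToyCache
{
    std::map<std::uint32_t, int> map_;
    mutable std::shared_mutex mtx_;
    // Set for the duration of update(); readers check it before locking.
    std::atomic_bool deferReads_ = false;

public:
    void
    update(std::uint32_t key, int value)
    {
        // Tell readers to back off before we queue for the exclusive lock.
        deferReads_ = true;
        {
            std::unique_lock lck{mtx_};
            map_[key] = value;
        }
        deferReads_ = false;
    }

    std::optional<int>
    get(std::uint32_t key) const
    {
        // An update is in flight: report a miss instead of contending.
        if (deferReads_)
            return {};
        std::shared_lock lck{mtx_};
        if (auto it = map_.find(key); it != map_.end())
            return it->second;
        return {};
    }
};

The flag is a throughput hint rather than a correctness mechanism: a reader that passes the check just before update() raises the flag still contends for the shared lock, and the mutex alone keeps the data consistent.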


@@ -6,6 +6,8 @@ SimpleCache::update(
    std::vector<LedgerObject> const& objs,
    uint32_t seq,
    bool isBackground)
{
    deferReads_ = true;
    {
        std::unique_lock lck{mtx_};
        if (seq > latestSeq_)
@@ -33,9 +35,13 @@ SimpleCache::update(
            }
        }
    }
    deferReads_ = false;
}
std::optional<LedgerObject>
SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (deferReads_)
        return {};
    if (!full_)
        return {};
    std::shared_lock lck{mtx_};
@@ -49,6 +55,8 @@ SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<LedgerObject>
SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (deferReads_)
        return {};
    if (!full_)
        return {};
    std::shared_lock lck{mtx_};
@@ -63,6 +71,8 @@ SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<Blob>
SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
{
    if (deferReads_)
        return {};
    if (seq > latestSeq_)
        return {};
    std::shared_lock lck{mtx_};
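All three accessors gain the same early-out, so while an update is in flight a read behaves like an ordinary cache miss. A hypothetical caller would simply fall through to its slower data source; Database and its fetch() method below are assumed names, not part of the project's API.

// Hypothetical caller: an empty optional from the cache, whether a real miss
// or a deferred read, falls through to the backend.
std::optional<Blob>
fetchLedgerObject(
    SimpleCache const& cache,
    Database& db,
    ripple::uint256 const& key,
    uint32_t seq)
{
    if (auto cached = cache.get(key, seq))
        return cached;
    return db.fetch(key, seq);
}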


@@ -19,6 +19,9 @@ class SimpleCache
    };
    std::map<ripple::uint256, CacheEntry> map_;
    mutable std::shared_mutex mtx_;
    // flag set in update to prevent reads from starving the update, and to
    // prevent reads from piling up behind the update
    std::atomic_bool deferReads_ = false;
    uint32_t latestSeq_ = 0;
    std::atomic_bool full_ = false;
    // temporary set to prevent background thread from writing already deleted
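
update() clears deferReads_ by hand, so an exception thrown while the map is being modified would leave the flag stuck at true and readers permanently skipping the cache. A small RAII guard is one possible hardening; the sketch below is an alternative shape for the same idea, not something this commit adds.

#include <atomic>

// Raises the flag on construction and lowers it on destruction, even if the
// guarded update throws.
class DeferReadsGuard
{
    std::atomic_bool& flag_;

public:
    explicit DeferReadsGuard(std::atomic_bool& flag) : flag_(flag)
    {
        flag_ = true;
    }
    ~DeferReadsGuard()
    {
        flag_ = false;
    }
    DeferReadsGuard(DeferReadsGuard const&) = delete;
    DeferReadsGuard&
    operator=(DeferReadsGuard const&) = delete;
};

// Usage inside an update-style member function:
//     DeferReadsGuard guard{deferReads_};
//     std::unique_lock lck{mtx_};
//     ...apply the changes...

Because deferReads_ is a std::atomic_bool, raising and lowering it outside the mutex is well defined; a reader that slips past the check before the flag goes up simply waits on the shared lock, exactly as it did before this change.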