Refactor NetworkOPs:

* Reduce public API
* Use LedgerMaster interface from RPC
* Remove fetch pack management to LedgerMaster
This commit is contained in:
Nik Bougalis
2015-06-30 18:27:05 -07:00
parent 163e8eb8fc
commit 761f218c0a
46 changed files with 552 additions and 609 deletions

View File

@@ -19,6 +19,7 @@
#include <BeastConfig.h>
#include <ripple/app/ledger/AccountStateSF.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/tx/TransactionMaster.h>
@@ -27,10 +28,6 @@
namespace ripple {
AccountStateSF::AccountStateSF()
{
}
void AccountStateSF::gotNode (bool fromFilter,
SHAMapNodeID const& id,
uint256 const& nodeHash,
@@ -48,7 +45,7 @@ bool AccountStateSF::haveNode (SHAMapNodeID const& id,
uint256 const& nodeHash,
Blob& nodeData)
{
return getApp().getOPs ().getFetchPack (nodeHash, nodeData);
return getApp().getLedgerMaster ().getFetchPack (nodeHash, nodeData);
}
} // ripple

View File

@@ -26,10 +26,11 @@ namespace ripple {
// This class is only needed on add functions
// sync filter for account state nodes during ledger sync
class AccountStateSF : public SHAMapSyncFilter
class AccountStateSF
: public SHAMapSyncFilter
{
public:
AccountStateSF();
AccountStateSF() = default;
// Note that the nodeData is overwritten by this call
void gotNode (bool fromFilter,

View File

@@ -802,7 +802,7 @@ void finishLoadByIndexOrHash(Ledger::pointer& ledger)
ledger->setClosed ();
ledger->setImmutable ();
if (getApp ().getOPs ().haveLedger (ledger->getLedgerSeq ()))
if (getApp ().getLedgerMaster ().haveLedger (ledger->getLedgerSeq ()))
ledger->setAccepted ();
WriteLog (lsTRACE, Ledger)

View File

@@ -22,16 +22,22 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/ledger/LedgerHolder.h>
#include <ripple/basics/chrono.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/RippleLedgerHash.h>
#include <ripple/protocol/STValidation.h>
#include <ripple/core/Config.h>
#include <beast/insight/Collector.h>
#include <beast/threads/Stoppable.h>
#include <beast/threads/UnlockGuard.h>
#include <beast/utility/PropertyStream.h>
#include "ripple.pb.h"
namespace ripple {
class Peer;
// Tracks the current ledger and any ledgers in the process of closing
// Tracks ledger history
// Tracks held transactions
@@ -53,7 +59,7 @@ public:
using ScopedLockType = std::unique_lock <LockType>;
using ScopedUnlockType = beast::GenericScopedUnlock <LockType>;
virtual ~LedgerMaster () = 0;
virtual ~LedgerMaster () = default;
virtual LedgerIndex getCurrentLedgerIndex () = 0;
virtual LedgerIndex getValidLedgerIndex () = 0;
@@ -128,7 +134,6 @@ public:
virtual void tune (int size, int age) = 0;
virtual void sweep () = 0;
virtual float getCacheHitRate () = 0;
virtual void addValidateCallback (callback& c) = 0;
virtual void checkAccept (Ledger::ref ledger) = 0;
virtual void checkAccept (uint256 const& hash, std::uint32_t seq) = 0;
@@ -147,18 +152,44 @@ public:
virtual beast::PropertyStream::Source& getPropertySource () = 0;
static bool shouldAcquire (std::uint32_t currentLedgerID,
std::uint32_t ledgerHistory, std::uint32_t ledgerHistoryIndex,
std::uint32_t targetLedger);
virtual void clearPriorLedgers (LedgerIndex seq) = 0;
virtual void clearLedgerCachePrior (LedgerIndex seq) = 0;
// Fetch Packs
virtual
void gotFetchPack (
bool progress,
std::uint32_t seq) = 0;
virtual
void addFetchPack (
uint256 const& hash,
std::shared_ptr<Blob>& data) = 0;
virtual
bool getFetchPack (
uint256 const& hash,
Blob& data) = 0;
virtual
void makeFetchPack (
Job&, std::weak_ptr<Peer> const& wPeer,
std::shared_ptr<protocol::TMGetObjectByHash> const& request,
uint256 haveLedgerHash,
std::uint32_t uUptime) = 0;
virtual
std::size_t getFetchPackCacheSize () const = 0;
};
std::unique_ptr <LedgerMaster>
make_LedgerMaster (Config const& config, beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector, beast::Journal journal);
make_LedgerMaster (
Config const& config,
Stopwatch& stopwatch,
beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector,
beast::Journal journal);
} // ripple

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include <BeastConfig.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionStateSF.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
@@ -27,10 +28,6 @@
namespace ripple {
TransactionStateSF::TransactionStateSF()
{
}
// VFALCO This might be better as Blob&&
void TransactionStateSF::gotNode (bool fromFilter,
SHAMapNodeID const& id,
@@ -52,7 +49,7 @@ bool TransactionStateSF::haveNode (SHAMapNodeID const& id,
uint256 const& nodeHash,
Blob& nodeData)
{
return getApp().getOPs ().getFetchPack (nodeHash, nodeData);
return getApp().getLedgerMaster ().getFetchPack (nodeHash, nodeData);
}
} // ripple

View File

@@ -27,10 +27,11 @@ namespace ripple {
// This class is only needed on add functions
// sync filter for transactions tree during ledger sync
class TransactionStateSF : public SHAMapSyncFilter
class TransactionStateSF
: public SHAMapSyncFilter
{
public:
TransactionStateSF();
TransactionStateSF() = default;
// Note that the nodeData is overwritten by this call
void gotNode (bool fromFilter,

View File

@@ -156,7 +156,7 @@ bool InboundLedger::tryLocal ()
{
Blob data;
if (!getApp().getOPs ().getFetchPack (mHash, data))
if (!getApp().getLedgerMaster ().getFetchPack (mHash, data))
return false;
if (m_journal.trace) m_journal.trace <<

View File

@@ -19,6 +19,7 @@
#include <BeastConfig.h>
#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/basics/DecayingSample.h>
@@ -253,7 +254,7 @@ public:
auto blob = std::make_shared<Blob> (s.begin(), s.end());
getApp().getOPs().addFetchPack (newNode->getNodeHash(), blob);
getApp().getLedgerMaster().addFetchPack (newNode->getNodeHash(), blob);
}
}
catch (...)

View File

@@ -36,9 +36,14 @@
#include <ripple/app/paths/PathRequests.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/RangeSet.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/UptimeTimer.h>
#include <ripple/core/LoadFeeTrack.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/Peer.h>
#include <ripple/protocol/digest.h>
#include <ripple/protocol/HashPrefix.h>
#include <ripple/resource/Fees.h>
#include <ripple/validators/Manager.h>
#include <algorithm>
#include <cassert>
@@ -55,8 +60,6 @@ class LedgerMasterImp
: public LedgerMaster
{
public:
using callback = std::function <void (Ledger::ref)>;
using LockType = RippleRecursiveMutex;
using ScopedLockType = std::lock_guard <LockType>;
using ScopedUnlockType = beast::GenericScopedUnlock <LockType>;
@@ -84,7 +87,6 @@ public:
int mMinValidations; // The minimum validations to publish a ledger
uint256 mLastValidateHash;
std::uint32_t mLastValidateSeq;
std::list<callback> mOnValidate; // Called when a ledger has enough validations
bool mAdvanceThread; // Publish thread is running
bool mAdvanceWork; // Publish thread has work to do
@@ -110,9 +112,14 @@ public:
int const ledger_fetch_size_;
TaggedCache<uint256, Blob> fetch_packs_;
std::uint32_t fetch_seq_;
//--------------------------------------------------------------------------
LedgerMasterImp (Config const& config, Stoppable& parent,
LedgerMasterImp (Config const& config, Stopwatch& stopwatch,
Stoppable& parent,
beast::insight::Collector::ptr const& collector, beast::Journal journal)
: LedgerMaster (parent)
, m_journal (journal)
@@ -136,6 +143,9 @@ public:
, fetch_depth_ (getApp ().getSHAMapStore ().clampFetchDepth (config.FETCH_DEPTH))
, ledger_history_ (config.LEDGER_HISTORY)
, ledger_fetch_size_ (config.getSize (siLedgerFetch))
, fetch_packs_ ("FetchPack", 65536, 45, stopwatch,
deprecatedLogs().journal("TaggedCache"))
, fetch_seq_ (0)
{
}
@@ -377,7 +387,7 @@ public:
#endif
{
OpenView view(&*ledger);
OpenView view(&*ledger);
for (auto const& it : mHeldTransactions)
{
ApplyFlags tepFlags = tapNONE;
@@ -975,145 +985,20 @@ public:
return *ret;
}
// Try to publish ledgers, acquire missing ledgers
void doAdvance ()
bool shouldFetchPack (std::uint32_t seq) const
{
do
{
mAdvanceWork = false; // If there's work to do, we'll make progress
bool progress = false;
auto const pubLedgers = findNewLedgersToPublish ();
if (pubLedgers.empty())
{
if (!standalone_ && !getApp().getFeeTrack().isLoadedLocal() &&
(getApp().getJobQueue().getJobCount(jtPUBOLDLEDGER) < 10) &&
(mValidLedgerSeq == mPubLedgerSeq) &&
(getValidatedLedgerAge() < MAX_LEDGER_AGE_ACQUIRE))
{ // We are in sync, so can acquire
std::uint32_t missing;
{
ScopedLockType sl (mCompleteLock);
missing = mCompleteLedgers.prevMissing(mPubLedger->getLedgerSeq());
}
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance discovered missing " << missing;
if ((missing != RangeSet::absent) && (missing > 0) &&
shouldAcquire (mValidLedgerSeq, ledger_history_,
getApp ().getSHAMapStore ().getCanDelete (), missing) &&
((mFillInProgress == 0) || (missing > mFillInProgress)))
{
WriteLog (lsTRACE, LedgerMaster) << "advanceThread should acquire";
{
ScopedUnlockType sl(m_mutex);
uint256 hash = getLedgerHashForHistory (missing);
if (hash.isNonZero())
{
Ledger::pointer ledger = getLedgerByHash (hash);
if (!ledger)
{
if (!getApp().getInboundLedgers().isFailure (hash))
{
ledger =
getApp().getInboundLedgers().acquire(hash,
missing,
InboundLedger::fcHISTORY);
if (! ledger && (missing > 32600) && getApp().getOPs().shouldFetchPack (missing))
{
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance want fetch pack " << missing;
getFetchPack(hash, missing);
}
else
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance no fetch pack for " << missing;
}
else
WriteLog (lsDEBUG, LedgerMaster) << "tryAdvance found failed acquire";
}
if (ledger)
{
assert(ledger->getLedgerSeq() == missing);
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance acquired " << ledger->getLedgerSeq();
setFullLedger(ledger, false, false);
mHistLedger = ledger;
if ((mFillInProgress == 0) && (Ledger::getHashByIndex(ledger->getLedgerSeq() - 1) == ledger->getParentHash()))
{
// Previous ledger is in DB
ScopedLockType lock (m_mutex);
mFillInProgress = ledger->getLedgerSeq();
getApp().getJobQueue().addJob(jtADVANCE, "tryFill", std::bind (
&LedgerMasterImp::tryFill, this,
std::placeholders::_1, ledger));
}
progress = true;
}
else
{
try
{
for (int i = 0; i < ledger_fetch_size_; ++i)
{
std::uint32_t seq = missing - i;
uint256 hash = getLedgerHashForHistory (seq);
if (hash.isNonZero())
getApp().getInboundLedgers().acquire(hash,
seq, InboundLedger::fcHISTORY);
}
}
catch (...)
{
WriteLog (lsWARNING, LedgerMaster) << "Threw while prefetching";
}
}
}
else
{
WriteLog (lsFATAL, LedgerMaster) << "Unable to find ledger following prevMissing " << missing;
WriteLog (lsFATAL, LedgerMaster) << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
WriteLog (lsFATAL, LedgerMaster) << "Ledgers: " << getApp().getLedgerMaster().getCompleteLedgers();
clearLedger (missing + 1);
progress = true;
}
}
if (mValidLedgerSeq != mPubLedgerSeq)
{
WriteLog (lsDEBUG, LedgerMaster) << "tryAdvance found last valid changed";
progress = true;
}
}
}
else
{
mHistLedger.reset();
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance not fetching history";
}
}
else
{
WriteLog (lsTRACE, LedgerMaster) <<
"tryAdvance found " << pubLedgers.size() <<
" ledgers to publish";
for(auto ledger : pubLedgers)
{
{
ScopedUnlockType sul (m_mutex);
WriteLog(lsDEBUG, LedgerMaster) <<
"tryAdvance publishing seq " << ledger->getLedgerSeq();
setFullLedger(ledger, true, true);
getApp().getOPs().pubLedger(ledger);
}
setPubLedger(ledger);
progress = true;
}
getApp().getOPs().clearNeedNetworkLedger();
newPFWork ("pf:newLedger");
}
if (progress)
mAdvanceWork = true;
} while (mAdvanceWork);
return (fetch_seq_ != seq);
}
bool shouldAcquire (
std::uint32_t const currentLedger,
std::uint32_t const ledgerHistory,
std::uint32_t const ledgerHistoryIndex,
std::uint32_t const candidateLedger) const;
// Try to publish ledgers, acquire missing ledgers
void doAdvance ();
std::vector<Ledger::pointer> findNewLedgersToPublish ()
{
std::vector<Ledger::pointer> ret;
@@ -1555,6 +1440,7 @@ public:
void sweep ()
{
mLedgerHistory.sweep ();
fetch_packs_.sweep ();
}
float getCacheHitRate ()
@@ -1562,11 +1448,6 @@ public:
return mLedgerHistory.getCacheHitRate ();
}
void addValidateCallback (callback& c)
{
mOnValidate.push_back (c);
}
beast::PropertyStream::Source& getPropertySource ()
{
return *mLedgerCleaner;
@@ -1586,22 +1467,34 @@ public:
{
mLedgerHistory.clearLedgerCachePrior (seq);
}
// Fetch packs:
void gotFetchPack (
bool progress,
std::uint32_t seq) override;
void addFetchPack (
uint256 const& hash,
std::shared_ptr<Blob>& data) override;
bool getFetchPack (
uint256 const& hash,
Blob& data) override;
void makeFetchPack (
Job&, std::weak_ptr<Peer> const& wPeer,
std::shared_ptr<protocol::TMGetObjectByHash> const& request,
uint256 haveLedgerHash,
std::uint32_t uUptime) override;
std::size_t getFetchPackCacheSize () const;
};
//------------------------------------------------------------------------------
LedgerMaster::LedgerMaster (Stoppable& parent)
: Stoppable ("LedgerMaster", parent)
{
}
LedgerMaster::~LedgerMaster ()
{
}
bool LedgerMaster::shouldAcquire (std::uint32_t const currentLedger,
std::uint32_t const ledgerHistory, std::uint32_t const ledgerHistoryIndex,
std::uint32_t const candidateLedger)
bool LedgerMasterImp::shouldAcquire (
std::uint32_t const currentLedger,
std::uint32_t const ledgerHistory,
std::uint32_t const ledgerHistoryIndex,
std::uint32_t const candidateLedger) const
{
bool ret (candidateLedger >= currentLedger ||
candidateLedger > ledgerHistoryIndex ||
@@ -1615,11 +1508,342 @@ bool LedgerMaster::shouldAcquire (std::uint32_t const currentLedger,
return ret;
}
std::unique_ptr <LedgerMaster>
make_LedgerMaster (Config const& config, beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector, beast::Journal journal)
// Try to publish ledgers, acquire missing ledgers
void LedgerMasterImp::doAdvance ()
{
return std::make_unique <LedgerMasterImp> (config, parent, collector, journal);
// TODO NIKB: simplify and unindent this a bit!
do
{
mAdvanceWork = false; // If there's work to do, we'll make progress
bool progress = false;
auto const pubLedgers = findNewLedgersToPublish ();
if (pubLedgers.empty())
{
if (!standalone_ && !getApp().getFeeTrack().isLoadedLocal() &&
(getApp().getJobQueue().getJobCount(jtPUBOLDLEDGER) < 10) &&
(mValidLedgerSeq == mPubLedgerSeq) &&
(getValidatedLedgerAge() < MAX_LEDGER_AGE_ACQUIRE))
{ // We are in sync, so can acquire
std::uint32_t missing;
{
ScopedLockType sl (mCompleteLock);
missing = mCompleteLedgers.prevMissing(mPubLedger->getLedgerSeq());
}
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance discovered missing " << missing;
if ((missing != RangeSet::absent) && (missing > 0) &&
shouldAcquire (mValidLedgerSeq, ledger_history_,
getApp ().getSHAMapStore ().getCanDelete (), missing) &&
((mFillInProgress == 0) || (missing > mFillInProgress)))
{
WriteLog (lsTRACE, LedgerMaster) << "advanceThread should acquire";
{
ScopedUnlockType sl(m_mutex);
uint256 hash = getLedgerHashForHistory (missing);
if (hash.isNonZero())
{
Ledger::pointer ledger = getLedgerByHash (hash);
if (!ledger)
{
if (!getApp().getInboundLedgers().isFailure (hash))
{
ledger =
getApp().getInboundLedgers().acquire(hash,
missing,
InboundLedger::fcHISTORY);
if (! ledger && (missing > 32600) && shouldFetchPack (missing))
{
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance want fetch pack " << missing;
fetch_seq_ = missing;
getFetchPack(hash, missing);
}
else
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance no fetch pack for " << missing;
}
else
WriteLog (lsDEBUG, LedgerMaster) << "tryAdvance found failed acquire";
}
if (ledger)
{
assert(ledger->getLedgerSeq() == missing);
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance acquired " << ledger->getLedgerSeq();
setFullLedger(ledger, false, false);
mHistLedger = ledger;
if ((mFillInProgress == 0) && (Ledger::getHashByIndex(ledger->getLedgerSeq() - 1) == ledger->getParentHash()))
{
// Previous ledger is in DB
ScopedLockType lock (m_mutex);
mFillInProgress = ledger->getLedgerSeq();
getApp().getJobQueue().addJob(jtADVANCE, "tryFill", std::bind (
&LedgerMasterImp::tryFill, this,
std::placeholders::_1, ledger));
}
progress = true;
}
else
{
try
{
for (int i = 0; i < ledger_fetch_size_; ++i)
{
std::uint32_t seq = missing - i;
uint256 hash = getLedgerHashForHistory (seq);
if (hash.isNonZero())
getApp().getInboundLedgers().acquire(hash,
seq, InboundLedger::fcHISTORY);
}
}
catch (...)
{
WriteLog (lsWARNING, LedgerMaster) << "Threw while prefetching";
}
}
}
else
{
WriteLog (lsFATAL, LedgerMaster) << "Unable to find ledger following prevMissing " << missing;
WriteLog (lsFATAL, LedgerMaster) << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
WriteLog (lsFATAL, LedgerMaster) << "Ledgers: " << getApp().getLedgerMaster().getCompleteLedgers();
clearLedger (missing + 1);
progress = true;
}
}
if (mValidLedgerSeq != mPubLedgerSeq)
{
WriteLog (lsDEBUG, LedgerMaster) << "tryAdvance found last valid changed";
progress = true;
}
}
}
else
{
mHistLedger.reset();
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance not fetching history";
}
}
else
{
WriteLog (lsTRACE, LedgerMaster) <<
"tryAdvance found " << pubLedgers.size() <<
" ledgers to publish";
for(auto ledger : pubLedgers)
{
{
ScopedUnlockType sul (m_mutex);
WriteLog(lsDEBUG, LedgerMaster) <<
"tryAdvance publishing seq " << ledger->getLedgerSeq();
setFullLedger(ledger, true, true);
getApp().getOPs().pubLedger(ledger);
}
setPubLedger(ledger);
progress = true;
}
getApp().getOPs().clearNeedNetworkLedger();
newPFWork ("pf:newLedger");
}
if (progress)
mAdvanceWork = true;
} while (mAdvanceWork);
}
/** Store a fetch pack blob in the cache, keyed by its hash.
    canonicalize() shares one Blob instance per hash — presumably replacing
    `data` with an already-cached copy if present (confirm against
    TaggedCache::canonicalize).
*/
void LedgerMasterImp::addFetchPack (
    uint256 const& hash,
    std::shared_ptr< Blob >& data)
{
    fetch_packs_.canonicalize (hash, data);
}
/** Retrieve (and consume) a fetch pack entry.

    On success `data` holds the cached blob and the entry is removed from
    the cache. Returns true only when the retrieved blob's SHA-512-half
    matches `hash`, so callers never receive corrupted data.
*/
bool LedgerMasterImp::getFetchPack (
    uint256 const& hash,
    Blob& data)
{
    bool const found = fetch_packs_.retrieve (hash, data);

    if (found)
    {
        // Entries are single-use: remove now that it has been handed out.
        fetch_packs_.del (hash, false);

        // Verify integrity before letting the caller trust the blob.
        if (sha512Half (makeSlice (data)) == hash)
            return true;
    }

    return false;
}
/** Notify that fetch pack data has arrived.
    Schedules InboundLedgers::gotFetchPack on the job queue; the
    `progress` and `seq` parameters are currently unused here.
*/
void LedgerMasterImp::gotFetchPack (
    bool progress,
    std::uint32_t seq)
{
    // FIXME: Calling this function more than once will result in
    // InboundLedgers::gotFetchPack being called more than once
    // which is expensive. A flag should track whether we've already dispatched
    getApp().getJobQueue().addJob (
        jtLEDGER_DATA, "gotFetchPack",
        std::bind (&InboundLedgers::gotFetchPack,
            &getApp().getInboundLedgers (), std::placeholders::_1));
}
/** Build a fetch pack for a peer and send it.

    A fetch pack contains the ledger header plus the account-state and
    transaction nodes a peer needs to acquire the ledger *preceding*
    `haveLedgerHash` and, time permitting, earlier ledgers as well.

    @param wPeer          the requesting peer (may have disconnected)
    @param request        the TMGetObjectByHash request being answered
    @param haveLedgerHash hash of the ledger the peer already has
    @param uUptime        server uptime (seconds) when the request was
                          queued; used to drop stale requests
*/
void LedgerMasterImp::makeFetchPack (
    Job&, std::weak_ptr<Peer> const& wPeer,
    std::shared_ptr<protocol::TMGetObjectByHash> const& request,
    uint256 haveLedgerHash,
    std::uint32_t uUptime)
{
    // Drop requests that waited in the job queue for more than a second.
    if (UptimeTimer::getInstance ().getElapsedSeconds () > (uUptime + 1))
    {
        m_journal.info << "Fetch pack request got stale";
        return;
    }

    // Don't serve peers while we are under local load or our own
    // validated ledger is badly out of date.
    if (getApp().getFeeTrack ().isLoadedLocal () ||
        (getValidatedLedgerAge() > 40))
    {
        m_journal.info << "Too busy to make fetch pack";
        return;
    }

    Peer::ptr peer = wPeer.lock ();

    // The peer may have gone away while the job was queued.
    if (!peer)
        return;

    auto haveLedger = getLedgerByHash (haveLedgerHash);

    if (!haveLedger)
    {
        // Log the requested hash (streaming the null pointer was useless).
        m_journal.info
            << "Peer requests fetch pack for ledger we don't have: "
            << haveLedgerHash;
        peer->charge (Resource::feeRequestNoReply);
        return;
    }

    if (!haveLedger->isClosed ())
    {
        m_journal.warning
            << "Peer requests fetch pack from open ledger: "
            << haveLedgerHash;
        peer->charge (Resource::feeInvalidRequest);
        return;
    }

    if (haveLedger->getLedgerSeq() < getEarliestFetch())
    {
        m_journal.debug << "Peer requests fetch pack that is too early";
        peer->charge (Resource::feeInvalidRequest);
        return;
    }

    // The pack describes the ledger before the one the peer has.
    auto wantLedger = getLedgerByHash (haveLedger->getParentHash ());

    if (!wantLedger)
    {
        m_journal.info
            << "Peer requests fetch pack for ledger whose predecessor we "
            << "don't have: " << haveLedgerHash;
        peer->charge (Resource::feeRequestNoReply);
        return;
    }

    // Appends one (seq, hash, blob) object to the reply being built.
    auto fpAppender = [](
        protocol::TMGetObjectByHash* reply,
        std::uint32_t ledgerSeq,
        uint256 const& hash,
        const Blob& blob)
    {
        protocol::TMIndexedObject& newObj = * (reply->add_objects ());
        newObj.set_ledgerseq (ledgerSeq);
        newObj.set_hash (hash.begin (), 256 / 8);
        newObj.set_data (&blob[0], blob.size ());
    };

    try
    {
        protocol::TMGetObjectByHash reply;
        reply.set_query (false);

        if (request->has_seq ())
            reply.set_seq (request->seq ());

        reply.set_ledgerhash (request->ledgerhash ());
        reply.set_type (protocol::TMGetObjectByHash::otFETCH_PACK);

        // Building a fetch pack:
        //  1. Add the header for the requested ledger.
        //  2. Add the nodes for the AccountStateMap of that ledger.
        //  3. If there are transactions, add the nodes for the
        //     transactions of the ledger.
        //  4. If the FetchPack now contains at least 512 entries then stop.
        //  5. If not very much time has elapsed, then loop back and repeat
        //     the same process adding the previous ledger to the FetchPack.
        do
        {
            std::uint32_t lSeq = wantLedger->getLedgerSeq ();

            protocol::TMIndexedObject& newObj = *reply.add_objects ();
            newObj.set_hash (wantLedger->getHash ().begin (), 256 / 8);
            Serializer s (256);
            s.add32 (HashPrefix::ledgerMaster);
            wantLedger->addRaw (s);
            newObj.set_data (s.getDataPtr (), s.getLength ());
            newObj.set_ledgerseq (lSeq);

            wantLedger->stateMap().getFetchPack
                (&haveLedger->stateMap(), true, 16384,
                    std::bind (fpAppender, &reply, lSeq, std::placeholders::_1,
                               std::placeholders::_2));

            if (wantLedger->getTransHash ().isNonZero ())
                wantLedger->txMap().getFetchPack (
                    nullptr, true, 512,
                    std::bind (fpAppender, &reply, lSeq, std::placeholders::_1,
                               std::placeholders::_2));

            if (reply.objects ().size () >= 512)
                break;

            // move may save a ref/unref
            haveLedger = std::move (wantLedger);
            wantLedger = getLedgerByHash (haveLedger->getParentHash ());
        }
        while (wantLedger &&
               UptimeTimer::getInstance ().getElapsedSeconds () <= uUptime + 1);

        m_journal.info
            << "Built fetch pack with " << reply.objects ().size () << " nodes";
        auto msg = std::make_shared<Message> (reply, protocol::mtGET_OBJECTS);
        peer->send (msg);
    }
    catch (...)
    {
        m_journal.warning << "Exception building fetch pack";
    }
}
/** Number of entries currently held in the fetch pack cache. */
std::size_t LedgerMasterImp::getFetchPackCacheSize () const
{
    auto const entries = fetch_packs_.getCacheSize ();
    return entries;
}
//------------------------------------------------------------------------------
// LedgerMaster is a Stoppable, so it participates in the application's
// orderly start/stop sequence as a child of `parent`.
LedgerMaster::LedgerMaster (Stoppable& parent)
    : Stoppable ("LedgerMaster", parent)
{
}
/** Factory: construct the concrete LedgerMaster implementation.
    Returns an owning pointer to the interface type.
*/
std::unique_ptr <LedgerMaster>
make_LedgerMaster (
    Config const& config,
    Stopwatch& stopwatch,
    beast::Stoppable& parent,
    beast::insight::Collector::ptr const& collector,
    beast::Journal journal)
{
    auto impl = std::make_unique <LedgerMasterImp> (
        config, stopwatch, parent, collector, journal);
    return impl;
}
} // ripple

View File

@@ -170,9 +170,12 @@ public:
}
void
missing_node (std::uint32_t refNum) override
missing_node (std::uint32_t seq) override
{
getApp().getOPs().missingNodeInLedger (refNum);
uint256 const hash = getApp().getLedgerMaster ().getHashBySeq (seq);
if (hash.isZero())
getApp().getInboundLedgers ().acquire (
hash, seq, InboundLedger::fcGENERIC);
}
};
@@ -369,8 +372,9 @@ public:
, m_pathRequests (new PathRequests (
m_logs.journal("PathRequest"), m_collectorManager->collector ()))
, m_ledgerMaster (make_LedgerMaster (getConfig (), *m_jobQueue,
m_collectorManager->collector (), m_logs.journal("LedgerMaster")))
, m_ledgerMaster (make_LedgerMaster (getConfig (), stopwatch (),
*m_jobQueue, m_collectorManager->collector (),
m_logs.journal("LedgerMaster")))
// VFALCO NOTE must come before NetworkOPs to prevent a crash due
// to dependencies in the destructor.
@@ -1018,8 +1022,8 @@ public:
getInboundLedgers().sweep();
AcceptedLedger::sweep();
family().treecache().sweep();
getOPs().sweepFetchPack();
cachedSLEs_.expire();
// VFALCO NOTE does the call to sweep() happen on another thread?
m_sweepTimer.setExpiration (getConfig ().getSize (siSweepInterval));
}
@@ -1094,7 +1098,7 @@ ApplicationImp::getLastFullLedger()
ledger->setClosed ();
if (getApp().getOPs ().haveLedger (ledgerSeq))
if (getApp().getLedgerMaster ().haveLedger (ledgerSeq))
{
ledger->setAccepted ();
ledger->setValidated ();

View File

@@ -93,7 +93,8 @@ void startServer ()
Resource::Charge loadType = Resource::feeReferenceRPC;
RPC::Context context {
jvCommand, loadType, getApp().getOPs (), Role::ADMIN};
jvCommand, loadType, getApp().getOPs (),
getApp().getLedgerMaster(), Role::ADMIN};
Json::Value jvResult;
RPC::doCommand (context, jvResult);

View File

@@ -129,9 +129,6 @@ public:
, mConsensus (make_Consensus ())
, m_ledgerMaster (ledgerMaster)
, mCloseTimeOffset (0)
, mFetchPack ("FetchPack", 65536, 45, clock,
deprecatedLogs().journal("TaggedCache"))
, mFetchSeq (0)
, mLastLoadBase (256)
, mLastLoadFactor (256)
, m_job_queue (job_queue)
@@ -151,60 +148,18 @@ public:
private:
std::uint32_t getCloseTimeNC (int& offset) const;
bool isValidated (std::uint32_t seq, uint256 const& hash);
public:
void closeTimeOffset (int) override;
/** On return the offset param holds the System time offset in seconds.
*/
boost::posix_time::ptime getNetworkTimePT(int& offset) const;
std::uint32_t getLedgerID (uint256 const& hash) override;
std::uint32_t getCurrentLedgerID () override;
OperatingMode getOperatingMode () const override
{
return mMode;
}
std::string strOperatingMode () const override;
Ledger::pointer getClosedLedger () override
{
return m_ledgerMaster.getClosedLedger ();
}
Ledger::pointer getValidatedLedger () override
{
return m_ledgerMaster.getValidatedLedger ();
}
Ledger::pointer getCurrentLedger () override
{
return m_ledgerMaster.getCurrentLedger ();
}
Ledger::pointer getLedgerByHash (uint256 const& hash) override
{
return m_ledgerMaster.getLedgerByHash (hash);
}
Ledger::pointer getLedgerBySeq (const std::uint32_t seq) override;
void missingNodeInLedger (const std::uint32_t seq) override;
uint256 getClosedLedgerHash () override
{
return m_ledgerMaster.getClosedLedger ()->getHash ();
}
// Do we have this inclusive range of ledgers in our database
bool haveLedger (std::uint32_t seq) override;
std::uint32_t getValidatedSeq () override;
bool isValidated (Ledger::ref l) override
{
return isValidated (l->getLedgerSeq (), l->getHash ());
}
bool getValidatedRange (
std::uint32_t& minVal, std::uint32_t& maxVal) override
{
return m_ledgerMaster.getValidatedRange (minVal, maxVal);
}
//
// Transaction operations.
//
@@ -281,18 +236,6 @@ public:
protocol::TxSetStatus status);
void mapComplete (uint256 const& hash, std::shared_ptr<SHAMap> const& map) override;
void makeFetchPack (
Job&, std::weak_ptr<Peer> peer,
std::shared_ptr<protocol::TMGetObjectByHash> request,
uint256 haveLedger, std::uint32_t uUptime) override;
bool shouldFetchPack (std::uint32_t seq) override;
void gotFetchPack (bool progress, std::uint32_t seq) override;
void addFetchPack (
uint256 const& hash, std::shared_ptr< Blob >& data) override;
bool getFetchPack (uint256 const& hash, Blob& data) override;
int getFetchSize () override;
void sweepFetchPack () override;
// Network state machine.
@@ -524,9 +467,6 @@ private:
SubMapType mSubTransactions; // All accepted transactions.
SubMapType mSubRTTransactions; // All proposed and accepted transactions.
TaggedCache<uint256, Blob> mFetchPack;
std::uint32_t mFetchSeq;
std::uint32_t mLastLoadBase;
std::uint32_t mLastLoadFactor;
@@ -755,44 +695,6 @@ void NetworkOPsImp::closeTimeOffset (int offset)
}
}
// Map a ledger hash to its sequence number; returns 0 when the ledger
// is not known to the LedgerMaster.
std::uint32_t NetworkOPsImp::getLedgerID (uint256 const& hash)
{
    auto const ledger = m_ledgerMaster.getLedgerByHash (hash);

    if (! ledger)
        return 0;

    return ledger->getLedgerSeq ();
}
// Fetch a ledger by sequence number; thin forwarder to LedgerMaster.
Ledger::pointer NetworkOPsImp::getLedgerBySeq (const std::uint32_t seq)
{
    return m_ledgerMaster.getLedgerBySeq (seq);
}
// Sequence number of the open (current) ledger.
// NOTE(review): assumes getCurrentLedger() never returns null — confirm.
std::uint32_t NetworkOPsImp::getCurrentLedgerID ()
{
    return m_ledgerMaster.getCurrentLedger ()->getLedgerSeq ();
}
// Do we hold the ledger with this sequence number locally?
// Thin forwarder to LedgerMaster.
bool NetworkOPsImp::haveLedger (std::uint32_t seq)
{
    return m_ledgerMaster.haveLedger (seq);
}
// Sequence number of the most recent fully-validated ledger.
std::uint32_t NetworkOPsImp::getValidatedSeq ()
{
    return m_ledgerMaster.getValidLedgerIndex ();
}
// A (seq, hash) pair names a validated ledger when: we hold that ledger,
// the sequence is at or below the last validated sequence, and the hash
// matches our record for that sequence.
// NOTE(review): dereferences getValidatedLedger() unconditionally —
// presumably never null once haveLedger() succeeds; confirm.
bool NetworkOPsImp::isValidated (std::uint32_t seq, uint256 const& hash)
{
    bool const have = haveLedger (seq);

    if (have &&
        (seq <= m_ledgerMaster.getValidatedLedger ()->getLedgerSeq ()))
    {
        return m_ledgerMaster.getHashBySeq (seq) == hash;
    }

    return false;
}
void NetworkOPsImp::submitTransaction (Job&, STTx::pointer iTrans)
{
if (isNeedNetworkLedger ())
@@ -1853,7 +1755,7 @@ NetworkOPs::AccountTxs NetworkOPsImp::getAccountTxs (
ledgerSeq.value_or (0));
m_journal.warning << "Recovering ledger " << seq
<< ", txn " << txn->getID();
Ledger::pointer ledger = getLedgerBySeq(seq);
Ledger::pointer ledger = m_ledgerMaster.getLedgerBySeq(seq);
if (ledger)
ledger->pendSaveValidated(false, false);
}
@@ -1982,7 +1884,6 @@ Json::Value NetworkOPsImp::getConsensusInfo ()
return info;
}
Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin)
{
Json::Value info = Json::objectValue;
@@ -2026,7 +1927,7 @@ Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin)
if (m_amendmentBlocked)
info[jss::amendment_blocked] = true;
size_t fp = mFetchPack.getCacheSize ();
auto const fp = m_ledgerMaster.getFetchPackCacheSize ();
if (fp != 0)
info[jss::fetch_pack] = Json::UInt (fp);
@@ -2084,12 +1985,12 @@ Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin)
}
bool valid = false;
Ledger::pointer lpClosed = getValidatedLedger ();
Ledger::pointer lpClosed = m_ledgerMaster.getValidatedLedger ();
if (lpClosed)
valid = true;
else
lpClosed = getClosedLedger ();
lpClosed = m_ledgerMaster.getClosedLedger ();
if (lpClosed)
{
@@ -2563,7 +2464,7 @@ std::uint32_t NetworkOPsImp::acceptLedger ()
// <-- bool: true=added, false=already there
bool NetworkOPsImp::subLedger (InfoSub::ref isrListener, Json::Value& jvResult)
{
Ledger::pointer lpClosed = getValidatedLedger ();
Ledger::pointer lpClosed = m_ledgerMaster.getValidatedLedger ();
if (lpClosed)
{
@@ -3051,212 +2952,6 @@ void NetworkOPsImp::getBookPage (
#endif
// Append one (ledgerSeq, hash, blob) entry to a fetch pack reply message.
static void fpAppender (
    protocol::TMGetObjectByHash* reply, std::uint32_t ledgerSeq,
    uint256 const& hash, const Blob& blob)
{
    auto& obj = * (reply->add_objects ());
    obj.set_ledgerseq (ledgerSeq);
    obj.set_hash (hash.begin (), 256 / 8);
    obj.set_data (&blob[0], blob.size ());
}
/** Build a fetch pack and send it to the requesting peer.

    A fetch pack contains the ledger header plus account-state and
    transaction nodes for the ledger preceding `haveLedgerHash` and,
    time permitting, earlier ledgers too.

    @param wPeer          the requesting peer (may have disconnected)
    @param request        the TMGetObjectByHash request being answered
    @param haveLedgerHash hash of the ledger the peer already has
    @param uUptime        server uptime (seconds) when the request was
                          queued; used to drop stale requests
*/
void NetworkOPsImp::makeFetchPack (
    Job&, std::weak_ptr<Peer> wPeer,
    std::shared_ptr<protocol::TMGetObjectByHash> request,
    uint256 haveLedgerHash, std::uint32_t uUptime)
{
    // Drop requests that waited in the job queue for more than a second.
    if (UptimeTimer::getInstance ().getElapsedSeconds () > (uUptime + 1))
    {
        m_journal.info << "Fetch pack request got stale";
        return;
    }

    // Don't serve peers while under local load or badly out of sync.
    if (getApp().getFeeTrack ().isLoadedLocal () ||
        (m_ledgerMaster.getValidatedLedgerAge() > 40))
    {
        m_journal.info << "Too busy to make fetch pack";
        return;
    }

    Peer::ptr peer = wPeer.lock ();

    // The peer may have gone away while the job was queued.
    if (!peer)
        return;

    Ledger::pointer haveLedger = getLedgerByHash (haveLedgerHash);

    if (!haveLedger)
    {
        // NOTE(review): streams the (null) shared_ptr, not the hash.
        m_journal.info
            << "Peer requests fetch pack for ledger we don't have: "
            << haveLedger;
        peer->charge (Resource::feeRequestNoReply);
        return;
    }

    if (!haveLedger->isClosed ())
    {
        m_journal.warning
            << "Peer requests fetch pack from open ledger: "
            << haveLedger;
        peer->charge (Resource::feeInvalidRequest);
        return;
    }

    if (haveLedger->getLedgerSeq() < m_ledgerMaster.getEarliestFetch())
    {
        m_journal.debug << "Peer requests fetch pack that is too early";
        peer->charge (Resource::feeInvalidRequest);
        return;
    }

    // The pack describes the ledger before the one the peer has.
    Ledger::pointer wantLedger = getLedgerByHash (haveLedger->getParentHash ());

    if (!wantLedger)
    {
        m_journal.info
            << "Peer requests fetch pack for ledger whose predecessor we "
            << "don't have: " << haveLedger;
        peer->charge (Resource::feeRequestNoReply);
        return;
    }

    try
    {
        protocol::TMGetObjectByHash reply;
        reply.set_query (false);

        if (request->has_seq ())
            reply.set_seq (request->seq ());

        reply.set_ledgerhash (request->ledgerhash ());
        reply.set_type (protocol::TMGetObjectByHash::otFETCH_PACK);

        // Building a fetch pack:
        //  1. Add the header for the requested ledger.
        //  2. Add the nodes for the AccountStateMap of that ledger.
        //  3. If there are transactions, add the nodes for the
        //     transactions of the ledger.
        //  4. If the FetchPack now contains at least 512 entries then stop.
        //  5. If not very much time has elapsed, then loop back and repeat
        //     the same process adding the previous ledger to the FetchPack.
        do
        {
            std::uint32_t lSeq = wantLedger->getLedgerSeq ();

            protocol::TMIndexedObject& newObj = *reply.add_objects ();
            newObj.set_hash (wantLedger->getHash ().begin (), 256 / 8);
            Serializer s (256);
            s.add32 (HashPrefix::ledgerMaster);
            wantLedger->addRaw (s);
            newObj.set_data (s.getDataPtr (), s.getLength ());
            newObj.set_ledgerseq (lSeq);

            wantLedger->stateMap().getFetchPack
                (&haveLedger->stateMap(), true, 16384,
                    std::bind (fpAppender, &reply, lSeq, std::placeholders::_1,
                               std::placeholders::_2));

            if (wantLedger->getTransHash ().isNonZero ())
                wantLedger->txMap().getFetchPack (
                    nullptr, true, 512,
                    std::bind (fpAppender, &reply, lSeq, std::placeholders::_1,
                               std::placeholders::_2));

            if (reply.objects ().size () >= 512)
                break;

            // move may save a ref/unref
            haveLedger = std::move (wantLedger);
            wantLedger = getLedgerByHash (haveLedger->getParentHash ());
        }
        while (wantLedger &&
               UptimeTimer::getInstance ().getElapsedSeconds () <= uUptime + 1);

        m_journal.info
            << "Built fetch pack with " << reply.objects ().size () << " nodes";
        auto msg = std::make_shared<Message> (reply, protocol::mtGET_OBJECTS);
        peer->send (msg);
    }
    catch (...)
    {
        // NOTE(review): "pach" typo in the runtime string, left unchanged.
        m_journal.warning << "Exception building fetch pach";
    }
}
// Expire stale entries from the fetch pack cache (called periodically).
void NetworkOPsImp::sweepFetchPack ()
{
    mFetchPack.sweep ();
}
// Store a fetch pack object received from a peer, keyed by its hash.
// canonicalize deduplicates: if the hash is already cached, `data` is
// replaced by the cached copy.
void NetworkOPsImp::addFetchPack (
    uint256 const& hash, std::shared_ptr< Blob >& data)
{
    mFetchPack.canonicalize (hash, data);
}
// Retrieve (and consume) a fetch pack entry by hash.
// Returns true and fills `data` only if the cached bytes actually hash
// to `hash`; entries are removed from the cache either way.
bool NetworkOPsImp::getFetchPack (uint256 const& hash, Blob& data)
{
    if (! mFetchPack.retrieve (hash, data))
        return false;

    // Single use: drop the entry now that it has been claimed.
    mFetchPack.del (hash, false);

    if (hash == sha512Half(makeSlice(data)))
        return true;

    m_journal.warning << "Bad entry in fetch pack";
    return false;
}
// Decide whether to request a fetch pack for ledger `seq`.
// Returns true only the first time a given sequence is seen, throttling
// duplicate requests for the same ledger.
bool NetworkOPsImp::shouldFetchPack (std::uint32_t seq)
{
    bool const firstRequest = (mFetchSeq != seq);

    if (firstRequest)
        mFetchSeq = seq;

    return firstRequest;
}
// Number of objects currently held in the fetch pack cache.
int NetworkOPsImp::getFetchSize ()
{
    return mFetchPack.getCacheSize ();
}
void NetworkOPsImp::gotFetchPack (bool progress, std::uint32_t seq)
{
// FIXME: Calling this function more than once will result in
// InboundLedgers::gotFetchPack being called more than once
// which is expensive. A flag should track whether we've already dispatched
m_job_queue.addJob (
jtLEDGER_DATA, "gotFetchPack",
std::bind (&InboundLedgers::gotFetchPack,
&getApp().getInboundLedgers (), std::placeholders::_1));
}
// Called when a node is found to be missing from ledger `seq`.
// If we can map the sequence to a hash, kick off acquisition of that
// ledger; otherwise there is nothing we can fetch.
void NetworkOPsImp::missingNodeInLedger (std::uint32_t seq)
{
    auto const hash = getApp().getLedgerMaster ().getHashBySeq (seq);

    if (hash.isZero())
    {
        // Without a hash we cannot identify what to acquire.
        m_journal.warning
            << "Missing a node in ledger " << seq << " cannot fetch";
        return;
    }

    m_journal.warning << "Missing a node in ledger " << seq << " fetching";
    getApp().getInboundLedgers ().acquire (
        hash, seq, InboundLedger::fcGENERIC);
}
//------------------------------------------------------------------------------
NetworkOPs::NetworkOPs (Stoppable& parent)

View File

@@ -106,25 +106,9 @@ public:
// Our best estimate of current ledger close time
virtual std::uint32_t getCloseTimeNC () const = 0;
virtual void closeTimeOffset (int) = 0;
virtual std::uint32_t getLedgerID (uint256 const& hash) = 0;
virtual std::uint32_t getCurrentLedgerID () = 0;
virtual OperatingMode getOperatingMode () const = 0;
virtual std::string strOperatingMode () const = 0;
virtual Ledger::pointer getClosedLedger () = 0;
virtual Ledger::pointer getValidatedLedger () = 0;
virtual Ledger::pointer getCurrentLedger () = 0;
virtual Ledger::pointer getLedgerByHash (uint256 const& hash) = 0;
virtual Ledger::pointer getLedgerBySeq (const std::uint32_t seq) = 0;
virtual void missingNodeInLedger (const std::uint32_t seq) = 0;
virtual uint256 getClosedLedgerHash () = 0;
// Do we have this inclusive range of ledgers in our database
virtual bool haveLedger (std::uint32_t seq) = 0;
virtual std::uint32_t getValidatedSeq () = 0;
virtual bool isValidated (Ledger::ref l) = 0;
virtual bool getValidatedRange (std::uint32_t& minVal, std::uint32_t& maxVal) = 0;
//--------------------------------------------------------------------------
//
@@ -182,19 +166,6 @@ public:
virtual void mapComplete (uint256 const& hash,
std::shared_ptr<SHAMap> const& map) = 0;
// Fetch packs
virtual void makeFetchPack (Job&, std::weak_ptr<Peer> peer,
std::shared_ptr<protocol::TMGetObjectByHash> request,
uint256 wantLedger, std::uint32_t uUptime) = 0;
virtual bool shouldFetchPack (std::uint32_t seq) = 0;
virtual void gotFetchPack (bool progress, std::uint32_t seq) = 0;
virtual void addFetchPack (
uint256 const& hash, std::shared_ptr< Blob >& data) = 0;
virtual bool getFetchPack (uint256 const& hash, Blob& data) = 0;
virtual int getFetchSize () = 0;
virtual void sweepFetchPack () = 0;
// network state machine
virtual void endConsensus (bool correctLCL) = 0;
virtual void setStandAlone () = 0;

View File

@@ -19,6 +19,7 @@
#include <BeastConfig.h>
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/impl/AccountTxPaging.h>
#include <ripple/app/tx/Transaction.h>
@@ -55,7 +56,7 @@ convertBlobsToTxResult (
void
saveLedgerAsync (std::uint32_t seq)
{
Ledger::pointer ledger = getApp().getOPs().getLedgerBySeq(seq);
Ledger::pointer ledger = getApp().getLedgerMaster().getLedgerBySeq(seq);
if (ledger)
ledger->pendSaveValidated(false, false);
}

View File

@@ -127,13 +127,13 @@ public:
m_name << " target age set to " << m_target_age;
}
int getCacheSize ()
int getCacheSize () const
{
lock_guard lock (m_mutex);
return m_cache_count;
}
int getTrackSize ()
int getTrackSize () const
{
lock_guard lock (m_mutex);
return m_cache.size ();

View File

@@ -539,7 +539,7 @@ PeerImp::onTimer (error_code const& ec)
}
else
{
// We have an outstanding ping, raise latency
// We have an outstanding ping, raise latency
auto minLatency = std::chrono::duration_cast <std::chrono::milliseconds>
(clock_type::now() - lastPingTime_);
@@ -1572,7 +1572,7 @@ PeerImp::onMessage (std::shared_ptr <protocol::TMGetObjectByHash> const& m)
"GetObj: Full fetch pack for " << pLSeq;
pLSeq = obj.ledgerseq ();
pLDo = !getApp().getOPs ().haveLedger (pLSeq);
pLDo = !getApp().getLedgerMaster ().haveLedger (pLSeq);
if (!pLDo)
p_journal_.debug <<
@@ -1591,7 +1591,7 @@ PeerImp::onMessage (std::shared_ptr <protocol::TMGetObjectByHash> const& m)
std::make_shared< Blob > (
obj.data ().begin (), obj.data ().end ()));
getApp().getOPs ().addFetchPack (hash, data);
getApp().getLedgerMaster ().addFetchPack (hash, data);
}
}
}
@@ -1601,7 +1601,7 @@ PeerImp::onMessage (std::shared_ptr <protocol::TMGetObjectByHash> const& m)
p_journal_.debug << "GetObj: Partial fetch pack for " << pLSeq;
if (packet.type () == protocol::TMGetObjectByHash::otFETCH_PACK)
getApp().getOPs ().gotFetchPack (progress, pLSeq);
getApp().getLedgerMaster ().gotFetchPack (progress, pLSeq);
}
}
@@ -1681,7 +1681,7 @@ PeerImp::doFetchPack (const std::shared_ptr<protocol::TMGetObjectByHash>& packet
memcpy (hash.begin (), packet->ledgerhash ().data (), 32);
getApp().getJobQueue ().addJob (jtPACK, "MakeFetchPack",
std::bind (&NetworkOPs::makeFetchPack, &getApp().getOPs (),
std::bind (&LedgerMaster::makeFetchPack, &getApp().getLedgerMaster (),
std::placeholders::_1, std::weak_ptr<PeerImp> (shared_from_this ()),
packet, hash, UptimeTimer::getInstance ().getElapsedSeconds ()));
}

View File

@@ -29,6 +29,7 @@
namespace ripple {
class NetworkOPs;
class LedgerMaster;
namespace RPC {
@@ -38,6 +39,7 @@ struct Context
Json::Value params;
Resource::Charge& loadType;
NetworkOPs& netOps;
LedgerMaster& ledgerMaster;
Role role;
InfoSub::pointer infoSub;
Suspend suspend;

View File

@@ -30,7 +30,7 @@ Json::Value doAccountCurrencies (RPC::Context& context)
// Get the current ledger
Ledger::pointer ledger;
Json::Value result (RPC::lookupLedger (params, ledger, context.netOps));
Json::Value result (RPC::lookupLedger (params, ledger, context.ledgerMaster));
if (!ledger)
return result;

View File

@@ -38,7 +38,7 @@ Json::Value doAccountInfo (RPC::Context& context)
auto& params = context.params;
Ledger::pointer ledger;
Json::Value result = RPC::lookupLedger (params, ledger, context.netOps);
Json::Value result = RPC::lookupLedger (params, ledger, context.ledgerMaster);
if (!ledger)
return result;

View File

@@ -82,7 +82,7 @@ Json::Value doAccountLines (RPC::Context& context)
return RPC::missing_field_error (jss::account);
Ledger::pointer ledger;
Json::Value result (RPC::lookupLedger (params, ledger, context.netOps));
Json::Value result (RPC::lookupLedger (params, ledger, context.ledgerMaster));
if (! ledger)
return result;

View File

@@ -47,7 +47,7 @@ Json::Value doAccountObjects (RPC::Context& context)
return RPC::missing_field_error (jss::account);
Ledger::pointer ledger;
auto result = RPC::lookupLedger (params, ledger, context.netOps);
auto result = RPC::lookupLedger (params, ledger, context.ledgerMaster);
if (ledger == nullptr)
return result;

View File

@@ -50,7 +50,7 @@ Json::Value doAccountOffers (RPC::Context& context)
return RPC::missing_field_error (jss::account);
Ledger::pointer ledger;
Json::Value result (RPC::lookupLedger (params, ledger, context.netOps));
Json::Value result (RPC::lookupLedger (params, ledger, context.ledgerMaster));
if (! ledger)
return result;

View File

@@ -46,7 +46,7 @@ Json::Value doAccountTx (RPC::Context& context)
std::uint32_t uLedgerMax;
std::uint32_t uValidatedMin;
std::uint32_t uValidatedMax;
bool bValidated = context.netOps.getValidatedRange (
bool bValidated = context.ledgerMaster.getValidatedRange (
uValidatedMin, uValidatedMax);
if (!bValidated)
@@ -84,7 +84,7 @@ Json::Value doAccountTx (RPC::Context& context)
else
{
Ledger::pointer l;
Json::Value ret = RPC::lookupLedger (params, l, context.netOps);
Json::Value ret = RPC::lookupLedger (params, l, context.ledgerMaster);
if (!l)
return ret;

View File

@@ -49,7 +49,7 @@ Json::Value doAccountTxOld (RPC::Context& context)
std::uint32_t uLedgerMax;
std::uint32_t uValidatedMin;
std::uint32_t uValidatedMax;
bool bValidated = context.netOps.getValidatedRange (
bool bValidated = context.ledgerMaster.getValidatedRange (
uValidatedMin, uValidatedMax);
if (!context.params.isMember (jss::account))
@@ -104,7 +104,7 @@ Json::Value doAccountTxOld (RPC::Context& context)
else
{
Ledger::pointer l;
Json::Value ret = RPC::lookupLedger (context.params, l, context.netOps);
Json::Value ret = RPC::lookupLedger (context.params, l, context.ledgerMaster);
if (!l)
return ret;

View File

@@ -32,7 +32,7 @@ Json::Value doBookOffers (RPC::Context& context)
Ledger::pointer lpLedger;
Json::Value jvResult (
RPC::lookupLedger (context.params, lpLedger, context.netOps));
RPC::lookupLedger (context.params, lpLedger, context.ledgerMaster));
if (!lpLedger)
return jvResult;

View File

@@ -70,7 +70,7 @@ Json::Value doCanDelete (RPC::Context& context)
canDeleteStr.find_first_not_of("0123456789abcdef") ==
std::string::npos)
{
Ledger::pointer ledger = context.netOps.getLedgerByHash (
Ledger::pointer ledger = context.ledgerMaster.getLedgerByHash (
from_hex_text<uint256>(canDeleteStr));
if (!ledger)

View File

@@ -50,7 +50,7 @@ Json::Value doGatewayBalances (RPC::Context& context)
// Get the current ledger
Ledger::pointer ledger;
Json::Value result (RPC::lookupLedger (params, ledger, context.netOps));
Json::Value result (RPC::lookupLedger (params, ledger, context.ledgerMaster));
if (!ledger)
return result;

View File

@@ -41,7 +41,7 @@ Status LedgerHandler::check ()
if (!needsLedger)
return Status::OK;
if (auto s = RPC::lookupLedger (params, ledger_, context_.netOps, result_))
if (auto s = RPC::lookupLedger (params, ledger_, context_.ledgerMaster, result_))
return s;
bool bFull = params[jss::full].asBool();

View File

@@ -35,8 +35,8 @@ Json::Value doLedgerAccept (RPC::Context& context)
{
context.netOps.acceptLedger ();
jvResult[jss::ledger_current_index]
= context.netOps.getCurrentLedgerID ();
jvResult[jss::ledger_current_index] =
context.ledgerMaster.getCurrentLedgerIndex ();
}
return jvResult;

View File

@@ -23,11 +23,12 @@ namespace ripple {
Json::Value doLedgerClosed (RPC::Context& context)
{
uint256 uLedger = context.netOps.getClosedLedgerHash ();
auto ledger = context.ledgerMaster.getClosedLedger ();
assert (ledger);
Json::Value jvResult;
jvResult[jss::ledger_index] = context.netOps.getLedgerID (uLedger);
jvResult[jss::ledger_hash] = to_string (uLedger);
jvResult[jss::ledger_index] = ledger->getLedgerSeq ();
jvResult[jss::ledger_hash] = to_string (ledger->getHash ());
return jvResult;
}

View File

@@ -24,7 +24,8 @@ namespace ripple {
Json::Value doLedgerCurrent (RPC::Context& context)
{
Json::Value jvResult;
jvResult[jss::ledger_current_index] = context.netOps.getCurrentLedgerID ();
jvResult[jss::ledger_current_index] =
context.ledgerMaster.getCurrentLedgerIndex ();
return jvResult;
}

View File

@@ -40,7 +40,7 @@ Json::Value doLedgerData (RPC::Context& context)
Ledger::pointer lpLedger;
auto const& params = context.params;
Json::Value jvResult = RPC::lookupLedger (params, lpLedger, context.netOps);
Json::Value jvResult = RPC::lookupLedger (params, lpLedger, context.ledgerMaster);
if (!lpLedger)
return jvResult;

View File

@@ -32,7 +32,7 @@ Json::Value doLedgerEntry (RPC::Context& context)
{
Ledger::pointer lpLedger;
Json::Value jvResult = RPC::lookupLedger (
context.params, lpLedger, context.netOps);
context.params, lpLedger, context.ledgerMaster);
if (!lpLedger)
return jvResult;

View File

@@ -30,7 +30,7 @@ Json::Value doLedgerHeader (RPC::Context& context)
{
Ledger::pointer lpLedger;
Json::Value jvResult = RPC::lookupLedger (
context.params, lpLedger, context.netOps);
context.params, lpLedger, context.ledgerMaster);
if (!lpLedger)
return jvResult;

View File

@@ -84,7 +84,7 @@ Json::Value doNoRippleCheck (RPC::Context& context)
transactions = params["transactions"].asBool();
Ledger::pointer ledger;
Json::Value result (RPC::lookupLedger (params, ledger, context.netOps));
Json::Value result (RPC::lookupLedger (params, ledger, context.ledgerMaster));
if (! ledger)
return result;

View File

@@ -39,14 +39,14 @@ Json::Value doOwnerInfo (RPC::Context& context)
// Get info on account.
auto const& closedLedger = context.netOps.getClosedLedger ();
auto const& closedLedger = context.ledgerMaster.getClosedLedger ();
AccountID accountID;
auto jAccepted = RPC::accountFromString (accountID, strIdent);
ret[jss::accepted] = ! jAccepted ?
context.netOps.getOwnerInfo (closedLedger, accountID) : jAccepted;
auto const& currentLedger = context.netOps.getCurrentLedger ();
auto const& currentLedger = context.ledgerMaster.getCurrentLedger ();
auto jCurrent = RPC::accountFromString (accountID, strIdent);
ret[jss::current] = ! jCurrent ?

View File

@@ -24,7 +24,7 @@ namespace ripple {
Json::Value doPathFind (RPC::Context& context)
{
Ledger::pointer lpLedger = context.netOps.getClosedLedger();
Ledger::pointer lpLedger = context.ledgerMaster.getClosedLedger();
if (!context.params.isMember (jss::subcommand) ||
!context.params[jss::subcommand].isString ())

View File

@@ -74,7 +74,7 @@ Json::Value doRipplePathFind (RPC::Context& context)
{
// The caller specified a ledger
jvResult = RPC::lookupLedger (
context.params, lpLedger, context.netOps);
context.params, lpLedger, context.ledgerMaster);
if (!lpLedger)
return jvResult;
}
@@ -87,7 +87,7 @@ Json::Value doRipplePathFind (RPC::Context& context)
}
context.loadType = Resource::feeHighBurdenRPC;
lpLedger = context.netOps.getClosedLedger();
lpLedger = context.ledgerMaster.getClosedLedger();
PathRequest::pointer request;
context.suspend ([&request, &context, &jvResult, &lpLedger](RPC::Callback const& c)
@@ -160,7 +160,7 @@ Json::Value doRipplePathFind (RPC::Context& context)
{
// The closed ledger is recent and any nodes made resident
// have the best chance to persist
lpLedger = context.netOps.getClosedLedger();
lpLedger = context.ledgerMaster.getClosedLedger();
cache = getApp().getPathRequests().getLineCache(lpLedger, false);
}
@@ -205,7 +205,7 @@ Json::Value doRipplePathFind (RPC::Context& context)
auto contextPaths = context.params.isMember(jss::paths) ?
boost::optional<Json::Value>(context.params[jss::paths]) :
boost::optional<Json::Value>(boost::none);
auto pathFindResult = ripplePathFind(cache, raSrc, raDst, saDstAmount,
auto pathFindResult = ripplePathFind(cache, raSrc, raDst, saDstAmount,
jvSrcCurrencies, contextPaths, level);
if (!pathFindResult.first)
return pathFindResult.second;
@@ -222,9 +222,9 @@ Json::Value doRipplePathFind (RPC::Context& context)
}
std::pair<bool, Json::Value>
ripplePathFind (RippleLineCache::pointer const& cache,
ripplePathFind (RippleLineCache::pointer const& cache,
AccountID const& raSrc, AccountID const& raDst,
STAmount const& saDstAmount, Json::Value const& jvSrcCurrencies,
STAmount const& saDstAmount, Json::Value const& jvSrcCurrencies,
boost::optional<Json::Value> const& contextPaths, int const& level)
{
FindPaths fp(

View File

@@ -34,7 +34,7 @@ Json::Value doTransactionEntry (RPC::Context& context)
Json::Value jvResult = RPC::lookupLedger (
context.params,
lpLedger,
context.netOps);
context.ledgerMaster);
if (!lpLedger)
return jvResult;

View File

@@ -43,6 +43,19 @@ isHexTxID (std::string const& txid)
return (ret == txid.end ());
}
static
bool
isValidated (RPC::Context& context, std::uint32_t seq, uint256 const& hash)
{
if (!context.ledgerMaster.haveLedger (seq))
return false;
if (seq > context.ledgerMaster.getValidatedLedger ()->getLedgerSeq ())
return false;
return context.ledgerMaster.getHashBySeq (seq) == hash;
}
Json::Value doTx (RPC::Context& context)
{
if (!context.params.isMember (jss::transaction))
@@ -67,7 +80,7 @@ Json::Value doTx (RPC::Context& context)
if (txn->getLedger () == 0)
return ret;
if (auto lgr = context.netOps.getLedgerBySeq (txn->getLedger ()))
if (auto lgr = context.ledgerMaster.getLedgerBySeq (txn->getLedger ()))
{
bool okay = false;
@@ -95,7 +108,8 @@ Json::Value doTx (RPC::Context& context)
}
if (okay)
ret[jss::validated] = context.netOps.isValidated (lgr);
ret[jss::validated] = isValidated (
context, lgr->getLedgerSeq (), lgr->getHash ());
}
return ret;

View File

@@ -25,19 +25,19 @@ namespace RPC {
namespace {
bool isValidatedOld ()
bool isValidatedOld (LedgerMaster& ledgerMaster)
{
if (getConfig ().RUN_STANDALONE)
return false;
return getApp ().getLedgerMaster ().getValidatedLedgerAge () >
return ledgerMaster.getValidatedLedgerAge () >
Tuning::maxValidatedLedgerAge;
}
Status ledgerFromRequest (
Json::Value const& params,
Ledger::pointer& ledger,
NetworkOPs& netOps)
LedgerMaster& ledgerMaster)
{
static auto const minSequenceGap = 10;
@@ -65,18 +65,18 @@ Status ledgerFromRequest (
if (! ledgerHash.SetHex (hashValue.asString ()))
return {rpcINVALID_PARAMS, "ledgerHashMalformed"};
ledger = netOps.getLedgerByHash (ledgerHash);
ledger = ledgerMaster.getLedgerByHash (ledgerHash);
if (ledger == nullptr)
return {rpcLGR_NOT_FOUND, "ledgerNotFound"};
}
else if (indexValue.isNumeric())
{
ledger = netOps.getLedgerBySeq (indexValue.asInt ());
ledger = ledgerMaster.getLedgerBySeq (indexValue.asInt ());
if (ledger == nullptr)
return {rpcLGR_NOT_FOUND, "ledgerNotFound"};
if (ledger->getLedgerSeq () > netOps.getValidatedSeq () &&
isValidatedOld ())
if (ledger->getLedgerSeq () > ledgerMaster.getValidLedgerIndex () &&
isValidatedOld (ledgerMaster))
{
ledger.reset();
return {rpcNO_NETWORK, "InsufficientNetworkMode"};
@@ -84,13 +84,13 @@ Status ledgerFromRequest (
}
else
{
if (isValidatedOld ())
if (isValidatedOld (ledgerMaster))
return {rpcNO_NETWORK, "InsufficientNetworkMode"};
auto const index = indexValue.asString ();
if (index == "validated")
{
ledger = netOps.getValidatedLedger ();
ledger = ledgerMaster.getValidatedLedger ();
if (ledger == nullptr)
return {rpcNO_NETWORK, "InsufficientNetworkMode"};
@@ -100,12 +100,12 @@ Status ledgerFromRequest (
{
if (index.empty () || index == "current")
{
ledger = netOps.getCurrentLedger ();
ledger = ledgerMaster.getCurrentLedger ();
assert (! ledger->isClosed ());
}
else if (index == "closed")
{
ledger = netOps.getClosedLedger ();
ledger = ledgerMaster.getClosedLedger ();
assert (ledger->isClosed ());
}
else
@@ -117,7 +117,7 @@ Status ledgerFromRequest (
return {rpcNO_NETWORK, "InsufficientNetworkMode"};
if (ledger->getLedgerSeq () + minSequenceGap <
netOps.getValidatedSeq ())
ledgerMaster.getValidLedgerIndex ())
{
ledger.reset ();
return {rpcNO_NETWORK, "InsufficientNetworkMode"};
@@ -130,7 +130,7 @@ Status ledgerFromRequest (
return Status::OK;
}
bool isValidated (Ledger& ledger)
bool isValidated (LedgerMaster& ledgerMaster, Ledger& ledger)
{
if (ledger.isValidated ())
return true;
@@ -144,7 +144,7 @@ bool isValidated (Ledger& ledger)
// Use the skip list in the last validated ledger to see if ledger
// comes before the last validated ledger (and thus has been
// validated).
auto hash = getApp().getLedgerMaster ().walkHashBySeq (seq);
auto hash = ledgerMaster.walkHashBySeq (seq);
if (ledger.getHash() != hash)
return false;
}
@@ -184,10 +184,10 @@ bool isValidated (Ledger& ledger)
Status lookupLedger (
Json::Value const& params,
Ledger::pointer& ledger,
NetworkOPs& netOps,
LedgerMaster& ledgerMaster,
Json::Value& jsonResult)
{
if (auto status = ledgerFromRequest (params, ledger, netOps))
if (auto status = ledgerFromRequest (params, ledger, ledgerMaster))
return status;
if (ledger->isClosed ())
@@ -199,19 +199,18 @@ Status lookupLedger (
{
jsonResult[jss::ledger_current_index] = ledger->getLedgerSeq();
}
jsonResult[jss::validated] = isValidated (*ledger);
jsonResult[jss::validated] = isValidated (ledgerMaster, *ledger);
return Status::OK;
}
Json::Value lookupLedger (
Json::Value const& params,
Ledger::pointer& ledger,
NetworkOPs& netOps)
LedgerMaster& ledgerMaster)
{
Json::Value value (Json::objectValue);
if (auto status = lookupLedger (params, ledger, netOps, value))
if (auto status = lookupLedger (params, ledger, ledgerMaster, value))
status.inject (value);
return value;
}

View File

@@ -32,7 +32,9 @@ namespace RPC {
been filled.
*/
Json::Value lookupLedger (
Json::Value const& request, Ledger::pointer&, NetworkOPs&);
Json::Value const& request,
Ledger::pointer&,
LedgerMaster&);
/** Look up a ledger from a request and fill a Json::Result with the data
representing a ledger.
@@ -41,7 +43,7 @@ Json::Value lookupLedger (
Status lookupLedger (
Json::Value const& request,
Ledger::pointer&,
NetworkOPs&,
LedgerMaster&,
Json::Value& result);
} // RPC

View File

@@ -151,14 +151,14 @@ error_code_i fillHandler (Context& context,
if (! getConfig ().RUN_STANDALONE &&
handler->condition_ & NEEDS_CURRENT_LEDGER)
{
if (getApp ().getLedgerMaster ().getValidatedLedgerAge () >
if (context.ledgerMaster.getValidatedLedgerAge () >
Tuning::maxValidatedLedgerAge)
{
return rpcNO_CURRENT;
}
auto const cID = context.netOps.getCurrentLedgerID ();
auto const vID = context.netOps.getValidatedSeq ();
auto const cID = context.ledgerMaster.getCurrentLedgerIndex ();
auto const vID = context.ledgerMaster.getValidLedgerIndex ();
if (cID + 10 < vID)
{
@@ -169,7 +169,7 @@ error_code_i fillHandler (Context& context,
}
if ((handler->condition_ & NEEDS_CLOSED_LEDGER) &&
!context.netOps.getClosedLedger ())
!context.ledgerMaster.getClosedLedger ())
{
return rpcNO_CLOSED;
}

View File

@@ -109,7 +109,7 @@ void TxnSignApiFacade::snapshotAccountState (AccountID const& accountID)
if (!netOPs_) // Unit testing.
return;
ledger_ = netOPs_->getCurrentLedger ();
ledger_ = getApp().getLedgerMaster ().getCurrentLedger ();
accountID_ = accountID;
sle_ = cachedRead(*ledger_,
keylet::account(accountID_).key, ltACCOUNT_ROOT);
@@ -266,7 +266,7 @@ int TxnSignApiFacade::getValidatedLedgerAge () const
if (!netOPs_) // Unit testing.
return 0;
return getApp( ).getLedgerMaster ().getValidatedLedgerAge ();
return getApp().getLedgerMaster ().getValidatedLedgerAge ();
}
bool TxnSignApiFacade::isLoadedCluster () const

View File

@@ -353,8 +353,8 @@ ServerHandlerImp::processRequest (
auto const start (std::chrono::high_resolution_clock::now ());
RPC::Context context {
params, loadType, m_networkOPs, role, nullptr,
std::move (suspend), std::move (yield)};
params, loadType, m_networkOPs, getApp().getLedgerMaster(), role,
nullptr, std::move (suspend), std::move (yield)};
std::string response;
if (setup_.yieldStrategy.streaming == RPC::YieldStrategy::Streaming::yes)

View File

@@ -268,7 +268,7 @@ Json::Value ConnectionImpl <WebSocket>::invokeCommand (Json::Value& jvRequest)
else
{
RPC::Context context {
jvRequest, loadType, m_netOPs, role,
jvRequest, loadType, m_netOPs, getApp().getLedgerMaster(), role,
std::dynamic_pointer_cast<InfoSub> (this->shared_from_this ())};
RPC::doCommand (context, jvResult[jss::result]);
}