Mirror of https://github.com/XRPLF/rippled.git
Synced 2025-11-04 19:25:51 +00:00

Compare commits: smart-escr... → vlntb/job- (6 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 3592bd43c1 | |
| | aa14097b02 | |
| | 8a5f95c223 | |
| | fbda5ccc15 | |
| | 4cc37b9cd8 | |
| | 3495cb3753 | |
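Every change in this comparison follows one pattern: a lambda handed to JobQueue::addJob is wrapped in perf::measureDurationAndLog, which runs the job body, times it, and logs a warning through a captured journal when the body runs longer than a threshold (1s at every call site below). A minimal, self-contained sketch of that pattern follows; the wrapper here is a simplified stand-in for the real perf::measureDurationAndLog, with std::cerr replacing the beast::Journal plumbing, and exampleJob as a placeholder name:

```cpp
#include <chrono>
#include <iostream>
#include <string>
#include <utility>

using namespace std::chrono_literals;

// Simplified stand-in for perf::measureDurationAndLog: run `func`
// and complain if it took longer than `maxDelay`.
template <typename Func, typename Rep, typename Period>
void
measureDurationAndLog(
    Func&& func,
    std::string const& actionDescription,
    std::chrono::duration<Rep, Period> maxDelay)
{
    auto const start = std::chrono::steady_clock::now();
    std::forward<Func>(func)();
    auto const elapsed = std::chrono::steady_clock::now() - start;
    if (elapsed > maxDelay)
        std::cerr << actionDescription << " took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(
                         elapsed)
                         .count()
                  << " ms\n";
}

int main()
{
    // A job body that would previously have been queued bare is now
    // wrapped so slow runs are reported against a named action.
    measureDurationAndLog(
        []() { /* the original job body goes here */ }, "exampleJob", 1s);
}
```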
@@ -29,7 +29,7 @@ Loop: xrpld.core xrpld.net
xrpld.net > xrpld.core

Loop: xrpld.core xrpld.perflog
xrpld.perflog == xrpld.core
xrpld.perflog ~= xrpld.core

Loop: xrpld.net xrpld.rpc
xrpld.rpc ~= xrpld.net

@@ -162,6 +162,7 @@ xrpld.ledger > xrpl.basics
xrpld.ledger > xrpl.json
xrpld.ledger > xrpl.protocol
xrpld.net > xrpl.basics
xrpld.net > xrpld.perflog
xrpld.net > xrpl.json
xrpld.net > xrpl.protocol
xrpld.net > xrpl.resource
@@ -37,6 +37,7 @@
#include <xrpld/consensus/LedgerTiming.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/random.h>
#include <xrpl/beast/core/LexicalCast.h>

@@ -49,6 +50,8 @@
#include <iomanip>
#include <mutex>

using namespace std::chrono_literals;

namespace ripple {

RCLConsensus::RCLConsensus(

@@ -140,11 +143,17 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
app_.getJobQueue().addJob(
jtADVANCE,
"getConsensusLedger1",
[id = hash, &app = app_, this]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger1 started";
app.getInboundLedgers().acquireAsync(
id, 0, InboundLedger::Reason::CONSENSUS);
[id = hash, &app = app_, this, journal = j_]() {
perf::measureDurationAndLog(
[&]() {
JLOG(j_.debug()) << "JOB advanceLedger "
"getConsensusLedger1 started";
app.getInboundLedgers().acquireAsync(
id, 0, InboundLedger::Reason::CONSENSUS);
},
"getConsensusLedger1",
1s,
journal);
});
}
return std::nullopt;
@@ -442,21 +451,27 @@ RCLConsensus::Adaptor::onAccept(
app_.getJobQueue().addJob(
jtACCEPT,
"acceptLedger",
[=, this, cj = std::move(consensusJson)]() mutable {
// Note that no lock is held or acquired during this job.
// This is because generic Consensus guarantees that once a ledger
// is accepted, the consensus results and capture by reference state
// will not change until startRound is called (which happens via
// endConsensus).
RclConsensusLogger clog("onAccept", validating, j_);
this->doAccept(
result,
prevLedger,
closeResolution,
rawCloseTimes,
mode,
std::move(cj));
this->app_.getOPs().endConsensus(clog.ss());
[=, this, cj = std::move(consensusJson), journal = j_]() mutable {
perf::measureDurationAndLog(
[&]() {
// Note that no lock is held or acquired during this job.
// This is because generic Consensus guarantees that once a
// ledger is accepted, the consensus results and capture by
// reference state will not change until startRound is
// called (which happens via endConsensus).
RclConsensusLogger clog("onAccept", validating, j_);
this->doAccept(
result,
prevLedger,
closeResolution,
rawCloseTimes,
mode,
std::move(cj));
this->app_.getOPs().endConsensus(clog.ss());
},
"acceptLedger",
1s,
journal);
});
}
@@ -32,6 +32,8 @@

#include <memory>

using namespace std::chrono_literals;

namespace ripple {

RCLValidatedLedger::RCLValidatedLedger(MakeGenesis)

@@ -142,11 +144,19 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
Application* pApp = &app_;

app_.getJobQueue().addJob(
jtADVANCE, "getConsensusLedger2", [pApp, hash, this]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(
hash, 0, InboundLedger::Reason::CONSENSUS);
jtADVANCE,
"getConsensusLedger2",
[pApp, hash, this, journal = j_]() {
perf::measureDurationAndLog(
[&]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(
hash, 0, InboundLedger::Reason::CONSENSUS);
},
"getConsensusLedger2",
1s,
journal);
});
return std::nullopt;
}
@@ -23,11 +23,14 @@
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/nodestore/Database.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/Log.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/digest.h>

using namespace std::chrono_literals;

namespace ripple {

ConsensusTransSetSF::ConsensusTransSetSF(Application& app, NodeCache& nodeCache)

@@ -65,9 +68,14 @@ ConsensusTransSetSF::gotNode(
"ripple::ConsensusTransSetSF::gotNode : transaction hash "
"match");
auto const pap = &app_;
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
pap->getOPs().submitTransaction(stx);
});
app_.getJobQueue().addJob(
jtTRANSACTION, "TXS->TXN", [pap, stx, journal = j_]() {
perf::measureDurationAndLog(
[&]() { pap->getOPs().submitTransaction(stx); },
"TXS->TXN",
1s,
journal);
});
}
catch (std::exception const& ex)
{
@@ -30,6 +30,7 @@
#include <xrpld/core/SociDB.h>
#include <xrpld/nodestore/Database.h>
#include <xrpld/nodestore/detail/DatabaseNodeImp.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/Log.h>
#include <xrpl/basics/contract.h>

@@ -46,6 +47,8 @@
#include <utility>
#include <vector>

using namespace std::chrono_literals;

namespace ripple {

create_genesis_t const create_genesis{};

@@ -1028,7 +1031,12 @@ pendSaveValidated(
isCurrent ? jtPUBLEDGER : jtPUBOLDLEDGER,
std::to_string(ledger->seq()),
[&app, ledger, isCurrent]() {
saveValidatedLedger(app, ledger, isCurrent);
beast::Journal journal = app.journal("Ledger");
perf::measureDurationAndLog(
[&]() { saveValidatedLedger(app, ledger, isCurrent); },
"OrderBookDB::update:",
1s,
journal);
}))
{
return true;
@@ -24,10 +24,13 @@
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/core/Config.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/Log.h>
#include <xrpl/protocol/Indexes.h>

using namespace std::chrono_literals;

namespace ripple {

OrderBookDB::OrderBookDB(Application& app)

@@ -69,7 +72,13 @@ OrderBookDB::setup(std::shared_ptr<ReadView const> const& ledger)
app_.getJobQueue().addJob(
jtUPDATE_PF,
"OrderBookDB::update: " + std::to_string(ledger->seq()),
[this, ledger]() { update(ledger); });
[this, ledger, journal = j_]() {
perf::measureDurationAndLog(
[&]() { update(ledger); },
"OrderBookDB::update:",
1s,
journal);
});
}
}
@@ -25,6 +25,7 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpld/shamap/SHAMapNodeID.h>

#include <xrpl/basics/Log.h>

@@ -37,6 +38,8 @@
#include <algorithm>
#include <random>

using namespace std::chrono_literals;

namespace ripple {

using namespace std::chrono_literals;

@@ -473,15 +476,24 @@ InboundLedger::done()

// We hold the PeerSet lock, so must dispatch
app_.getJobQueue().addJob(
jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
if (self->complete_ && !self->failed_)
{
self->app_.getLedgerMaster().checkAccept(self->getLedger());
self->app_.getLedgerMaster().tryAdvance();
}
else
self->app_.getInboundLedgers().logFailure(
self->hash_, self->mSeq);
jtLEDGER_DATA,
"AcquisitionDone",
[self = shared_from_this(), journal = journal_]() {
perf::measureDurationAndLog(
[&]() {
if (self->complete_ && !self->failed_)
{
self->app_.getLedgerMaster().checkAccept(
self->getLedger());
self->app_.getLedgerMaster().tryAdvance();
}
else
self->app_.getInboundLedgers().logFailure(
self->hash_, self->mSeq);
},
"AcquisitionDone",
1s,
journal);
});
}
@@ -35,6 +35,8 @@
#include <mutex>
#include <vector>

using namespace std::chrono_literals;

namespace ripple {

class InboundLedgersImp : public InboundLedgers

@@ -212,8 +214,14 @@ public:
// dispatch
if (ledger->gotData(std::weak_ptr<Peer>(peer), packet))
app_.getJobQueue().addJob(
jtLEDGER_DATA, "processLedgerData", [ledger]() {
ledger->runData();
jtLEDGER_DATA,
"processLedgerData",
[ledger, journal = j_]() {
perf::measureDurationAndLog(
[&]() { ledger->runData(); },
"processLedgerData",
1s,
journal);
});

return true;

@@ -227,8 +235,12 @@ public:
if (packet->type() == protocol::liAS_NODE)
{
app_.getJobQueue().addJob(
jtLEDGER_DATA, "gotStaleData", [this, packet]() {
gotStaleData(packet);
jtLEDGER_DATA, "gotStaleData", [this, packet, journal = j_]() {
perf::measureDurationAndLog(
[&]() { gotStaleData(packet); },
"gotStaleData",
1s,
journal);
});
}
@@ -25,6 +25,9 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/overlay/PeerSet.h>
#include <xrpld/perflog/PerfLog.h>

using namespace std::chrono_literals;

namespace ripple {

@@ -244,22 +247,31 @@ LedgerDeltaAcquire::onLedgerBuilt(
app_.getJobQueue().addJob(
jtREPLAY_TASK,
"onLedgerBuilt",
[=, ledger = this->fullLedger_, &app = this->app_]() {
for (auto reason : reasons)
{
switch (reason)
{
case InboundLedger::Reason::GENERIC:
app.getLedgerMaster().storeLedger(ledger);
break;
default:
// TODO for other use cases
break;
}
}
[=,
ledger = this->fullLedger_,
&app = this->app_,
journal = journal_]() {
perf::measureDurationAndLog(
[&]() {
for (auto reason : reasons)
{
switch (reason)
{
case InboundLedger::Reason::GENERIC:
app.getLedgerMaster().storeLedger(ledger);
break;
default:
// TODO for other use cases
break;
}
}

if (firstTime)
app.getLedgerMaster().tryAdvance();
if (firstTime)
app.getLedgerMaster().tryAdvance();
},
"onLedgerBuilt",
1s,
journal);
});
}
@@ -37,6 +37,7 @@
#include <xrpld/core/TimeKeeper.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/Peer.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/Log.h>
#include <xrpl/basics/MathUtilities.h>

@@ -56,6 +57,8 @@
#include <memory>
#include <vector>

using namespace std::chrono_literals;

namespace ripple {

// Don't catch up more than 100 ledgers (cannot exceed 256)

@@ -1361,27 +1364,36 @@ LedgerMaster::tryAdvance()
if (!mAdvanceThread && !mValidLedger.empty())
{
mAdvanceThread = true;
app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
std::unique_lock sl(m_mutex);
app_.getJobQueue().addJob(
jtADVANCE, "advanceLedger", [this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() {
std::unique_lock sl(m_mutex);

XRPL_ASSERT(
!mValidLedger.empty() && mAdvanceThread,
"ripple::LedgerMaster::tryAdvance : has valid ledger");
XRPL_ASSERT(
!mValidLedger.empty() && mAdvanceThread,
"ripple::LedgerMaster::tryAdvance : has valid "
"ledger");

JLOG(m_journal.trace()) << "advanceThread<";
JLOG(m_journal.trace()) << "advanceThread<";

try
{
doAdvance(sl);
}
catch (std::exception const& ex)
{
JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
}
try
{
doAdvance(sl);
}
catch (std::exception const& ex)
{
JLOG(m_journal.fatal())
<< "doAdvance throws: " << ex.what();
}

mAdvanceThread = false;
JLOG(m_journal.trace()) << "advanceThread>";
});
mAdvanceThread = false;
JLOG(m_journal.trace()) << "advanceThread>";
},
"advanceLedger",
1s,
journal);
});
}
}

@@ -1536,7 +1548,13 @@ LedgerMaster::newPFWork(
<< "newPFWork: Creating job. path find threads: "
<< mPathFindThread;
if (app_.getJobQueue().addJob(
jtUPDATE_PF, name, [this]() { updatePaths(); }))
jtUPDATE_PF, name, [this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() { updatePaths(); },
"newPFWork: Creating job. path find threads:",
1s,
journal);
}))
{
++mPathFindThread;
}

@@ -1857,8 +1875,11 @@ LedgerMaster::fetchForHistory(
mFillInProgress = seq;
}
app_.getJobQueue().addJob(
jtADVANCE, "tryFill", [this, ledger]() {
tryFill(ledger);
jtADVANCE,
"tryFill",
[this, ledger, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() { tryFill(ledger); }, "tryFill", 1s, journal);
});
}
progress = true;

@@ -2027,10 +2048,17 @@ LedgerMaster::gotFetchPack(bool progress, std::uint32_t seq)
{
if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
{
app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
app_.getInboundLedgers().gotFetchPack();
mGotFetchPackThread.clear(std::memory_order_release);
});
app_.getJobQueue().addJob(
jtLEDGER_DATA, "gotFetchPack", [this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() {
app_.getInboundLedgers().gotFetchPack();
mGotFetchPackThread.clear(std::memory_order_release);
},
"gotFetchPack",
1s,
journal);
});
}
}
@@ -19,6 +19,9 @@

#include <xrpld/app/ledger/detail/TimeoutCounter.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/perflog/PerfLog.h>

using namespace std::chrono_literals;

namespace ripple {

@@ -83,9 +86,15 @@ TimeoutCounter::queueJob(ScopedLockType& sl)
app_.getJobQueue().addJob(
queueJobParameter_.jobType,
queueJobParameter_.jobName,
[wptr = pmDowncast()]() {
if (auto sptr = wptr.lock(); sptr)
sptr->invokeOnTimer();
[wptr = pmDowncast(), journal = journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto sptr = wptr.lock(); sptr)
sptr->invokeOnTimer();
},
"TimeoutCounter::queueJob",
1s,
journal);
});
}
@@ -23,6 +23,7 @@
#include <xrpld/app/ledger/detail/TransactionAcquire.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/perflog/PerfLog.h>

#include <memory>

@@ -79,8 +80,16 @@ TransactionAcquire::done()
// just updates the consensus and related structures when we acquire
// a transaction set. No need to update them if we're shutting down.
app_.getJobQueue().addJob(
jtTXN_DATA, "completeAcquire", [pap, hash, map]() {
pap->getInboundTransactions().giveSet(hash, map, true);
jtTXN_DATA,
"completeAcquire",
[pap, hash, map, journal = journal_]() {
perf::measureDurationAndLog(
[&]() {
pap->getInboundTransactions().giveSet(hash, map, true);
},
"completeAcquire",
1s,
journal);
});
}
}
@@ -81,6 +81,8 @@
#include <tuple>
#include <unordered_map>

using namespace std::chrono_literals;

namespace ripple {

class NetworkOPsImp final : public NetworkOPs

@@ -994,9 +996,16 @@ NetworkOPsImp::setHeartbeatTimer()
heartbeatTimer_,
mConsensus.parms().ledgerGRANULARITY,
[this]() {
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
processHeartbeatTimer();
});
m_job_queue.addJob(
jtNETOP_TIMER,
"NetOPs.heartbeat",
[this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() { processHeartbeatTimer(); },
"NetOPs.heartbeat",
1s,
journal);
});
},
[this]() { setHeartbeatTimer(); });
}

@@ -1010,9 +1019,16 @@ NetworkOPsImp::setClusterTimer()
clusterTimer_,
10s,
[this]() {
m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
processClusterTimer();
});
m_job_queue.addJob(
jtNETOP_CLUSTER,
"NetOPs.cluster",
[this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() { processClusterTimer(); },
"NetOPs.cluster",
1s,
journal);
});
},
[this]() { setClusterTimer(); });
}

@@ -1229,10 +1245,17 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)

auto tx = std::make_shared<Transaction>(trans, reason, app_);

m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
auto t = tx;
processTransaction(t, false, false, FailHard::no);
});
m_job_queue.addJob(
jtTRANSACTION, "submitTxn", [this, tx, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() {
auto t = tx;
processTransaction(t, false, false, FailHard::no);
},
"submitTxn",
1s,
journal);
});
}

bool

@@ -1315,7 +1338,13 @@ NetworkOPsImp::doTransactionAsync(
if (mDispatchState == DispatchState::none)
{
if (m_job_queue.addJob(
jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
jtBATCH, "transactionBatch", [this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() { transactionBatch(); },
"transactionBatch",
1s,
journal);
}))
{
mDispatchState = DispatchState::scheduled;
}

@@ -1362,9 +1391,16 @@ NetworkOPsImp::doTransactionSyncBatch(
if (mTransactions.size())
{
// More transactions need to be applied, but by another job.
if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
transactionBatch();
}))
if (m_job_queue.addJob(
jtBATCH,
"transactionBatch",
[this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() { transactionBatch(); },
"transactionBatch",
1s,
journal);
}))
{
mDispatchState = DispatchState::scheduled;
}

@@ -3175,8 +3211,17 @@ NetworkOPsImp::reportFeeChange()
if (f != mLastFeeSummary)
{
m_job_queue.addJob(
jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
pubServer();
jtCLIENT_FEE_CHANGE,
"reportFeeChange->pubServer",
[this, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() {
pubServer();
return true;
},
"reportFeeChange->pubServer",
1s,
journal);
});
}
}

@@ -3187,7 +3232,16 @@ NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)
m_job_queue.addJob(
jtCLIENT_CONSENSUS,
"reportConsensusStateChange->pubConsensus",
[this, phase]() { pubConsensus(phase); });
[this, phase, journal = m_journal]() {
perf::measureDurationAndLog(
[&]() {
pubConsensus(phase);
return true;
},
"reportConsensusStateChange->pubConsensus",
1s,
journal);
});
}

inline void
@@ -3693,262 +3747,301 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
jtCLIENT_ACCT_HIST,
"AccountHistoryTxStream",
[this, dbType = databaseType, subInfo]() {
auto const& accountId = subInfo.index_->accountId_;
auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
perf::measureDurationAndLog(
[&]() {
auto const& accountId = subInfo.index_->accountId_;
auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
auto& txHistoryIndex = subInfo.index_->historyTxIndex_;

JLOG(m_journal.trace())
<< "AccountHistory job for account " << toBase58(accountId)
<< " started. lastLedgerSeq=" << lastLedgerSeq;

auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
std::shared_ptr<TxMeta> const& meta) -> bool {
/*
* genesis account: first tx is the one with seq 1
* other account: first tx is the one created the account
*/
if (accountId == genesisAccountId)
{
auto stx = tx->getSTransaction();
if (stx->getAccountID(sfAccount) == accountId &&
stx->getSeqValue() == 1)
return true;
}

for (auto& node : meta->getNodes())
{
if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
continue;

if (node.isFieldPresent(sfNewFields))
{
if (auto inner = dynamic_cast<STObject const*>(
node.peekAtPField(sfNewFields));
inner)
{
if (inner->isFieldPresent(sfAccount) &&
inner->getAccountID(sfAccount) == accountId)
{
return true;
}
}
}
}

return false;
};

auto send = [&](Json::Value const& jvObj,
bool unsubscribe) -> bool {
if (auto sptr = subInfo.sinkWptr_.lock())
{
sptr->send(jvObj, true);
if (unsubscribe)
unsubAccountHistory(sptr, accountId, false);
return true;
}

return false;
};

auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
bool unsubscribe) -> bool {
if (auto sptr = subInfo.sinkWptr_.lock())
{
jvObj.visit(
sptr->getApiVersion(), //
[&](Json::Value const& jv) { sptr->send(jv, true); });

if (unsubscribe)
unsubAccountHistory(sptr, accountId, false);
return true;
}

return false;
};

auto getMoreTxns =
[&](std::uint32_t minLedger,
std::uint32_t maxLedger,
std::optional<RelationalDatabase::AccountTxMarker> marker)
-> std::optional<std::pair<
RelationalDatabase::AccountTxs,
std::optional<RelationalDatabase::AccountTxMarker>>> {
switch (dbType)
{
case Sqlite: {
auto db = static_cast<SQLiteDatabase*>(
&app_.getRelationalDatabase());
RelationalDatabase::AccountTxPageOptions options{
accountId, minLedger, maxLedger, marker, 0, true};
return db->newestAccountTxPage(options);
}
default: {
UNREACHABLE(
"ripple::NetworkOPsImp::addAccountHistoryJob::"
"getMoreTxns : invalid database type");
return {};
}
}
};

/*
* search backward until the genesis ledger or asked to stop
*/
while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
{
int feeChargeCount = 0;
if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
{
sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
++feeChargeCount;
}
else
{
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId) << " no InfoSub. Fee charged "
<< feeChargeCount << " times.";
return;
}
<< toBase58(accountId)
<< " started. lastLedgerSeq=" << lastLedgerSeq;

// try to search in 1024 ledgers till reaching genesis ledgers
auto startLedgerSeq =
(lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
JLOG(m_journal.trace())
<< "AccountHistory job for account " << toBase58(accountId)
<< ", working on ledger range [" << startLedgerSeq << ","
<< lastLedgerSeq << "]";
auto isFirstTx =
[&](std::shared_ptr<Transaction> const& tx,
std::shared_ptr<TxMeta> const& meta) -> bool {
/*
* genesis account: first tx is the one with seq 1
* other account: first tx is the one created the
* account
*/
if (accountId == genesisAccountId)
{
auto stx = tx->getSTransaction();
if (stx->getAccountID(sfAccount) == accountId &&
stx->getSeqValue() == 1)
return true;
}

auto haveRange = [&]() -> bool {
std::uint32_t validatedMin = UINT_MAX;
std::uint32_t validatedMax = 0;
auto haveSomeValidatedLedgers =
app_.getLedgerMaster().getValidatedRange(
validatedMin, validatedMax);
for (auto& node : meta->getNodes())
{
if (node.getFieldU16(sfLedgerEntryType) !=
ltACCOUNT_ROOT)
continue;

return haveSomeValidatedLedgers &&
validatedMin <= startLedgerSeq &&
lastLedgerSeq <= validatedMax;
}();
if (node.isFieldPresent(sfNewFields))
{
if (auto inner = dynamic_cast<STObject const*>(
node.peekAtPField(sfNewFields));
inner)
{
if (inner->isFieldPresent(sfAccount) &&
inner->getAccountID(sfAccount) ==
accountId)
{
return true;
}
}
}
}

if (!haveRange)
{
JLOG(m_journal.debug())
<< "AccountHistory reschedule job for account "
<< toBase58(accountId) << ", incomplete ledger range ["
<< startLedgerSeq << "," << lastLedgerSeq << "]";
setAccountHistoryJobTimer(subInfo);
return;
}
return false;
};

std::optional<RelationalDatabase::AccountTxMarker> marker{};
while (!subInfo.index_->stopHistorical_)
{
auto dbResult =
getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
if (!dbResult)
auto send = [&](Json::Value const& jvObj,
bool unsubscribe) -> bool {
if (auto sptr = subInfo.sinkWptr_.lock())
{
sptr->send(jvObj, true);
if (unsubscribe)
unsubAccountHistory(sptr, accountId, false);
return true;
}

return false;
};

auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
bool unsubscribe) -> bool {
if (auto sptr = subInfo.sinkWptr_.lock())
{
jvObj.visit(
sptr->getApiVersion(), //
[&](Json::Value const& jv) {
sptr->send(jv, true);
});

if (unsubscribe)
unsubAccountHistory(sptr, accountId, false);
return true;
}

return false;
};

auto getMoreTxns =
[&](std::uint32_t minLedger,
std::uint32_t maxLedger,
std::optional<RelationalDatabase::AccountTxMarker>
marker)
-> std::optional<std::pair<
RelationalDatabase::AccountTxs,
std::optional<
RelationalDatabase::AccountTxMarker>>> {
switch (dbType)
{
case Sqlite: {
auto db = static_cast<SQLiteDatabase*>(
&app_.getRelationalDatabase());
RelationalDatabase::AccountTxPageOptions
options{
accountId,
minLedger,
maxLedger,
marker,
0,
true};
return db->newestAccountTxPage(options);
}
default: {
UNREACHABLE(
"ripple::NetworkOPsImp::"
"addAccountHistoryJob::"
"getMoreTxns : invalid database type");
return {};
}
}
};

/*
* search backward until the genesis ledger or asked to stop
*/
while (lastLedgerSeq >= 2 &&
!subInfo.index_->stopHistorical_)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId) << " getMoreTxns failed.";
send(rpcError(rpcINTERNAL), true);
return;
}

auto const& txns = dbResult->first;
marker = dbResult->second;
size_t num_txns = txns.size();
for (size_t i = 0; i < num_txns; ++i)
{
auto const& [tx, meta] = txns[i];

if (!tx || !meta)
int feeChargeCount = 0;
if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId) << " empty tx or meta.";
send(rpcError(rpcINTERNAL), true);
return;
}
auto curTxLedger =
app_.getLedgerMaster().getLedgerBySeq(
tx->getLedger());
if (!curTxLedger)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId) << " no ledger.";
send(rpcError(rpcINTERNAL), true);
return;
}
std::shared_ptr<STTx const> stTxn =
tx->getSTransaction();
if (!stTxn)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " getSTransaction failed.";
send(rpcError(rpcINTERNAL), true);
return;
}

auto const mRef = std::ref(*meta);
auto const trR = meta->getResultTER();
MultiApiJson jvTx =
transJson(stTxn, trR, true, curTxLedger, mRef);

jvTx.set(
jss::account_history_tx_index, txHistoryIndex--);
if (i + 1 == num_txns ||
txns[i + 1].first->getLedger() != tx->getLedger())
jvTx.set(jss::account_history_boundary, true);

if (isFirstTx(tx, meta))
{
jvTx.set(jss::account_history_tx_first, true);
sendMultiApiJson(jvTx, false);

JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " done, found last tx.";
return;
sptr->getConsumer().charge(
Resource::feeMediumBurdenRPC);
++feeChargeCount;
}
else
{
sendMultiApiJson(jvTx, false);
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " no InfoSub. Fee charged " << feeChargeCount
<< " times.";
return;
}

// try to search in 1024 ledgers till reaching genesis
// ledgers
auto startLedgerSeq =
(lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024
: 2);
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< ", working on ledger range [" << startLedgerSeq
<< "," << lastLedgerSeq << "]";

auto haveRange = [&]() -> bool {
std::uint32_t validatedMin = UINT_MAX;
std::uint32_t validatedMax = 0;
auto haveSomeValidatedLedgers =
app_.getLedgerMaster().getValidatedRange(
validatedMin, validatedMax);

return haveSomeValidatedLedgers &&
validatedMin <= startLedgerSeq &&
lastLedgerSeq <= validatedMax;
}();

if (!haveRange)
{
JLOG(m_journal.debug())
<< "AccountHistory reschedule job for account "
<< toBase58(accountId)
<< ", incomplete ledger range ["
<< startLedgerSeq << "," << lastLedgerSeq
<< "]";
setAccountHistoryJobTimer(subInfo);
return;
}

std::optional<RelationalDatabase::AccountTxMarker>
marker{};
while (!subInfo.index_->stopHistorical_)
{
auto dbResult = getMoreTxns(
startLedgerSeq, lastLedgerSeq, marker);
if (!dbResult)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " getMoreTxns failed.";
send(rpcError(rpcINTERNAL), true);
return;
}

auto const& txns = dbResult->first;
marker = dbResult->second;
size_t num_txns = txns.size();
for (size_t i = 0; i < num_txns; ++i)
{
auto const& [tx, meta] = txns[i];

if (!tx || !meta)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " empty tx or meta.";
send(rpcError(rpcINTERNAL), true);
return;
}
auto curTxLedger =
app_.getLedgerMaster().getLedgerBySeq(
tx->getLedger());
if (!curTxLedger)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId) << " no ledger.";
send(rpcError(rpcINTERNAL), true);
return;
}
std::shared_ptr<STTx const> stTxn =
tx->getSTransaction();
if (!stTxn)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " getSTransaction failed.";
send(rpcError(rpcINTERNAL), true);
return;
}

auto const mRef = std::ref(*meta);
auto const trR = meta->getResultTER();
MultiApiJson jvTx = transJson(
stTxn, trR, true, curTxLedger, mRef);

jvTx.set(
jss::account_history_tx_index,
txHistoryIndex--);
if (i + 1 == num_txns ||
txns[i + 1].first->getLedger() !=
tx->getLedger())
jvTx.set(
jss::account_history_boundary, true);

if (isFirstTx(tx, meta))
{
jvTx.set(
jss::account_history_tx_first, true);
sendMultiApiJson(jvTx, false);

JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " done, found last tx.";
return;
}
else
{
sendMultiApiJson(jvTx, false);
}
}

if (marker)
{
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " paging, marker=" << marker->ledgerSeq
<< ":" << marker->txnSeq;
}
else
{
break;
}
}

if (!subInfo.index_->stopHistorical_)
{
lastLedgerSeq = startLedgerSeq - 1;
if (lastLedgerSeq <= 1)
{
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " done, reached genesis ledger.";
return;
}
}
}

if (marker)
{
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " paging, marker=" << marker->ledgerSeq << ":"
<< marker->txnSeq;
}
else
{
break;
}
}

if (!subInfo.index_->stopHistorical_)
{
lastLedgerSeq = startLedgerSeq - 1;
if (lastLedgerSeq <= 1)
{
JLOG(m_journal.trace())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " done, reached genesis ledger.";
return;
}
}
}
return;
},
"AccountHistoryTxStream",
1s,
this->m_journal);
});
}
@@ -25,6 +25,7 @@
#include <xrpld/core/Config.h>
#include <xrpld/core/DatabaseCon.h>
#include <xrpld/core/SociDB.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/contract.h>

@@ -35,6 +36,8 @@

#include <memory>

using namespace std::chrono_literals;

namespace ripple {

static auto checkpointPageCount = 1000;

@@ -257,9 +260,17 @@ public:
// There is a separate check in `checkpoint` for a valid
// connection in the rare case when the DatabaseCon is destroyed
// after locking this weak_ptr
[wp = std::weak_ptr<Checkpointer>{shared_from_this()}]() {
if (auto self = wp.lock())
self->checkpoint();
[wp = std::weak_ptr<Checkpointer>{shared_from_this()},
journal = j_]() {
perf::measureDurationAndLog(
[&]() {
if (auto self = wp.lock())
self->checkpoint();
return true;
},
"WAL",
1s,
journal);
}))
{
std::lock_guard lock(mutex_);
@@ -19,6 +19,7 @@

#include <xrpld/net/RPCCall.h>
#include <xrpld/net/RPCSub.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/Log.h>
#include <xrpl/basics/StringUtilities.h>

@@ -27,6 +28,8 @@

#include <deque>

using namespace std::chrono_literals;

namespace ripple {

// Subscription object for JSON-RPC

@@ -91,8 +94,17 @@ public:
JLOG(j_.info()) << "RPCCall::fromNetwork start";

mSending = m_jobQueue.addJob(
jtCLIENT_SUBSCRIBE, "RPCSub::sendThread", [this]() {
sendThread();
jtCLIENT_SUBSCRIBE,
"RPCSub::sendThread",
[this, journal = j_]() {
perf::measureDurationAndLog(
[&]() {
sendThread();
return true;
},
"RPCSub::sendThread",
1s,
journal);
});
}
}
@@ -1077,8 +1077,17 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
fee_.update(Resource::feeModerateBurdenPeer, "oversize");

app_.getJobQueue().addJob(
jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
overlay_.onManifests(m, that);
jtMANIFEST,
"receiveManifests",
[this, that = shared_from_this(), m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
overlay_.onManifests(m, that);
return true;
},
"receiveManifests",
1s,
journal);
});
}

@@ -1349,10 +1358,18 @@ PeerImp::handleTransaction(
flags,
checkSignature,
batch,
stx]() {
if (auto peer = weak.lock())
peer->checkTransaction(
flags, checkSignature, stx, batch);
stx,
journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
peer->checkTransaction(
flags, checkSignature, stx, batch);
return true;
},
"recvTransaction->checkTransaction",
1s,
journal);
});
}
}

@@ -1447,10 +1464,18 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)

// Queue a job to process the request
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
if (auto peer = weak.lock())
peer->processLedgerRequest(m);
});
app_.getJobQueue().addJob(
jtLEDGER_REQ, "recvGetLedger", [weak, m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
peer->processLedgerRequest(m);
return true;
},
"recvGetLedger",
1s,
journal);
});
}

void

@@ -1468,27 +1493,38 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathRequest> const& m)
Resource::feeModerateBurdenPeer, "received a proof path request");
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
if (auto peer = weak.lock())
{
auto reply =
peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
if (reply.has_error())
{
if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest,
"proof_path_request");
else
peer->charge(
Resource::feeRequestNoReply, "proof_path_request");
}
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtPROOF_PATH_RESPONSE));
}
}
jtREPLAY_REQ,
"recvProofPathRequest",
[weak, m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
{
auto reply = peer->ledgerReplayMsgHandler_
.processProofPathRequest(m);
if (reply.has_error())
{
if (reply.error() ==
protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest,
"proof_path_request");
else
peer->charge(
Resource::feeRequestNoReply,
"proof_path_request");
}
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtPROOF_PATH_RESPONSE));
}
}
return true;
},
"recvProofPathRequest",
1s,
journal);
});
}

@@ -1522,28 +1558,38 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaRequest> const& m)
fee_.fee = Resource::feeModerateBurdenPeer;
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
if (auto peer = weak.lock())
{
auto reply =
peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
if (reply.has_error())
{
if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest,
"replay_delta_request");
else
peer->charge(
Resource::feeRequestNoReply,
"replay_delta_request");
}
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtREPLAY_DELTA_RESPONSE));
}
}
jtREPLAY_REQ,
"recvReplayDeltaRequest",
[weak, m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
{
auto reply = peer->ledgerReplayMsgHandler_
.processReplayDeltaRequest(m);
if (reply.has_error())
{
if (reply.error() ==
protocol::TMReplyError::reBAD_REQUEST)
peer->charge(
Resource::feeMalformedRequest,
"replay_delta_request");
else
peer->charge(
Resource::feeRequestNoReply,
"replay_delta_request");
}
else
{
peer->send(std::make_shared<Message>(
reply, protocol::mtREPLAY_DELTA_RESPONSE));
}
}
return true;
},
"recvReplayDeltaRequest",
1s,
journal);
});
}
@@ -1640,12 +1686,21 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
{
std::weak_ptr<PeerImp> weak{shared_from_this()};
app_.getJobQueue().addJob(
jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
if (auto peer = weak.lock())
{
peer->app_.getInboundTransactions().gotData(
ledgerHash, peer, m);
}
jtTXN_DATA,
"recvPeerData",
[weak, ledgerHash, m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
{
peer->app_.getInboundTransactions().gotData(
ledgerHash, peer, m);
}
return true;
},
"recvPeerData",
1s,
journal);
});
return;
}

@@ -1770,9 +1825,16 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
app_.getJobQueue().addJob(
isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
"recvPropose->checkPropose",
[weak, isTrusted, m, proposal]() {
if (auto peer = weak.lock())
peer->checkPropose(isTrusted, m, proposal);
[weak, isTrusted, m, proposal, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
peer->checkPropose(isTrusted, m, proposal);
return true;
},
"recvPropose->checkPropose",
1s,
journal);
});
}

@@ -2408,9 +2470,16 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
app_.getJobQueue().addJob(
isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
name,
[weak, val, m, key]() {
if (auto peer = weak.lock())
peer->checkValidation(val, key, m);
[weak, val, m, key, name, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
peer->checkValidation(val, key, m);
return true;
},
name,
1s,
journal);
});
}
else

@@ -2463,9 +2532,18 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)

std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtREQUESTED_TXN, "doTransactions", [weak, m]() {
if (auto peer = weak.lock())
peer->doTransactions(m);
jtREQUESTED_TXN,
"doTransactions",
[weak, m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
peer->doTransactions(m);
return true;
},
"doTransactions",
1s,
journal);
});
return;
}

@@ -2597,9 +2675,18 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)

std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
if (auto peer = weak.lock())
peer->handleHaveTransactions(m);
jtMISSING_TXN,
"handleHaveTransactions",
[weak, m, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
if (auto peer = weak.lock())
peer->handleHaveTransactions(m);
return true;
},
"handleHaveTransactions",
1s,
journal);
});
}

@@ -2768,8 +2855,18 @@ PeerImp::doFetchPack(std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
auto elapsed = UptimeClock::now();
auto const pap = &app_;
app_.getJobQueue().addJob(
jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
jtPACK,
"MakeFetchPack",
[pap, weak, packet, hash, elapsed, journal = p_journal_]() {
perf::measureDurationAndLog(
[&]() {
pap->getLedgerMaster().makeFetchPack(
weak, packet, hash, elapsed);
return true;
},
"MakeFetchPack",
1s,
journal);
});
}
@@ -190,20 +190,35 @@ measureDurationAndLog(
std::chrono::duration<Rep, Period> maxDelay,
beast::Journal const& journal)
{
using Result = std::invoke_result_t<Func>;

auto start_time = std::chrono::high_resolution_clock::now();

auto result = func();

auto end_time = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
end_time - start_time);
if (duration > maxDelay)
if constexpr (std::is_void_v<Result>)
{
JLOG(journal.warn())
<< actionDescription << " took " << duration.count() << " ms";
std::forward<Func>(func)();
auto end_time = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
end_time - start_time);
if (duration > maxDelay)
{
JLOG(journal.warn())
<< actionDescription << " took " << duration.count() << " ms";
}
}
else
{
Result result = std::forward<Func>(func)();
auto end_time = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
end_time - start_time);
if (duration > maxDelay)
{
JLOG(journal.warn())
<< actionDescription << " took " << duration.count() << " ms";
}
return result;
}

return result;
}

} // namespace perf
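The hunk above reworks the helper itself: the original body unconditionally wrote `auto result = func();`, which does not compile when `func` returns void, so the new version branches on std::invoke_result_t with `if constexpr`, and only the non-void branch returns a value. That is presumably also why several wrapped job bodies above add a trailing `return true;`. A consolidated, self-contained sketch of that shape, under the same assumptions as before (std::cerr standing in for the JLOG/beast::Journal plumbing):

```cpp
#include <chrono>
#include <iostream>
#include <string>
#include <type_traits>
#include <utility>

// Sketch of the reworked helper: `if constexpr` splits the void and
// non-void cases, since `auto result = func();` is ill-formed when
// `func` returns void. Only the taken branch is instantiated, so the
// deduced return type matches the callable's result.
template <typename Func, typename Rep, typename Period>
auto
measureDurationAndLog(
    Func&& func,
    std::string const& actionDescription,
    std::chrono::duration<Rep, Period> maxDelay)
{
    using Result = std::invoke_result_t<Func>;
    auto const start = std::chrono::steady_clock::now();

    // Shared tail: warn if the job body exceeded the threshold.
    auto logIfSlow = [&]() {
        auto const ms = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - start);
        if (ms > maxDelay)
            std::cerr << actionDescription << " took " << ms.count()
                      << " ms\n";
    };

    if constexpr (std::is_void_v<Result>)
    {
        std::forward<Func>(func)();
        logIfSlow();
    }
    else
    {
        Result result = std::forward<Func>(func)();
        logIfSlow();
        return result;
    }
}

int main()
{
    // Void job: nothing returned, duration still checked.
    measureDurationAndLog([] {}, "voidJob", std::chrono::seconds{1});
    // Non-void job: the callable's result is passed through.
    int answer = measureDurationAndLog(
        [] { return 42; }, "intJob", std::chrono::seconds{1});
    std::cout << answer << "\n";
}
```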