Mirror of https://github.com/XRPLF/rippled.git, synced 2026-02-20 22:02:34 +00:00
Address a PR comment
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
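This change continues the migration of NetworkOPs off the Application object: the implementation now holds a ServiceRegistry& and resolves each service through it, while state that still lives on Application is reached via registry_.app(). As a rough sketch only (member names are inferred from the call sites in the diff below; the actual rippled declaration may differ), the interface this diff assumes looks something like:

    // Hypothetical sketch inferred from the call sites below, not the real header.
    class ServiceRegistry
    {
    public:
        virtual ~ServiceRegistry() = default;

        // Escape hatch for state not yet migrated off Application
        // (config(), nodeIdentity(), getMasterMutex(), ...).
        virtual Application& app() = 0;

        // Services the diff resolves directly through the registry.
        virtual Cluster& cluster() = 0;
        virtual Overlay& overlay() = 0;
        virtual TimeKeeper& timeKeeper() = 0;
        virtual Logs& logs() = 0;
        virtual beast::Journal journal(std::string const& name) = 0;
        virtual LoadFeeTrack& getFeeTrack() = 0;
        virtual HashRouter& getHashRouter() = 0;
        virtual LedgerMaster& getLedgerMaster() = 0;
        virtual TxQ& getTxQ() = 0;
        virtual OpenLedger& openLedger() = 0;
        virtual ValidatorList& validators() = 0;
        // ... plus getValidations(), getJobQueue(), getNodeStore(), etc.
    };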
@@ -209,7 +209,7 @@ public:
 boost::asio::io_context& io_svc,
 beast::Journal journal,
 beast::insight::Collector::ptr const& collector)
-: app_(registry.app())
+: registry_(registry)
 , m_journal(journal)
 , m_localTX(make_LocalTxs())
 , mMode(start_valid ? OperatingMode::FULL : OperatingMode::DISCONNECTED)
@@ -217,14 +217,16 @@ public:
 , clusterTimer_(io_svc)
 , accountHistoryTxTimer_(io_svc)
 , mConsensus(
-app_,
-make_FeeVote(setup_FeeVote(app_.config().section("voting")), app_.logs().journal("FeeVote")),
+registry_.app(),
+make_FeeVote(
+setup_FeeVote(registry_.app().config().section("voting")),
+registry_.logs().journal("FeeVote")),
 ledgerMaster,
 *m_localTX,
 registry.getInboundTransactions(),
 beast::get_abstract_clock<std::chrono::steady_clock>(),
 validatorKeys,
-app_.logs().journal("LedgerConsensus"))
+registry_.logs().journal("LedgerConsensus"))
 , validatorPK_(validatorKeys.keys ? validatorKeys.keys->publicKey : decltype(validatorPK_){})
 , validatorMasterPK_(validatorKeys.keys ? validatorKeys.keys->masterPublicKey : decltype(validatorMasterPK_){})
 , m_ledgerMaster(ledgerMaster)
@@ -666,7 +668,7 @@ private:
 void
 setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo);

-Application& app_;
+ServiceRegistry& registry_;
 beast::Journal m_journal;

 std::unique_ptr<LocalTxs> m_localTX;
@@ -848,7 +850,7 @@ NetworkOPsImp::getHostId(bool forAdmin)
 // For non-admin uses hash the node public key into a
 // single RFC1751 word:
 static std::string const shroudedHostId = [this]() {
-auto const& id = app_.nodeIdentity();
+auto const& id = registry_.app().nodeIdentity();

 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
 }();
@@ -862,7 +864,7 @@ NetworkOPsImp::setStateTimer()
 setHeartbeatTimer();

 // Only do this work if a cluster is configured
-if (app_.cluster().size() != 0)
+if (registry_.cluster().size() != 0)
 setClusterTimer();
 }

@@ -933,13 +935,13 @@ NetworkOPsImp::processHeartbeatTimer()
 {
 RclConsensusLogger clog("Heartbeat Timer", mConsensus.validating(), m_journal);
 {
-std::unique_lock lock{app_.getMasterMutex()};
+std::unique_lock lock{registry_.app().getMasterMutex()};

 // VFALCO NOTE This is for diagnosing a crash on exit
-LoadManager& mgr(app_.getLoadManager());
+LoadManager& mgr(registry_.getLoadManager());
 mgr.heartbeat();

-std::size_t const numPeers = app_.overlay().size();
+std::size_t const numPeers = registry_.overlay().size();

 // do we have sufficient peers? If not, we are disconnected.
 if (numPeers < minPeerCount_)
@@ -991,7 +993,7 @@ NetworkOPsImp::processHeartbeatTimer()
 CLOG(clog.ss()) << ". ";
 }

-mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
+mConsensus.timerEntry(registry_.timeKeeper().closeTime(), clog.ss());

 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
 ConsensusPhase const currPhase = mConsensus.phase();
@@ -1009,16 +1011,16 @@ NetworkOPsImp::processHeartbeatTimer()
 void
 NetworkOPsImp::processClusterTimer()
 {
-if (app_.cluster().size() == 0)
+if (registry_.cluster().size() == 0)
 return;

 using namespace std::chrono_literals;

-bool const update = app_.cluster().update(
-app_.nodeIdentity().first,
+bool const update = registry_.cluster().update(
+registry_.app().nodeIdentity().first,
 "",
-(m_ledgerMaster.getValidatedLedgerAge() <= 4min) ? app_.getFeeTrack().getLocalFee() : 0,
-app_.timeKeeper().now());
+(m_ledgerMaster.getValidatedLedgerAge() <= 4min) ? registry_.getFeeTrack().getLocalFee() : 0,
+registry_.timeKeeper().now());

 if (!update)
 {
@@ -1028,7 +1030,7 @@ NetworkOPsImp::processClusterTimer()
 }

 protocol::TMCluster cluster;
-app_.cluster().for_each([&cluster](ClusterNode const& node) {
+registry_.cluster().for_each([&cluster](ClusterNode const& node) {
 protocol::TMClusterNode& n = *cluster.add_clusternodes();
 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
 n.set_reporttime(node.getReportTime().time_since_epoch().count());
@@ -1037,14 +1039,14 @@ NetworkOPsImp::processClusterTimer()
 n.set_nodename(node.name());
 });

-Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
+Resource::Gossip gossip = registry_.getResourceManager().exportConsumers();
 for (auto& item : gossip.items)
 {
 protocol::TMLoadSource& node = *cluster.add_loadsources();
 node.set_name(to_string(item.address));
 node.set_cost(item.balance);
 }
-app_.overlay().foreach(send_if(std::make_shared<Message>(cluster, protocol::mtCLUSTER), peer_in_cluster()));
+registry_.overlay().foreach(send_if(std::make_shared<Message>(cluster, protocol::mtCLUSTER), peer_in_cluster()));
 setClusterTimer();
 }

@@ -1089,7 +1091,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
 auto const trans = sterilize(*iTrans);

 auto const txid = trans->getTransactionID();
-auto const flags = app_.getHashRouter().getFlags(txid);
+auto const flags = registry_.getHashRouter().getFlags(txid);

 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
 {
@@ -1099,8 +1101,8 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)

 try
 {
-auto const [validity, reason] =
-checkValidity(app_.getHashRouter(), *trans, m_ledgerMaster.getValidatedRules(), app_.config());
+auto const [validity, reason] = checkValidity(
+registry_.getHashRouter(), *trans, m_ledgerMaster.getValidatedRules(), registry_.app().config());

 if (validity != Validity::Valid)
 {
@@ -1117,7 +1119,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)

 std::string reason;

-auto tx = std::make_shared<Transaction>(trans, reason, app_);
+auto tx = std::make_shared<Transaction>(trans, reason, registry_.app());

 m_job_queue.addJob(jtTRANSACTION, "SubmitTxn", [this, tx]() {
 auto t = tx;
@@ -1128,7 +1130,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
 bool
 NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
 {
-auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
+auto const newFlags = registry_.getHashRouter().getFlags(transaction->getID());

 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
 {
@@ -1149,14 +1151,15 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
 {
 transaction->setStatus(INVALID);
 transaction->setResult(temINVALID_FLAG);
-app_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
+registry_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
 return false;
 }

 // NOTE ximinez - I think this check is redundant,
 // but I'm not 100% sure yet.
 // If so, only cost is looking up HashRouter flags.
-auto const [validity, reason] = checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
+auto const [validity, reason] =
+checkValidity(registry_.getHashRouter(), sttx, view->rules(), registry_.app().config());
 XRPL_ASSERT(validity == Validity::Valid, "xrpl::NetworkOPsImp::processTransaction : valid validity");

 // Not concerned with local checks at this point.
@@ -1165,12 +1168,12 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
 transaction->setStatus(INVALID);
 transaction->setResult(temBAD_SIGNATURE);
-app_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
+registry_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
 return false;
 }

 // canonicalize can change our pointer
-app_.getMasterTransaction().canonicalize(&transaction);
+registry_.getMasterTransaction().canonicalize(&transaction);

 return true;
 }
@@ -1266,7 +1269,7 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
 for (auto const& [_, tx] : set)
 {
 std::string reason;
-auto transaction = std::make_shared<Transaction>(tx, reason, app_);
+auto transaction = std::make_shared<Transaction>(tx, reason, registry_.app());

 if (transaction->getStatus() == INVALID)
 {
@@ -1274,7 +1277,7 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
 {
 JLOG(m_journal.trace()) << "Exception checking transaction: " << reason;
 }
-app_.getHashRouter().setFlags(tx->getTransactionID(), HashRouterFlags::BAD);
+registry_.getHashRouter().setFlags(tx->getTransactionID(), HashRouterFlags::BAD);
 continue;
 }

@@ -1348,13 +1351,13 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 batchLock.unlock();

 {
-std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
+std::unique_lock masterLock{registry_.app().getMasterMutex(), std::defer_lock};
 bool changed = false;
 {
 std::unique_lock ledgerLock{m_ledgerMaster.peekMutex(), std::defer_lock};
 std::lock(masterLock, ledgerLock);

-app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
+registry_.openLedger().modify([&](OpenView& view, beast::Journal j) {
 for (TransactionStatus& e : transactions)
 {
 // we check before adding to the batch
@@ -1365,7 +1368,8 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 if (e.failType == FailHard::yes)
 flags |= tapFAIL_HARD;

-auto const result = app_.getTxQ().apply(app_, view, e.transaction->getSTransaction(), flags, j);
+auto const result =
+registry_.getTxQ().apply(registry_.app(), view, e.transaction->getSTransaction(), flags, j);
 e.result = result.ter;
 e.applied = result.applied;
 changed = changed || result.applied;
@@ -1380,7 +1384,7 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 if (auto const l = m_ledgerMaster.getValidatedLedger())
 validatedLedgerIndex = l->header().seq;

-auto newOL = app_.openLedger().current();
+auto newOL = registry_.openLedger().current();
 for (TransactionStatus& e : transactions)
 {
 e.transaction->clearSubmitResult();
@@ -1394,7 +1398,7 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 e.transaction->setResult(e.result);

 if (isTemMalformed(e.result))
-app_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::BAD);
+registry_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::BAD);

 #ifdef DEBUG
 if (e.result != tesSUCCESS)
@@ -1428,7 +1432,7 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 batchLock.lock();
 std::string reason;
 auto const trans = sterilize(*txNext);
-auto t = std::make_shared<Transaction>(trans, reason, app_);
+auto t = std::make_shared<Transaction>(trans, reason, registry_.app());
 if (t->getApplying())
 break;
 submit_held.emplace_back(t, false, false, FailHard::no);
@@ -1480,7 +1484,7 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 // up!)
 //
 if (e.local || (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
-app_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::HELD))
+registry_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::HELD))
 {
 // transaction should be held
 JLOG(m_journal.debug()) << "Transaction should be held: " << e.result;
@@ -1514,7 +1518,7 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 (e.result == terQUEUED)) &&
 !enforceFailHard)
 {
-auto const toSkip = app_.getHashRouter().shouldRelay(e.transaction->getID());
+auto const toSkip = registry_.getHashRouter().shouldRelay(e.transaction->getID());
 if (auto const sttx = *(e.transaction->getSTransaction()); toSkip &&
 // Skip relaying if it's an inner batch txn. The flag should
 // only be set if the Batch feature is enabled. If Batch is
@@ -1528,10 +1532,10 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 sttx.add(s);
 tx.set_rawtransaction(s.data(), s.size());
 tx.set_status(protocol::tsCURRENT);
-tx.set_receivetimestamp(app_.timeKeeper().now().time_since_epoch().count());
+tx.set_receivetimestamp(registry_.timeKeeper().now().time_since_epoch().count());
 tx.set_deferred(e.result == terQUEUED);
 // FIXME: This should be when we received it
-app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
+registry_.overlay().relay(e.transaction->getID(), tx, *toSkip);
 e.transaction->setBroadcast();
 }
 }
@@ -1539,7 +1543,7 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
 if (validatedLedgerIndex)
 {
 auto [fee, accountSeq, availableSeq] =
-app_.getTxQ().getTxRequiredFeeAndSeq(*newOL, e.transaction->getSTransaction());
+registry_.getTxQ().getTxRequiredFeeAndSeq(*newOL, e.transaction->getSTransaction());
 e.transaction->setCurrentLedgerState(*validatedLedgerIndex, fee, accountSeq, availableSeq);
 }
 }
@@ -1714,7 +1718,7 @@ NetworkOPsImp::checkLastClosedLedger(Overlay::PeerSequence const& peerList, uint
 //-------------------------------------------------------------------------
 // Determine preferred last closed ledger

-auto& validations = app_.getValidations();
+auto& validations = registry_.getValidations();
 JLOG(m_journal.debug()) << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

 // Will rely on peer LCL if no trusted validations exist
@@ -1759,7 +1763,7 @@ NetworkOPsImp::checkLastClosedLedger(Overlay::PeerSequence const& peerList, uint
 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

 if (!consensus)
-consensus = app_.getInboundLedgers().acquire(closedLedger, 0, InboundLedger::Reason::CONSENSUS);
+consensus = registry_.getInboundLedgers().acquire(closedLedger, 0, InboundLedger::Reason::CONSENSUS);

 if (consensus &&
 (!m_ledgerMaster.canBeCurrent(consensus) ||
@@ -1800,7 +1804,7 @@ NetworkOPsImp::switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLC
 clearNeedNetworkLedger();

 // Update fee computations.
-app_.getTxQ().processClosedLedger(app_, *newLCL, true);
+registry_.getTxQ().processClosedLedger(registry_.app(), *newLCL, true);

 // Caller must own master lock
 {
@@ -1808,14 +1812,14 @@ NetworkOPsImp::switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLC
 // open ledger. Then apply local tx.

 auto retries = m_localTX->getTxSet();
-auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
+auto const lastVal = registry_.getLedgerMaster().getValidatedLedger();
 std::optional<Rules> rules;
 if (lastVal)
-rules = makeRulesGivenLedger(*lastVal, app_.config().features);
+rules = makeRulesGivenLedger(*lastVal, registry_.app().config().features);
 else
-rules.emplace(app_.config().features);
-app_.openLedger().accept(
-app_,
+rules.emplace(registry_.app().config().features);
+registry_.openLedger().accept(
+registry_.app(),
 *rules,
 newLCL,
 OrderedTxs({}),
@@ -1825,7 +1829,7 @@ NetworkOPsImp::switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLC
 "jump",
 [&](OpenView& view, beast::Journal j) {
 // Stuff the ledger with transactions from the queue.
-return app_.getTxQ().accept(app_, view);
+return registry_.getTxQ().accept(registry_.app(), view);
 });
 }

@@ -1834,11 +1838,11 @@ NetworkOPsImp::switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLC
 protocol::TMStatusChange s;
 s.set_newevent(protocol::neSWITCHED_LEDGER);
 s.set_ledgerseq(newLCL->header().seq);
-s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
+s.set_networktime(registry_.timeKeeper().now().time_since_epoch().count());
 s.set_ledgerhashprevious(newLCL->header().parentHash.begin(), newLCL->header().parentHash.size());
 s.set_ledgerhash(newLCL->header().hash.begin(), newLCL->header().hash.size());

-app_.overlay().foreach(send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
+registry_.overlay().foreach(send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
 }

 bool
@@ -1875,23 +1879,23 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed, std::unique_ptr<std:
 "xrpl::NetworkOPsImp::beginConsensus : closedLedger parent matches "
 "hash");

-app_.validators().setNegativeUNL(prevLedger->negativeUNL());
-TrustChanges const changes = app_.validators().updateTrusted(
-app_.getValidations().getCurrentNodeIDs(),
+registry_.validators().setNegativeUNL(prevLedger->negativeUNL());
+TrustChanges const changes = registry_.validators().updateTrusted(
+registry_.getValidations().getCurrentNodeIDs(),
 closingInfo.parentCloseTime,
 *this,
-app_.overlay(),
-app_.getHashRouter());
+registry_.overlay(),
+registry_.getHashRouter());

 if (!changes.added.empty() || !changes.removed.empty())
 {
-app_.getValidations().trustChanged(changes.added, changes.removed);
+registry_.getValidations().trustChanged(changes.added, changes.removed);
 // Update the AmendmentTable so it tracks the current validators.
-app_.getAmendmentTable().trustChanged(app_.validators().getQuorumKeys().second);
+registry_.getAmendmentTable().trustChanged(registry_.validators().getQuorumKeys().second);
 }

 mConsensus.startRound(
-app_.timeKeeper().closeTime(), networkClosed, prevLedger, changes.removed, changes.added, clog);
+registry_.timeKeeper().closeTime(), networkClosed, prevLedger, changes.removed, changes.added, clog);

 ConsensusPhase const currPhase = mConsensus.phase();
 if (mLastConsensusPhase != currPhase)
@@ -1926,7 +1930,7 @@ NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
 return false;
 }

-return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
+return mConsensus.peerProposal(registry_.timeKeeper().closeTime(), peerPos);
 }

 void
@@ -1940,11 +1944,11 @@ NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
 protocol::TMHaveTransactionSet msg;
 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
 msg.set_status(protocol::tsHAVE);
-app_.overlay().foreach(send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
+registry_.overlay().foreach(send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));

 // We acquired it because consensus asked us to
 if (fromAcquire)
-mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
+mConsensus.gotTxSet(registry_.timeKeeper().closeTime(), RCLTxSet{map});
 }

 void
@@ -1952,7 +1956,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
 {
 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->header().parentHash;

-for (auto const& it : app_.overlay().getActivePeers())
+for (auto const& it : registry_.overlay().getActivePeers())
 {
 if (it && (it->getClosedLedgerHash() == deadLedger))
 {
@@ -1962,7 +1966,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
 }

 uint256 networkClosed;
-bool ledgerChange = checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
+bool ledgerChange = checkLastClosedLedger(registry_.overlay().getActivePeers(), networkClosed);

 if (networkClosed.isZero())
 {
@@ -1991,7 +1995,8 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
 // Note: Do not go to FULL if we don't have the previous ledger
 // check if the ledger is bad enough to go to CONNECTED -- TODO
 auto current = m_ledgerMaster.getCurrentLedger();
-if (app_.timeKeeper().now() < (current->header().parentCloseTime + 2 * current->header().closeTimeResolution))
+if (registry_.timeKeeper().now() <
+(current->header().parentCloseTime + 2 * current->header().closeTimeResolution))
 {
 setMode(OperatingMode::FULL);
 }
@@ -2097,9 +2102,9 @@ NetworkOPsImp::pubServer()
 Json::Value jvObj(Json::objectValue);

 ServerFeeSummary f{
-app_.openLedger().current()->fees().base,
-app_.getTxQ().getMetrics(*app_.openLedger().current()),
-app_.getFeeTrack()};
+registry_.openLedger().current()->fees().base,
+registry_.getTxQ().getMetrics(*registry_.openLedger().current()),
+registry_.getFeeTrack()};

 jvObj[jss::type] = "serverStatus";
 jvObj[jss::server_status] = strOperatingMode();
@@ -2190,7 +2195,7 @@ NetworkOPsImp::pubValidation(std::shared_ptr<STValidation> const& val)
 jvObj[jss::flags] = val->getFlags();
 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
 jvObj[jss::data] = strHex(val->getSerializer().slice());
-jvObj[jss::network_id] = app_.config().NETWORK_ID;
+jvObj[jss::network_id] = registry_.app().config().NETWORK_ID;

 if (auto version = (*val)[~sfServerVersion])
 jvObj[jss::server_version] = std::to_string(*version);
@@ -2201,7 +2206,7 @@ NetworkOPsImp::pubValidation(std::shared_ptr<STValidation> const& val)
 if (auto hash = (*val)[~sfValidatedHash])
 jvObj[jss::validated_hash] = strHex(*hash);

-auto const masterKey = app_.validatorManifests().getMasterKey(signerPublic);
+auto const masterKey = registry_.validatorManifests().getMasterKey(signerPublic);

 if (masterKey != signerPublic)
 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
@@ -2308,12 +2313,12 @@ NetworkOPsImp::setMode(OperatingMode om)
 using namespace std::chrono_literals;
 if (om == OperatingMode::CONNECTED)
 {
-if (app_.getLedgerMaster().getValidatedLedgerAge() < 1min)
+if (registry_.getLedgerMaster().getValidatedLedgerAge() < 1min)
 om = OperatingMode::SYNCING;
 }
 else if (om == OperatingMode::SYNCING)
 {
-if (app_.getLedgerMaster().getValidatedLedgerAge() >= 1min)
+if (registry_.getLedgerMaster().getValidatedLedgerAge() >= 1min)
 om = OperatingMode::CONNECTED;
 }

@@ -2345,7 +2350,7 @@ NetworkOPsImp::recvValidation(std::shared_ptr<STValidation> const& val, std::str
 else
 pendingValidations_.insert(val->getLedgerHash());
 scope_unlock unlock(lock);
-handleNewValidation(app_, val, source, bypassAccept, m_journal);
+handleNewValidation(registry_.app(), val, source, bypassAccept, m_journal);
 }
 catch (std::exception const& e)
 {
@@ -2367,7 +2372,7 @@ NetworkOPsImp::recvValidation(std::shared_ptr<STValidation> const& val, std::str
 JLOG(m_journal.debug()) << [this, &val]() -> auto {
 std::stringstream ss;
 ss << "VALIDATION: " << val->render() << " master_key: ";
-auto master = app_.validators().getTrustedKey(val->getSignerPublic());
+auto master = registry_.validators().getTrustedKey(val->getSignerPublic());
 if (master)
 {
 ss << toBase58(TokenType::NodePublic, *master);
@@ -2381,7 +2386,7 @@ NetworkOPsImp::recvValidation(std::shared_ptr<STValidation> const& val, std::str

 // We will always relay trusted validations; if configured, we will
 // also relay all untrusted validations.
-return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
+return registry_.app().config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
 }

 Json::Value
@@ -2423,7 +2428,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 "One or more unsupported amendments have reached majority. "
 "Upgrade to the latest version before they are activated "
 "to avoid being amendment blocked.";
-if (auto const expected = app_.getAmendmentTable().firstUnsupportedExpected())
+if (auto const expected = registry_.getAmendmentTable().firstUnsupportedExpected())
 {
 auto& d = w[jss::details] = Json::objectValue;
 d[jss::expected_date] = expected->time_since_epoch().count();
@@ -2440,8 +2445,8 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 info[jss::hostid] = getHostId(admin);

 // domain: if configured with a domain, report it:
-if (!app_.config().SERVER_DOMAIN.empty())
-info[jss::server_domain] = app_.config().SERVER_DOMAIN;
+if (!registry_.app().config().SERVER_DOMAIN.empty())
+info[jss::server_domain] = registry_.app().config().SERVER_DOMAIN;

 info[jss::build_version] = BuildInfo::getVersionString();

@@ -2452,11 +2457,11 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 if (needNetworkLedger_)
 info[jss::network_ledger] = "waiting";

-info[jss::validation_quorum] = static_cast<Json::UInt>(app_.validators().quorum());
+info[jss::validation_quorum] = static_cast<Json::UInt>(registry_.validators().quorum());

 if (admin)
 {
-switch (app_.config().NODE_SIZE)
+switch (registry_.app().config().NODE_SIZE)
 {
 case 0:
 info[jss::node_size] = "tiny";
@@ -2475,7 +2480,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 break;
 }

-auto when = app_.validators().expires();
+auto when = registry_.validators().expires();

 if (!human)
 {
@@ -2488,7 +2493,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 {
 auto& x = (info[jss::validator_list] = Json::objectValue);

-x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
+x[jss::count] = static_cast<Json::UInt>(registry_.validators().count());

 if (when)
 {
@@ -2501,7 +2506,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 {
 x[jss::expiration] = to_string(*when);

-if (*when > app_.timeKeeper().now())
+if (*when > registry_.timeKeeper().now())
 x[jss::status] = "active";
 else
 x[jss::status] = "expired";
@@ -2526,11 +2531,12 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 }
 #endif
 }
-info[jss::io_latency_ms] = static_cast<Json::UInt>(app_.getIOLatency().count());
+info[jss::io_latency_ms] = static_cast<Json::UInt>(registry_.app().getIOLatency().count());

 if (admin)
 {
-if (auto const localPubKey = app_.validators().localPublicKey(); localPubKey && app_.getValidationPublicKey())
+if (auto const localPubKey = registry_.validators().localPublicKey();
+localPubKey && registry_.app().getValidationPublicKey())
 {
 info[jss::pubkey_validator] = toBase58(TokenType::NodePublic, localPubKey.value());
 }
@@ -2542,17 +2548,17 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)

 if (counters)
 {
-info[jss::counters] = app_.getPerfLog().countersJson();
+info[jss::counters] = registry_.getPerfLog().countersJson();

 Json::Value nodestore(Json::objectValue);
-app_.getNodeStore().getCountsJson(nodestore);
+registry_.getNodeStore().getCountsJson(nodestore);
 info[jss::counters][jss::nodestore] = nodestore;
-info[jss::current_activities] = app_.getPerfLog().currentJson();
+info[jss::current_activities] = registry_.getPerfLog().currentJson();
 }

-info[jss::pubkey_node] = toBase58(TokenType::NodePublic, app_.nodeIdentity().first);
+info[jss::pubkey_node] = toBase58(TokenType::NodePublic, registry_.app().nodeIdentity().first);

-info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
+info[jss::complete_ledgers] = registry_.getLedgerMaster().getCompleteLedgers();

 if (amendmentBlocked_)
 info[jss::amendment_blocked] = true;
@@ -2562,7 +2568,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 if (fp != 0)
 info[jss::fetch_pack] = Json::UInt(fp);

-info[jss::peers] = Json::UInt(app_.overlay().size());
+info[jss::peers] = Json::UInt(registry_.overlay().size());

 Json::Value lastClose = Json::objectValue;
 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
@@ -2583,13 +2589,13 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 if (admin)
 info[jss::load] = m_job_queue.getJson();

-if (auto const netid = app_.overlay().networkID())
+if (auto const netid = registry_.overlay().networkID())
 info[jss::network_id] = static_cast<Json::UInt>(*netid);

-auto const escalationMetrics = app_.getTxQ().getMetrics(*app_.openLedger().current());
+auto const escalationMetrics = registry_.getTxQ().getMetrics(*registry_.openLedger().current());

-auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
-auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
+auto const loadFactorServer = registry_.getFeeTrack().getLoadFactor();
+auto const loadBaseServer = registry_.getFeeTrack().getLoadBase();
 /* Scale the escalated fee level to unitless "load factor".
 In practice, this just strips the units, but it will continue
 to work correctly if either base value ever changes. */
@@ -2623,13 +2629,13 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)

 if (admin)
 {
-std::uint32_t fee = app_.getFeeTrack().getLocalFee();
+std::uint32_t fee = registry_.getFeeTrack().getLocalFee();
 if (fee != loadBaseServer)
 info[jss::load_factor_local] = static_cast<double>(fee) / loadBaseServer;
-fee = app_.getFeeTrack().getRemoteFee();
+fee = registry_.getFeeTrack().getRemoteFee();
 if (fee != loadBaseServer)
 info[jss::load_factor_net] = static_cast<double>(fee) / loadBaseServer;
-fee = app_.getFeeTrack().getClusterFee();
+fee = registry_.getFeeTrack().getClusterFee();
 if (fee != loadBaseServer)
 info[jss::load_factor_cluster] = static_cast<double>(fee) / loadBaseServer;
 }
@@ -2670,7 +2676,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();

-if (auto const closeOffset = app_.timeKeeper().closeOffset(); std::abs(closeOffset.count()) >= 60)
+if (auto const closeOffset = registry_.timeKeeper().closeOffset(); std::abs(closeOffset.count()) >= 60)
 l[jss::close_time_offset] = static_cast<std::uint32_t>(closeOffset.count());

 constexpr std::chrono::seconds highAgeThreshold{1000000};
@@ -2682,7 +2688,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 else
 {
 auto lCloseTime = lpClosed->header().closeTime;
-auto closeTime = app_.timeKeeper().closeTime();
+auto closeTime = registry_.timeKeeper().closeTime();
 if (lCloseTime <= closeTime)
 {
 using namespace std::chrono_literals;
@@ -2706,16 +2712,16 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)

 accounting_.json(info);
 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
-info[jss::jq_trans_overflow] = std::to_string(app_.overlay().getJqTransOverflow());
-info[jss::peer_disconnects] = std::to_string(app_.overlay().getPeerDisconnect());
-info[jss::peer_disconnects_resources] = std::to_string(app_.overlay().getPeerDisconnectCharges());
+info[jss::jq_trans_overflow] = std::to_string(registry_.overlay().getJqTransOverflow());
+info[jss::peer_disconnects] = std::to_string(registry_.overlay().getPeerDisconnect());
+info[jss::peer_disconnects_resources] = std::to_string(registry_.overlay().getPeerDisconnectCharges());

 // This array must be sorted in increasing order.
 static constexpr std::array<std::string_view, 7> protocols{"http", "https", "peer", "ws", "ws2", "wss", "wss2"};
 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
 {
 Json::Value ports{Json::arrayValue};
-for (auto const& port : app_.getServerHandler().setup().ports)
+for (auto const& port : registry_.getServerHandler().setup().ports)
 {
 // Don't publish admin ports for non-admin users
 if (!admin &&
@@ -2739,9 +2745,9 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 }
 }

-if (app_.config().exists(SECTION_PORT_GRPC))
+if (registry_.app().config().exists(SECTION_PORT_GRPC))
 {
-auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
+auto const& grpcSection = registry_.app().config().section(SECTION_PORT_GRPC);
 auto const optPort = grpcSection.get("port");
 if (optPort && grpcSection.get("ip"))
 {
@@ -2760,13 +2766,13 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
 void
 NetworkOPsImp::clearLedgerFetch()
 {
-app_.getInboundLedgers().clearFailures();
+registry_.getInboundLedgers().clearFailures();
 }

 Json::Value
 NetworkOPsImp::getLedgerFetchInfo()
 {
-return app_.getInboundLedgers().getInfo();
+return registry_.getInboundLedgers().getInfo();
 }

 void
@@ -2815,11 +2821,11 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
 // Ledgers are published only when they acquire sufficient validations
 // Holes are filled across connection loss or other catastrophe

-std::shared_ptr<AcceptedLedger> alpAccepted = app_.getAcceptedLedgerCache().fetch(lpAccepted->header().hash);
+std::shared_ptr<AcceptedLedger> alpAccepted = registry_.getAcceptedLedgerCache().fetch(lpAccepted->header().hash);
 if (!alpAccepted)
 {
-alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
-app_.getAcceptedLedgerCache().canonicalize_replace_client(lpAccepted->header().hash, alpAccepted);
+alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, registry_.app());
+registry_.getAcceptedLedgerCache().canonicalize_replace_client(lpAccepted->header().hash, alpAccepted);
 }

 XRPL_ASSERT(alpAccepted->getLedger().get() == lpAccepted.get(), "xrpl::NetworkOPsImp::pubLedger : accepted input");
@@ -2838,7 +2844,7 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
 jvObj[jss::ledger_hash] = to_string(lpAccepted->header().hash);
 jvObj[jss::ledger_time] = Json::Value::UInt(lpAccepted->header().closeTime.time_since_epoch().count());

-jvObj[jss::network_id] = app_.config().NETWORK_ID;
+jvObj[jss::network_id] = registry_.app().config().NETWORK_ID;

 if (!lpAccepted->rules().enabled(featureXRPFees))
 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
@@ -2850,7 +2856,7 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)

 if (mMode >= OperatingMode::SYNCING)
 {
-jvObj[jss::validated_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
+jvObj[jss::validated_ledgers] = registry_.getLedgerMaster().getCompleteLedgers();
 }

 auto it = mStreamMaps[sLedger].begin();
@@ -2918,9 +2924,9 @@ void
 NetworkOPsImp::reportFeeChange()
 {
 ServerFeeSummary f{
-app_.openLedger().current()->fees().base,
-app_.getTxQ().getMetrics(*app_.openLedger().current()),
-app_.getFeeTrack()};
+registry_.openLedger().current()->fees().base,
+registry_.getTxQ().getMetrics(*registry_.openLedger().current()),
+registry_.getFeeTrack()};

 // only schedule the job if something has changed
 if (f != mLastFeeSummary)
@@ -2981,7 +2987,7 @@ NetworkOPsImp::transJson(
 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
 {
 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
-uint32_t netID = app_.config().NETWORK_ID;
+uint32_t netID = registry_.app().config().NETWORK_ID;
 if (transaction->isFieldPresent(sfNetworkID))
 netID = transaction->getFieldU32(sfNetworkID);

@@ -3019,7 +3025,7 @@ NetworkOPsImp::transJson(
 // If the offer create is not self funded then add the owner balance
 if (account != amount.issue().account)
 {
-auto const ownerFunds = accountFunds(*ledger, account, amount, fhIGNORE_FREEZE, app_.journal("View"));
+auto const ownerFunds = accountFunds(*ledger, account, amount, fhIGNORE_FREEZE, registry_.journal("View"));
 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
 }
 }
@@ -3096,7 +3102,7 @@ NetworkOPsImp::pubValidatedTransaction(
 }

 if (transaction.getResult() == tesSUCCESS)
-app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
+registry_.getOrderBookDB().processTxn(ledger, transaction, jvObj);

 pubAccountTransaction(ledger, transaction, last);
 }
@@ -3382,7 +3388,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
 static auto const databaseType = [&]() -> DatabaseType {
 // Use a dynamic_cast to return DatabaseType::None
 // on failure.
-if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
+if (dynamic_cast<SQLiteDatabase*>(&registry_.getRelationalDatabase()))
 {
 return DatabaseType::Sqlite;
 }
@@ -3404,7 +3410,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
 // LCOV_EXCL_STOP
 }

-app_.getJobQueue().addJob(jtCLIENT_ACCT_HIST, "HistTxStream", [this, dbType = databaseType, subInfo]() {
+registry_.getJobQueue().addJob(jtCLIENT_ACCT_HIST, "HistTxStream", [this, dbType = databaseType, subInfo]() {
 auto const& accountId = subInfo.index_->accountId_;
 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
@@ -3479,7 +3485,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
 switch (dbType)
 {
 case Sqlite: {
-auto db = static_cast<SQLiteDatabase*>(&app_.getRelationalDatabase());
+auto db = static_cast<SQLiteDatabase*>(&registry_.getRelationalDatabase());
 RelationalDatabase::AccountTxPageOptions options{accountId, minLedger, maxLedger, marker, 0, true};
 return db->newestAccountTxPage(options);
 }
@@ -3520,7 +3526,8 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
 auto haveRange = [&]() -> bool {
 std::uint32_t validatedMin = UINT_MAX;
 std::uint32_t validatedMax = 0;
-auto haveSomeValidatedLedgers = app_.getLedgerMaster().getValidatedRange(validatedMin, validatedMax);
+auto haveSomeValidatedLedgers =
+registry_.getLedgerMaster().getValidatedRange(validatedMin, validatedMax);

 return haveSomeValidatedLedgers && validatedMin <= startLedgerSeq && lastLedgerSeq <= validatedMax;
 }();
@@ -3565,7 +3572,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
 send(rpcError(rpcINTERNAL), true);
 return;
 }
-auto curTxLedger = app_.getLedgerMaster().getLedgerBySeq(tx->getLedger());
+auto curTxLedger = registry_.getLedgerMaster().getLedgerBySeq(tx->getLedger());
 if (!curTxLedger)
 {
 // LCOV_EXCL_START
@@ -3705,7 +3712,7 @@ NetworkOPsImp::subAccountHistory(InfoSub::ref isrListener, AccountID const& acco
 simIterator->second.emplace(isrListener->getSeq(), ahi);
 }

-auto const ledger = app_.getLedgerMaster().getValidatedLedger();
+auto const ledger = registry_.getLedgerMaster().getValidatedLedger();
 if (ledger)
 {
 subAccountHistoryStart(ledger, ahi);
@@ -3759,7 +3766,7 @@ NetworkOPsImp::unsubAccountHistoryInternal(std::uint64_t seq, AccountID const& a
 bool
 NetworkOPsImp::subBook(InfoSub::ref isrListener, Book const& book)
 {
-if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
+if (auto listeners = registry_.getOrderBookDB().makeBookListeners(book))
 listeners->addSubscriber(isrListener);
 else
 {
@@ -3773,7 +3780,7 @@ NetworkOPsImp::subBook(InfoSub::ref isrListener, Book const& book)
 bool
 NetworkOPsImp::unsubBook(std::uint64_t uSeq, Book const& book)
 {
-if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
+if (auto listeners = registry_.getOrderBookDB().getBookListeners(book))
 listeners->removeSubscriber(uSeq);

 return true;
@@ -3792,7 +3799,7 @@ NetworkOPsImp::acceptLedger(std::optional<std::chrono::milliseconds> consensusDe
 // FIXME Could we improve on this and remove the need for a specialized
 // API in Consensus?
 beginConsensus(m_ledgerMaster.getClosedLedger()->header().hash, {});
-mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
+mConsensus.simulate(registry_.timeKeeper().closeTime(), consensusDelay);
 return m_ledgerMaster.getCurrentLedger()->header().seq;
 }

@@ -3810,12 +3817,12 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult)
 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
 jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
-jvResult[jss::network_id] = app_.config().NETWORK_ID;
+jvResult[jss::network_id] = registry_.app().config().NETWORK_ID;
 }

 if ((mMode >= OperatingMode::SYNCING) && !isNeedNetworkLedger())
 {
-jvResult[jss::validated_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
+jvResult[jss::validated_ledgers] = registry_.getLedgerMaster().getCompleteLedgers();
 }

 std::lock_guard sl(mSubLock);
@@ -3874,13 +3881,13 @@ NetworkOPsImp::subServer(InfoSub::ref isrListener, Json::Value& jvResult, bool a
 // CHECKME: is it necessary to provide a random number here?
 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());

-auto const& feeTrack = app_.getFeeTrack();
+auto const& feeTrack = registry_.getFeeTrack();
 jvResult[jss::random] = to_string(uRandom);
 jvResult[jss::server_status] = strOperatingMode(admin);
 jvResult[jss::load_base] = feeTrack.getLoadBase();
 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
 jvResult[jss::hostid] = getHostId(admin);
-jvResult[jss::pubkey_node] = toBase58(TokenType::NodePublic, app_.nodeIdentity().first);
+jvResult[jss::pubkey_node] = toBase58(TokenType::NodePublic, registry_.app().nodeIdentity().first);

 std::lock_guard sl(mSubLock);
 return mStreamMaps[sServer].emplace(isrListener->getSeq(), isrListener).second;
@@ -4066,7 +4073,7 @@ NetworkOPsImp::getBookPage(
 STAmount saDirRate;

 auto const rate = transferRate(view, book.out.account);
-auto viewJ = app_.journal("View");
+auto viewJ = registry_.journal("View");

 while (!bDone && iLimit-- > 0)
 {
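The mechanical rule applied throughout the diff: if the registry exposes a service, the call site becomes registry_.getX() or registry_.x(); anything the registry does not yet expose goes through the registry_.app() escape hatch. A before/after pairing drawn from the hunks above, shown together for contrast:

    // Before: every lookup pins NetworkOPs to the concrete Application.
    auto const flags = app_.getHashRouter().getFlags(txid);
    uint32_t netID = app_.config().NETWORK_ID;

    // After: the registry is the seam; only unmigrated state touches app().
    auto const flags = registry_.getHashRouter().getFlags(txid);
    uint32_t netID = registry_.app().config().NETWORK_ID;

One plausible motivation for this seam (an assumption, not stated in the commit message): a test can hand NetworkOPs a registry that substitutes individual services without constructing a full Application.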