Refactor Application shutdown using new Service, AsyncService interfaces
(mirror of https://github.com/XRPLF/rippled.git)
@@ -523,17 +523,8 @@ void LedgerConsensus::stateAccepted ()
    endConsensus ();
}

// VFALCO TODO implement shutdown without a naked global
extern volatile bool doShutdown;

void LedgerConsensus::timerEntry ()
{
    if (doShutdown)
    {
        WriteLog (lsFATAL, LedgerConsensus) << "Shutdown requested";
        getApp().stop ();
    }

    if ((mState != lcsFINISHED) && (mState != lcsACCEPTED))
        checkLCL ();
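The hunk above removes the doShutdown naked global that timerEntry () used to poll, and the rest of this commit wires components into a parent/child Service tree instead: each child is constructed with a Service& parent, overrides onServiceStop () to cancel its own work, and reports serviceStopped () once it has quiesced. The rippled Service base class itself is not included in this diff, so the sketch below is only a minimal, self-contained toy model of that stop-propagation idea; every name other than onServiceStop, serviceStopped, and isServiceStopping is invented for illustration.

#include <atomic>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for the Service interface: a parent keeps pointers to its
// children and propagates stop() to them before stopping itself.
class ToyService
{
public:
    explicit ToyService (std::string name, ToyService* parent = nullptr)
        : m_name (std::move (name))
    {
        if (parent != nullptr)
            parent->m_children.push_back (this);
    }

    virtual ~ToyService () = default;

    void stop ()
    {
        m_stopping = true;

        for (ToyService* child : m_children)
            child->stop ();

        onServiceStop ();                 // subclass cancels its own work
    }

    bool isServiceStopping () const { return m_stopping; }
    bool isStopped () const { return m_stopped; }

protected:
    virtual void onServiceStop () { serviceStopped (); }

    void serviceStopped ()                // called when nothing is left running
    {
        m_stopped = true;
        std::cout << m_name << " stopped\n";
    }

private:
    std::string m_name;
    std::vector<ToyService*> m_children;
    std::atomic<bool> m_stopping {false};
    std::atomic<bool> m_stopped {false};
};

// A child holding cached work, in the spirit of InboundLedgers below.
class ToyInboundLedgers : public ToyService
{
public:
    explicit ToyInboundLedgers (ToyService& parent)
        : ToyService ("InboundLedgers", &parent) { }

protected:
    void onServiceStop () override
    {
        m_cache.clear ();                 // drop acquisitions that will never finish
        serviceStopped ();
    }

private:
    std::vector<int> m_cache {1, 2, 3};
};

int main ()
{
    ToyService app ("Application");
    ToyInboundLedgers ledgers (app);

    app.stop ();                          // stops the child first, then the parent
    return app.isStopped () ? 0 : 1;
}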
@@ -6,8 +6,9 @@

typedef std::pair<uint256, InboundLedger::pointer> u256_acq_pair;

InboundLedgers::InboundLedgers ()
    : mLock (this, "InboundLedger", __FILE__, __LINE__)
InboundLedgers::InboundLedgers (Service& parent)
    : Service ("InboundLedgers", parent)
    , mLock (this, "InboundLedger", __FILE__, __LINE__)
    , mRecentFailures ("LedgerAcquireRecentFailures", 0, kReacquireIntervalSeconds)
{
}
@@ -20,32 +21,35 @@ InboundLedger::pointer InboundLedgers::findCreate (uint256 const& hash, uint32 s
{
    ScopedLockType sl (mLock, __FILE__, __LINE__);

    boost::unordered_map<uint256, InboundLedger::pointer>::iterator it = mLedgers.find (hash);
    if (it != mLedgers.end ())
    if (! isServiceStopping ())
    {
    ret = it->second;
    // FIXME: Should set the sequence if it's not set
    }
    else
    {
    ret = boost::make_shared<InboundLedger> (hash, seq);
    assert (ret);
    mLedgers.insert (std::make_pair (hash, ret));

    if (!ret->tryLocal())
    boost::unordered_map<uint256, InboundLedger::pointer>::iterator it = mLedgers.find (hash);
    if (it != mLedgers.end ())
    {
    ret->addPeers ();
    ret->setTimer (); // Cannot call in constructor
    ret = it->second;
    // FIXME: Should set the sequence if it's not set
    }
    else if (!ret->isFailed ())
    else
    {
    WriteLog (lsDEBUG, InboundLedger) << "Acquiring ledger we already have locally: " << hash;
    Ledger::pointer ledger = ret->getLedger ();
    ledger->setClosed ();
    ledger->setImmutable ();
    getApp().getLedgerMaster ().storeLedger (ledger);
    if (couldBeNew)
    getApp().getLedgerMaster().checkAccept(ledger);
    ret = boost::make_shared<InboundLedger> (hash, seq);
    assert (ret);
    mLedgers.insert (std::make_pair (hash, ret));

    if (!ret->tryLocal())
    {
    ret->addPeers ();
    ret->setTimer (); // Cannot call in constructor
    }
    else if (!ret->isFailed ())
    {
    WriteLog (lsDEBUG, InboundLedger) << "Acquiring ledger we already have locally: " << hash;
    Ledger::pointer ledger = ret->getLedger ();
    ledger->setClosed ();
    ledger->setImmutable ();
    getApp().getLedgerMaster ().storeLedger (ledger);
    if (couldBeNew)
    getApp().getLedgerMaster().checkAccept(ledger);
    }
    }
    }
}
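In the reworked findCreate above, the lookup-or-create body is wrapped in an if (! isServiceStopping ()) guard so that no new ledger acquisitions start once shutdown has begun, and the hunk below clears the outstanding ones in onServiceStop (). A compact, self-contained sketch of that guard-plus-clear pattern follows; the Entry type, container, and lock here are placeholders, not the rippled types.

#include <memory>
#include <mutex>
#include <unordered_map>

struct Entry { int seq = 0; };

class Acquirer
{
public:
    // Returns an existing entry, creates one, or returns nullptr when stopping.
    std::shared_ptr<Entry> findCreate (int hash, int seq)
    {
        std::lock_guard<std::mutex> lock (m_mutex);
        std::shared_ptr<Entry> ret;

        if (! m_stopping)          // mirrors isServiceStopping ()
        {
            auto it = m_entries.find (hash);
            if (it != m_entries.end ())
                ret = it->second;  // reuse the in-progress acquisition
            else
            {
                ret = std::make_shared<Entry> ();
                ret->seq = seq;
                m_entries.emplace (hash, ret);
            }
        }
        return ret;                // callers must tolerate a null result at shutdown
    }

    void stop ()
    {
        std::lock_guard<std::mutex> lock (m_mutex);
        m_stopping = true;
        m_entries.clear ();        // like InboundLedgers::onServiceStop ()
    }

private:
    std::mutex m_mutex;
    bool m_stopping = false;
    std::unordered_map<int, std::shared_ptr<Entry>> m_entries;
};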
@@ -348,4 +352,12 @@ Json::Value InboundLedgers::getInfo()
    return ret;
}

// vim:ts=4
void InboundLedgers::onServiceStop ()
{
    ScopedLockType lock (mLock, __FILE__, __LINE__);

    mLedgers.clear();
    mRecentFailures.clear();

    serviceStopped();
}
@@ -13,13 +13,15 @@
*/
// VFALCO TODO Rename to InboundLedgers
// VFALCO TODO Create abstract interface
class InboundLedgers : LeakChecked <InboundLedger>
class InboundLedgers
    : public Service
    , public LeakChecked <InboundLedger>
{
public:
    // How long before we try again to acquire the same ledger
    static const int kReacquireIntervalSeconds = 300;

    InboundLedgers ();
    explicit InboundLedgers (Service& parent);

    // VFALCO TODO Should this be called findOrAdd ?
    //
@@ -60,6 +62,8 @@ public:
    void gotFetchPack (Job&);
    void sweep ();

    void onServiceStop ();

private:
    typedef boost::unordered_map <uint256, InboundLedger::pointer> MapType;
@@ -313,7 +313,7 @@ bool LedgerMaster::getValidatedRange (uint32& minVal, uint32& maxVal)
    return true;
}

void LedgerMaster::tryFill (Ledger::pointer ledger)
void LedgerMaster::tryFill (Job& job, Ledger::pointer ledger)
{
    uint32 seq = ledger->getLedgerSeq ();
    uint256 prevHash = ledger->getParentHash ();
@@ -323,7 +323,7 @@ void LedgerMaster::tryFill (Ledger::pointer ledger)
    uint32 minHas = ledger->getLedgerSeq ();
    uint32 maxHas = ledger->getLedgerSeq ();

    while (seq > 0)
    while (! job.shouldCancel() && seq > 0)
    {
        {
            ScopedLockType ml (mLock, __FILE__, __LINE__);
@@ -616,7 +616,7 @@ void LedgerMaster::advanceThread()
    { // Previous ledger is in DB
        sl.lock(__FILE__, __LINE__);
        mFillInProgress = ledger->getLedgerSeq();
        getApp().getJobQueue().addJob(jtADVANCE, "tryFill", BIND_TYPE (&LedgerMaster::tryFill, this, ledger));
        getApp().getJobQueue().addJob(jtADVANCE, "tryFill", BIND_TYPE (&LedgerMaster::tryFill, this, P_1, ledger));
        sl.unlock();
    }
    progress = true;
@@ -673,7 +673,7 @@ void LedgerMaster::advanceThread()
    {
        mPathFindThread = true;
        getApp().getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
            BIND_TYPE (&LedgerMaster::updatePaths, this));
            BIND_TYPE (&LedgerMaster::updatePaths, this, P_1));
    }
}
if (progress)
@@ -824,11 +824,11 @@ uint256 LedgerMaster::getLedgerHash(uint32 desiredSeq, Ledger::ref knownGoodLedg
    return hash;
}

void LedgerMaster::updatePaths ()
void LedgerMaster::updatePaths (Job& job)
{
    Ledger::pointer lastLedger;

    do
    while (! job.shouldCancel())
    {
        bool newOnly = true;
@@ -856,10 +856,8 @@ void LedgerMaster::updatePaths ()
    }

    // VFALCO TODO Fix this global variable
    PathRequest::updateAll (lastLedger, newOnly);
    PathRequest::updateAll (lastLedger, newOnly, job.getCancelCallback ());
    }
    while (1);
}

void LedgerMaster::newPathRequest ()
@@ -871,8 +869,6 @@ void LedgerMaster::newPathRequest ()
    {
        mPathFindThread = true;
        getApp().getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
            BIND_TYPE (&LedgerMaster::updatePaths, this));
            BIND_TYPE (&LedgerMaster::updatePaths, this, P_1));
    }
}

// vim:ts=4
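tryFill and updatePaths now run as queued jobs whose loops poll job.shouldCancel () instead of spinning until done (or, in updatePaths, forever), so the job queue can interrupt them when the application stops. The Job and JobQueue classes are not part of this excerpt; the snippet below is only a minimal illustration of that cooperative-cancellation loop, using a plain atomic flag in place of the real Job.

#include <atomic>
#include <chrono>
#include <thread>

// Stand-in for the cancellation interface a queued job might see.
class ToyJob
{
public:
    bool shouldCancel () const { return m_cancel.load (); }
    void cancel () { m_cancel.store (true); }

private:
    std::atomic<bool> m_cancel {false};
};

// A long-running fill loop that checks for cancellation on every step,
// in the spirit of LedgerMaster::tryFill (Job&, Ledger::pointer).
void tryFill (ToyJob& job, int seq)
{
    while (! job.shouldCancel () && seq > 0)
    {
        --seq;  // stand-in for fetching and stitching one ledger
        std::this_thread::sleep_for (std::chrono::milliseconds (1));
    }
}

int main ()
{
    ToyJob job;
    std::thread worker (tryFill, std::ref (job), 1000000);

    std::this_thread::sleep_for (std::chrono::milliseconds (20));
    job.cancel ();      // what the job queue does when the service stops
    worker.join ();     // returns promptly because the loop polls the flag
}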
@@ -14,7 +14,9 @@
// VFALCO TODO Rename to Ledgers
// It sounds like this holds all the ledgers...
//
class LedgerMaster : LeakChecked <LedgerMaster>
class LedgerMaster
    : public Service
    , public LeakChecked <LedgerMaster>
{
public:
    typedef FUNCTION_TYPE <void (Ledger::ref)> callback;
@@ -23,8 +25,9 @@ public:
    typedef RippleRecursiveMutex LockType;
    typedef LockType::ScopedLockType ScopedLockType;

    LedgerMaster ()
        : mLock (this, "LedgerMaster", __FILE__, __LINE__)
    explicit LedgerMaster (Service& parent)
        : Service ("LedgerMaster", parent)
        , mLock (this, "LedgerMaster", __FILE__, __LINE__)
        , mHeldTransactions (uint256 ())
        , mMinValidations (0)
        , mLastValidateSeq (0)
@@ -198,9 +201,9 @@ private:
    bool isTransactionOnFutureList (Transaction::ref trans);

    void getFetchPack (Ledger::ref have);
    void tryFill (Ledger::pointer);
    void tryFill (Job&, Ledger::pointer);
    void advanceThread ();
    void updatePaths ();
    void updatePaths (Job&);

private:
    LockType mLock;
@@ -6,8 +6,9 @@

SETUP_LOG (OrderBookDB)

OrderBookDB::OrderBookDB ()
    : mLock (this, "OrderBookDB", __FILE__, __LINE__)
OrderBookDB::OrderBookDB (Service& parent)
    : Service ("OrderBookDB", parent)
    , mLock (this, "OrderBookDB", __FILE__, __LINE__)
    , mSeq (0)
{
@@ -12,14 +12,20 @@
// But, for now it is probably faster to just generate it each time.
//

//------------------------------------------------------------------------------

typedef std::pair<uint160, uint160> currencyIssuer_t;

//------------------------------------------------------------------------------

#ifdef C11X
typedef std::pair<const uint160&, const uint160&> currencyIssuer_ct;
#else
typedef std::pair<uint160, uint160> currencyIssuer_ct; // C++ defect 106
#endif

//------------------------------------------------------------------------------

class BookListeners
{
public:
@@ -37,10 +43,15 @@ private:
    boost::unordered_map<uint64, InfoSub::wptr> mListeners;
};

class OrderBookDB : LeakChecked <OrderBookDB>
//------------------------------------------------------------------------------

class OrderBookDB
    : public Service
    , public LeakChecked <OrderBookDB>
{
public:
    OrderBookDB ();
    explicit OrderBookDB (Service& parent);

    void setup (Ledger::ref ledger);
    void update (Ledger::pointer ledger);
    void invalidate ();
(File diff suppressed because it is too large.)
@@ -4,8 +4,8 @@
*/
//==============================================================================

#ifndef RIPPLE_IAPPLICATION_H
#define RIPPLE_IAPPLICATION_H
#ifndef RIPPLE_APP_APPLICATION_H_INCLUDED
#define RIPPLE_APP_APPLICATION_H_INCLUDED

namespace Validators { class Manager; }

@@ -120,7 +120,6 @@ public:
    virtual void setup () = 0;
    virtual void run () = 0;
    virtual void stop () = 0;
    virtual void sweep () = 0;
};

extern Application& getApp ();
src/ripple_app/main/IoServicePool.cpp (new file, 123 lines)
@@ -0,0 +1,123 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

class IoServicePool::ServiceThread : private Thread
{
public:
    explicit ServiceThread (
        String const& name,
        IoServicePool& owner,
        boost::asio::io_service& service)
        : Thread (name)
        , m_owner (owner)
        , m_service (service)
    {
        //startThread ();
    }

    ~ServiceThread ()
    {
        m_service.stop ();

        // block until thread exits
        stopThread ();
    }

    void start ()
    {
        startThread ();
    }

    void run ()
    {
        m_service.run ();

        m_owner.onThreadExit();
    }

private:
    IoServicePool& m_owner;
    boost::asio::io_service& m_service;
};

//------------------------------------------------------------------------------

IoServicePool::IoServicePool (Service& parent, String const& name, int numberOfThreads)
    : Service (name.toStdString().c_str(), parent)
    , m_name (name)
    , m_service (numberOfThreads)
    , m_work (boost::ref (m_service))
    , m_running (false)
{
    bassert (numberOfThreads > 0);

    m_threads.ensureStorageAllocated (numberOfThreads);

    for (int i = 0; i < numberOfThreads; ++i)
    {
        ++m_threadsRunning;
        m_threads.add (new ServiceThread (m_name, *this, m_service));
    }
}

IoServicePool::~IoServicePool ()
{
    // must have called runAsync()
    bassert (m_running);

    // the dtor of m_threads will block until each thread exits.
}

void IoServicePool::runAsync ()
{
    // must not call twice
    bassert (!m_running);
    m_running = true;

    for (int i = 0; i < m_threads.size (); ++i)
        m_threads [i]->start ();
}

boost::asio::io_service& IoServicePool::getService ()
{
    return m_service;
}

IoServicePool::operator boost::asio::io_service& ()
{
    return m_service;
}

void IoServicePool::onServiceStop ()
{
    // VFALCO NOTE This is a hack! We should gracefully
    //     cancel all pending I/O, and delete the work
    //     object using boost::optional, and let run()
    //     just return naturally.
    //
    m_service.stop ();
}

void IoServicePool::onServiceChildrenStopped ()
{
}

// Called every time io_service::run() returns and a thread will exit.
//
void IoServicePool::onThreadExit()
{
    // service must be stopping for threads to exit.
    bassert (isServiceStopping());

    // must have at least count 1
    bassert (m_threadsRunning.get() > 0);

    if (--m_threadsRunning == 0)
    {
        // last thread just exited
        serviceStopped ();
    }
}
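IoServicePool ties a boost::asio::io_service and its worker threads into the Service tree: a work object keeps run () from returning, onServiceStop () currently calls io_service::stop (), and the last thread to leave run () reports serviceStopped (). The sketch below is a self-contained approximation of the same idea using std::thread instead of the codebase's Thread/OwnedArray/Atomic utilities, and it shows the gentler variant the VFALCO NOTE asks for, releasing the work guard so run () drains naturally; none of this is the rippled implementation itself.

#include <boost/asio.hpp>
#include <atomic>
#include <iostream>
#include <optional>
#include <thread>
#include <vector>

// Minimal io_context thread pool in the spirit of IoServicePool.
class ToyIoServicePool
{
public:
    explicit ToyIoServicePool (int numberOfThreads)
        : m_work (boost::asio::make_work_guard (m_context))
    {
        for (int i = 0; i < numberOfThreads; ++i)
        {
            ++m_threadsRunning;
            m_threads.emplace_back ([this]
            {
                m_context.run ();
                onThreadExit ();
            });
        }
    }

    ~ToyIoServicePool ()
    {
        for (auto& t : m_threads)
            t.join ();          // blocks until every run() has returned
    }

    boost::asio::io_context& getService () { return m_context; }

    // Equivalent of onServiceStop(): release the work guard so that run()
    // returns once the remaining handlers have drained.
    void stop () { m_work.reset (); }

private:
    void onThreadExit ()
    {
        if (--m_threadsRunning == 0)
            std::cout << "last io thread exited\n";   // serviceStopped()
    }

    boost::asio::io_context m_context;
    std::optional<boost::asio::executor_work_guard<
        boost::asio::io_context::executor_type>> m_work;
    std::vector<std::thread> m_threads;
    std::atomic<int> m_threadsRunning {0};
};

int main ()
{
    ToyIoServicePool pool (2);
    boost::asio::post (pool.getService (), [] { std::cout << "handler ran\n"; });
    pool.stop ();   // threads exit after the posted handler completes
}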
src/ripple_app/main/IoServicePool.h (new file, 38 lines)
@@ -0,0 +1,38 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_APP_IOSERVICEPOOL_H_INCLUDED
#define RIPPLE_APP_IOSERVICEPOOL_H_INCLUDED

/** An io_service with an associated group of threads. */
class IoServicePool : public Service
{
public:
    IoServicePool (Service& parent, String const& name, int numberOfThreads);
    ~IoServicePool ();

    void runAsync ();

    boost::asio::io_service& getService ();
    operator boost::asio::io_service& ();

    void onServiceStop ();
    void onServiceChildrenStopped ();

private:
    class ServiceThread;

    void onThreadExit();

    String m_name;
    boost::asio::io_service m_service;
    boost::optional <boost::asio::io_service::work> m_work;
    OwnedArray <ServiceThread> m_threads;
    Atomic <int> m_threadsRunning;
    bool m_running;
};

#endif
@@ -426,7 +426,6 @@ int RippleMain::run (int argc, char const* const* argv)
        // No arguments. Run server.
        ScopedPointer <Application> app (Application::New ());
        setupServer ();
        setCallingThreadName ("io");
        startServer ();
    }
    else
@@ -4,8 +4,6 @@
*/
//==============================================================================

SETUP_LOG (NetworkOPs)

class NetworkOPsImp
    : public NetworkOPs
    , public DeadlineTimer::Listener
@@ -22,8 +20,10 @@ public:
public:
    // VFALCO TODO Make LedgerMaster a SharedPtr or a reference.
    //
    NetworkOPsImp (LedgerMaster& ledgerMaster)
        : mLock (this, "NetOPs", __FILE__, __LINE__)
    NetworkOPsImp (LedgerMaster& ledgerMaster, Service& parent, Journal journal)
        : NetworkOPs (parent)
        , m_journal (journal)
        , mLock (this, "NetOPs", __FILE__, __LINE__)
        , mMode (omDISCONNECTED)
        , mNeedNetworkLedger (false)
        , mProposing (false)
@@ -358,8 +358,17 @@ public:
    InfoSub::pointer findRpcSub (const std::string& strUrl);
    InfoSub::pointer addRpcSub (const std::string& strUrl, InfoSub::ref rspEntry);

    //
    //--------------------------------------------------------------------------
    //
    // Service

    void onServiceStop ()
    {
        m_heartbeatTimer.cancel();
        m_clusterTimer.cancel();

        serviceStopped ();
    }

private:
    void setHeartbeatTimer ();
@@ -389,6 +398,8 @@ private:
    // XXX Split into more locks.
    typedef RippleRecursiveMutex LockType;
    typedef LockType::ScopedLockType ScopedLockType;

    Journal m_journal;
    LockType mLock;

    OperatingMode mMode;
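The hunks that follow in NetworkOPs.cpp replace WriteLog (severity, NetworkOPs) << ... macro calls with streams on the new per-object m_journal member (m_journal.warning << ..., m_journal.trace << ..., and so on). The Journal class itself is not part of this diff; the snippet below is only a rough, self-contained approximation of the idea of an injectable per-component log object replacing a global macro, and its details are invented.

#include <iostream>
#include <sstream>
#include <string>

// Rough stand-in for a per-component journal: each severity member buffers
// the streamed values and writes one complete line when the expression ends.
class ToyJournal
{
public:
    class Line
    {
    public:
        explicit Line (std::string const& prefix) { m_text << prefix; }

        Line (Line&& other)
            : m_text (std::move (other.m_text))
        {
            other.m_active = false;     // only the final owner prints
        }

        ~Line ()
        {
            if (m_active)
                std::cout << m_text.str () << '\n';
        }

        template <class T>
        Line& operator<< (T const& value)
        {
            m_text << value;
            return *this;
        }

    private:
        std::ostringstream m_text;
        bool m_active = true;
    };

    class Severity
    {
    public:
        Severity (std::string const& component, char const* level)
            : m_prefix (component + " [" + level + "] ") { }

        template <class T>
        Line operator<< (T const& value)
        {
            Line line (m_prefix);
            line << value;
            return line;
        }

    private:
        std::string m_prefix;
    };

    explicit ToyJournal (std::string const& component)
        : info (component, "INF")
        , warning (component, "WRN")
    {
    }

    Severity info, warning;
};

int main ()
{
    ToyJournal journal ("NetworkOPs");

    // Same shape as the replacements in this commit:
    journal.warning << "Redundant transactions submitted";
    journal.info << "Node count (" << 7 << ") is sufficient.";
}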
@@ -484,7 +495,7 @@ void NetworkOPsImp::processHeartbeatTimer ()
|
||||
if (mMode != omDISCONNECTED)
|
||||
{
|
||||
setMode (omDISCONNECTED);
|
||||
WriteLog (lsWARNING, NetworkOPs)
|
||||
m_journal.warning
|
||||
<< "Node count (" << numPeers << ") "
|
||||
<< "has fallen below quorum (" << getConfig ().NETWORK_QUORUM << ").";
|
||||
}
|
||||
@@ -495,7 +506,7 @@ void NetworkOPsImp::processHeartbeatTimer ()
|
||||
if (mMode == omDISCONNECTED)
|
||||
{
|
||||
setMode (omCONNECTED);
|
||||
WriteLog (lsINFO, NetworkOPs) << "Node count (" << numPeers << ") is sufficient.";
|
||||
m_journal.info << "Node count (" << numPeers << ") is sufficient.";
|
||||
}
|
||||
|
||||
// Check if the last validated ledger forces a change between these states
|
||||
@@ -520,11 +531,11 @@ void NetworkOPsImp::processHeartbeatTimer ()
|
||||
|
||||
void NetworkOPsImp::processClusterTimer ()
|
||||
{
|
||||
bool synced = (getApp().getLedgerMaster().getValidatedLedgerAge() <= 240);
|
||||
bool synced = (m_ledgerMaster.getValidatedLedgerAge() <= 240);
|
||||
ClusterNodeStatus us("", synced ? getApp().getFeeTrack().getLocalFee() : 0, getNetworkTimeNC());
|
||||
if (!getApp().getUNL().nodeUpdate(getApp().getLocalCredentials().getNodePublic(), us))
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "To soon to send cluster update";
|
||||
m_journal.debug << "To soon to send cluster update";
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -615,7 +626,8 @@ void NetworkOPsImp::closeTimeOffset (int offset)
|
||||
else
|
||||
mCloseTimeOffset = (mCloseTimeOffset * 3) / 4;
|
||||
|
||||
CondLog (mCloseTimeOffset != 0, lsINFO, NetworkOPs) << "Close time offset now " << mCloseTimeOffset;
|
||||
if (mCloseTimeOffset != 0)
|
||||
m_journal.info << "Close time offset now " << mCloseTimeOffset;
|
||||
}
|
||||
|
||||
uint32 NetworkOPsImp::getLedgerID (uint256 const& hash)
|
||||
@@ -678,13 +690,13 @@ void NetworkOPsImp::submitTransaction (Job&, SerializedTransaction::pointer iTra
|
||||
|
||||
if (getApp().getHashRouter ().addSuppressionPeer (suppress, 0, flags) && ((flags & SF_RETRY) != 0))
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Redundant transactions submitted";
|
||||
m_journal.warning << "Redundant transactions submitted";
|
||||
return;
|
||||
}
|
||||
|
||||
if ((flags & SF_BAD) != 0)
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Submitted transaction cached bad";
|
||||
m_journal.warning << "Submitted transaction cached bad";
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -694,7 +706,7 @@ void NetworkOPsImp::submitTransaction (Job&, SerializedTransaction::pointer iTra
|
||||
{
|
||||
if (!trans->checkSign ())
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Submitted transaction has bad signature";
|
||||
m_journal.warning << "Submitted transaction has bad signature";
|
||||
getApp().getHashRouter ().setFlag (suppress, SF_BAD);
|
||||
return;
|
||||
}
|
||||
@@ -703,7 +715,7 @@ void NetworkOPsImp::submitTransaction (Job&, SerializedTransaction::pointer iTra
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Exception checking transaction " << suppress;
|
||||
m_journal.warning << "Exception checking transaction " << suppress;
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -734,9 +746,9 @@ Transaction::pointer NetworkOPsImp::submitTransactionSync (Transaction::ref tpTr
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsFATAL, NetworkOPs) << "Transaction reconstruction failure";
|
||||
WriteLog (lsFATAL, NetworkOPs) << tpTransNew->getSTransaction ()->getJson (0);
|
||||
WriteLog (lsFATAL, NetworkOPs) << tpTrans->getSTransaction ()->getJson (0);
|
||||
m_journal.fatal << "Transaction reconstruction failure";
|
||||
m_journal.fatal << tpTransNew->getSTransaction ()->getJson (0);
|
||||
m_journal.fatal << tpTrans->getSTransaction ()->getJson (0);
|
||||
|
||||
// assert (false); "1e-95" as amount can trigger this
|
||||
|
||||
@@ -780,7 +792,7 @@ void NetworkOPsImp::runTransactionQueue ()
|
||||
if (isTerRetry (r))
|
||||
{
|
||||
// transaction should be held
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "QTransaction should be held: " << r;
|
||||
m_journal.debug << "QTransaction should be held: " << r;
|
||||
dbtx->setStatus (HELD);
|
||||
getApp().getMasterTransaction ().canonicalize (&dbtx);
|
||||
m_ledgerMaster.addHeldTransaction (dbtx);
|
||||
@@ -788,18 +800,18 @@ void NetworkOPsImp::runTransactionQueue ()
|
||||
else if (r == tefPAST_SEQ)
|
||||
{
|
||||
// duplicate or conflict
|
||||
WriteLog (lsINFO, NetworkOPs) << "QTransaction is obsolete";
|
||||
m_journal.info << "QTransaction is obsolete";
|
||||
dbtx->setStatus (OBSOLETE);
|
||||
}
|
||||
else if (r == tesSUCCESS)
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "QTransaction is now included in open ledger";
|
||||
m_journal.info << "QTransaction is now included in open ledger";
|
||||
dbtx->setStatus (INCLUDED);
|
||||
getApp().getMasterTransaction ().canonicalize (&dbtx);
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "QStatus other than success " << r;
|
||||
m_journal.debug << "QStatus other than success " << r;
|
||||
dbtx->setStatus (INVALID);
|
||||
}
|
||||
|
||||
@@ -809,7 +821,7 @@ void NetworkOPsImp::runTransactionQueue ()
|
||||
|
||||
if (getApp().getHashRouter ().swapSet (txn->getID (), peers, SF_RELAYED))
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "relaying";
|
||||
m_journal.debug << "relaying";
|
||||
protocol::TMTransaction tx;
|
||||
Serializer s;
|
||||
dbtx->getSTransaction ()->add (s);
|
||||
@@ -821,7 +833,7 @@ void NetworkOPsImp::runTransactionQueue ()
|
||||
getApp().getPeers ().relayMessageBut (peers, packet);
|
||||
}
|
||||
else
|
||||
WriteLog(lsDEBUG, NetworkOPs) << "recently relayed";
|
||||
m_journal.debug << "recently relayed";
|
||||
}
|
||||
|
||||
txn->doCallbacks (r);
|
||||
@@ -852,7 +864,7 @@ Transaction::pointer NetworkOPsImp::processTransaction (Transaction::pointer tra
|
||||
// signature not checked
|
||||
if (!trans->checkSign ())
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Transaction has bad signature";
|
||||
m_journal.info << "Transaction has bad signature";
|
||||
trans->setStatus (INVALID);
|
||||
trans->setResult (temBAD_SIGNATURE);
|
||||
getApp().getHashRouter ().setFlag (trans->getID (), SF_BAD);
|
||||
@@ -879,7 +891,8 @@ Transaction::pointer NetworkOPsImp::processTransaction (Transaction::pointer tra
|
||||
if (r != tesSUCCESS)
|
||||
{
|
||||
std::string token, human;
|
||||
CondLog (transResultInfo (r, token, human), lsINFO, NetworkOPs) << "TransactionResult: " << token << ": " << human;
|
||||
if (transResultInfo (r, token, human))
|
||||
m_journal.info << "TransactionResult: " << token << ": " << human;
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -896,7 +909,7 @@ Transaction::pointer NetworkOPsImp::processTransaction (Transaction::pointer tra
|
||||
|
||||
if (r == tesSUCCESS)
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Transaction is now included in open ledger";
|
||||
m_journal.info << "Transaction is now included in open ledger";
|
||||
trans->setStatus (INCLUDED);
|
||||
|
||||
// VFALCO NOTE The value of trans can be changed here!!
|
||||
@@ -905,7 +918,7 @@ Transaction::pointer NetworkOPsImp::processTransaction (Transaction::pointer tra
|
||||
else if (r == tefPAST_SEQ)
|
||||
{
|
||||
// duplicate or conflict
|
||||
WriteLog (lsINFO, NetworkOPs) << "Transaction is obsolete";
|
||||
m_journal.info << "Transaction is obsolete";
|
||||
trans->setStatus (OBSOLETE);
|
||||
}
|
||||
else if (isTerRetry (r))
|
||||
@@ -913,7 +926,7 @@ Transaction::pointer NetworkOPsImp::processTransaction (Transaction::pointer tra
|
||||
if (!bFailHard)
|
||||
{
|
||||
// transaction should be held
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "Transaction should be held: " << r;
|
||||
m_journal.debug << "Transaction should be held: " << r;
|
||||
trans->setStatus (HELD);
|
||||
getApp().getMasterTransaction ().canonicalize (&trans);
|
||||
m_ledgerMaster.addHeldTransaction (trans);
|
||||
@@ -921,7 +934,7 @@ Transaction::pointer NetworkOPsImp::processTransaction (Transaction::pointer tra
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "Status other than success " << r;
|
||||
m_journal.debug << "Status other than success " << r;
|
||||
trans->setStatus (INVALID);
|
||||
}
|
||||
|
||||
@@ -992,21 +1005,21 @@ STVector256 NetworkOPsImp::getDirNodeInfo (
|
||||
|
||||
if (sleNode)
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "getDirNodeInfo: node index: " << uNodeIndex.ToString ();
|
||||
m_journal.debug << "getDirNodeInfo: node index: " << uNodeIndex.ToString ();
|
||||
|
||||
WriteLog (lsTRACE, NetworkOPs) << "getDirNodeInfo: first: " << strHex (sleNode->getFieldU64 (sfIndexPrevious));
|
||||
WriteLog (lsTRACE, NetworkOPs) << "getDirNodeInfo: last: " << strHex (sleNode->getFieldU64 (sfIndexNext));
|
||||
m_journal.trace << "getDirNodeInfo: first: " << strHex (sleNode->getFieldU64 (sfIndexPrevious));
|
||||
m_journal.trace << "getDirNodeInfo: last: " << strHex (sleNode->getFieldU64 (sfIndexNext));
|
||||
|
||||
uNodePrevious = sleNode->getFieldU64 (sfIndexPrevious);
|
||||
uNodeNext = sleNode->getFieldU64 (sfIndexNext);
|
||||
svIndexes = sleNode->getFieldV256 (sfIndexes);
|
||||
|
||||
WriteLog (lsTRACE, NetworkOPs) << "getDirNodeInfo: first: " << strHex (uNodePrevious);
|
||||
WriteLog (lsTRACE, NetworkOPs) << "getDirNodeInfo: last: " << strHex (uNodeNext);
|
||||
m_journal.trace << "getDirNodeInfo: first: " << strHex (uNodePrevious);
|
||||
m_journal.trace << "getDirNodeInfo: last: " << strHex (uNodeNext);
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "getDirNodeInfo: node index: NOT FOUND: " << uNodeIndex.ToString ();
|
||||
m_journal.info << "getDirNodeInfo: node index: NOT FOUND: " << uNodeIndex.ToString ();
|
||||
|
||||
uNodePrevious = 0;
|
||||
uNodeNext = 0;
|
||||
@@ -1176,7 +1189,7 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
// agree? And do we have no better ledger available?
|
||||
// If so, we are either tracking or full.
|
||||
|
||||
WriteLog (lsTRACE, NetworkOPs) << "NetworkOPsImp::checkLastClosedLedger";
|
||||
m_journal.trace << "NetworkOPsImp::checkLastClosedLedger";
|
||||
|
||||
Ledger::pointer ourClosed = m_ledgerMaster.getClosedLedger ();
|
||||
|
||||
@@ -1185,8 +1198,8 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
|
||||
uint256 closedLedger = ourClosed->getHash ();
|
||||
uint256 prevClosedLedger = ourClosed->getParentHash ();
|
||||
WriteLog (lsTRACE, NetworkOPs) << "OurClosed: " << closedLedger;
|
||||
WriteLog (lsTRACE, NetworkOPs) << "PrevClosed: " << prevClosedLedger;
|
||||
m_journal.trace << "OurClosed: " << closedLedger;
|
||||
m_journal.trace << "PrevClosed: " << prevClosedLedger;
|
||||
|
||||
boost::unordered_map<uint256, ValidationCount> ledgers;
|
||||
{
|
||||
@@ -1240,14 +1253,17 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
for (boost::unordered_map<uint256, ValidationCount>::iterator it = ledgers.begin (), end = ledgers.end ();
|
||||
it != end; ++it)
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "L: " << it->first << " t=" << it->second.trustedValidations <<
|
||||
m_journal.debug << "L: " << it->first << " t=" << it->second.trustedValidations <<
|
||||
", n=" << it->second.nodesUsing;
|
||||
|
||||
// Temporary logging to make sure tiebreaking isn't broken
|
||||
if (it->second.trustedValidations > 0)
|
||||
WriteLog (lsTRACE, NetworkOPs) << " TieBreakTV: " << it->second.highValidation;
|
||||
m_journal.trace << " TieBreakTV: " << it->second.highValidation;
|
||||
else
|
||||
CondLog (it->second.nodesUsing > 0, lsTRACE, NetworkOPs) << " TieBreakNU: " << it->second.highNodeUsing;
|
||||
{
|
||||
if (it->second.nodesUsing > 0)
|
||||
m_journal.trace << " TieBreakNU: " << it->second.highNodeUsing;
|
||||
}
|
||||
|
||||
if (it->second > bestVC)
|
||||
{
|
||||
@@ -1260,7 +1276,7 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
if (switchLedgers && (closedLedger == prevClosedLedger))
|
||||
{
|
||||
// don't switch to our own previous ledger
|
||||
WriteLog (lsINFO, NetworkOPs) << "We won't switch to our own previous ledger";
|
||||
m_journal.info << "We won't switch to our own previous ledger";
|
||||
networkClosed = ourClosed->getHash ();
|
||||
switchLedgers = false;
|
||||
}
|
||||
@@ -1279,9 +1295,9 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
return false;
|
||||
}
|
||||
|
||||
WriteLog (lsWARNING, NetworkOPs) << "We are not running on the consensus ledger";
|
||||
WriteLog (lsINFO, NetworkOPs) << "Our LCL: " << ourClosed->getJson (0);
|
||||
WriteLog (lsINFO, NetworkOPs) << "Net LCL " << closedLedger;
|
||||
m_journal.warning << "We are not running on the consensus ledger";
|
||||
m_journal.info << "Our LCL: " << ourClosed->getJson (0);
|
||||
m_journal.info << "Net LCL " << closedLedger;
|
||||
|
||||
if ((mMode == omTRACKING) || (mMode == omFULL))
|
||||
setMode (omCONNECTED);
|
||||
@@ -1290,7 +1306,7 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
|
||||
if (!consensus)
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Acquiring consensus ledger " << closedLedger;
|
||||
m_journal.info << "Acquiring consensus ledger " << closedLedger;
|
||||
|
||||
if (!mAcquiringLedger || (mAcquiringLedger->getHash () != closedLedger))
|
||||
mAcquiringLedger = getApp().getInboundLedgers ().findCreate (closedLedger, 0, true);
|
||||
@@ -1298,7 +1314,7 @@ bool NetworkOPsImp::checkLastClosedLedger (const std::vector<Peer::pointer>& pee
|
||||
if (!mAcquiringLedger || mAcquiringLedger->isFailed ())
|
||||
{
|
||||
getApp().getInboundLedgers ().dropLedger (closedLedger);
|
||||
WriteLog (lsERROR, NetworkOPs) << "Network ledger cannot be acquired";
|
||||
m_journal.error << "Network ledger cannot be acquired";
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -1321,9 +1337,9 @@ void NetworkOPsImp::switchLastClosedLedger (Ledger::pointer newLedger, bool duri
|
||||
// set the newledger as our last closed ledger -- this is abnormal code
|
||||
|
||||
if (duringConsensus)
|
||||
WriteLog (lsERROR, NetworkOPs) << "JUMPdc last closed ledger to " << newLedger->getHash ();
|
||||
m_journal.error << "JUMPdc last closed ledger to " << newLedger->getHash ();
|
||||
else
|
||||
WriteLog (lsERROR, NetworkOPs) << "JUMP last closed ledger to " << newLedger->getHash ();
|
||||
m_journal.error << "JUMP last closed ledger to " << newLedger->getHash ();
|
||||
|
||||
clearNeedNetworkLedger ();
|
||||
newLedger->setClosed ();
|
||||
@@ -1344,8 +1360,8 @@ void NetworkOPsImp::switchLastClosedLedger (Ledger::pointer newLedger, bool duri
|
||||
|
||||
int NetworkOPsImp::beginConsensus (uint256 const& networkClosed, Ledger::pointer closingLedger)
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Consensus time for ledger " << closingLedger->getLedgerSeq ();
|
||||
WriteLog (lsINFO, NetworkOPs) << " LCL is " << closingLedger->getParentHash ();
|
||||
m_journal.info << "Consensus time for ledger " << closingLedger->getLedgerSeq ();
|
||||
m_journal.info << " LCL is " << closingLedger->getParentHash ();
|
||||
|
||||
Ledger::pointer prevLedger = m_ledgerMaster.getLedgerByHash (closingLedger->getParentHash ());
|
||||
|
||||
@@ -1354,7 +1370,7 @@ int NetworkOPsImp::beginConsensus (uint256 const& networkClosed, Ledger::pointer
|
||||
// this shouldn't happen unless we jump ledgers
|
||||
if (mMode == omFULL)
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Don't have LCL, going to tracking";
|
||||
m_journal.warning << "Don't have LCL, going to tracking";
|
||||
setMode (omTRACKING);
|
||||
}
|
||||
|
||||
@@ -1370,7 +1386,7 @@ int NetworkOPsImp::beginConsensus (uint256 const& networkClosed, Ledger::pointer
|
||||
mConsensus = boost::make_shared<LedgerConsensus> (
|
||||
networkClosed, prevLedger, m_ledgerMaster.getCurrentLedger ()->getCloseTimeNC ());
|
||||
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "Initiating consensus engine";
|
||||
m_journal.debug << "Initiating consensus engine";
|
||||
return mConsensus->startup ();
|
||||
}
|
||||
|
||||
@@ -1392,7 +1408,7 @@ bool NetworkOPsImp::haveConsensusObject ()
|
||||
|
||||
if (!ledgerChange)
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Beginning consensus due to peer action";
|
||||
m_journal.info << "Beginning consensus due to peer action";
|
||||
beginConsensus (networkClosed, m_ledgerMaster.getCurrentLedger ());
|
||||
}
|
||||
}
|
||||
@@ -1418,7 +1434,7 @@ void NetworkOPsImp::processTrustedProposal (LedgerProposal::pointer proposal,
|
||||
|
||||
if (!haveConsensusObject ())
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Received proposal outside consensus window";
|
||||
m_journal.info << "Received proposal outside consensus window";
|
||||
|
||||
if (mMode == omFULL)
|
||||
relay = false;
|
||||
@@ -1431,7 +1447,7 @@ void NetworkOPsImp::processTrustedProposal (LedgerProposal::pointer proposal,
|
||||
|
||||
if (!set->has_previousledger () && (checkLedger != consensusLCL))
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Have to re-check proposal signature due to consensus view change";
|
||||
m_journal.warning << "Have to re-check proposal signature due to consensus view change";
|
||||
assert (proposal->hasSignature ());
|
||||
proposal->setPrevLedger (consensusLCL);
|
||||
|
||||
@@ -1442,7 +1458,7 @@ void NetworkOPsImp::processTrustedProposal (LedgerProposal::pointer proposal,
|
||||
if (sigGood && (consensusLCL == proposal->getPrevLedger ()))
|
||||
{
|
||||
relay = mConsensus->peerPosition (proposal);
|
||||
WriteLog (lsTRACE, NetworkOPs) << "Proposal processing finished, relay=" << relay;
|
||||
m_journal.trace << "Proposal processing finished, relay=" << relay;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1455,7 +1471,7 @@ void NetworkOPsImp::processTrustedProposal (LedgerProposal::pointer proposal,
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Not relaying trusted proposal";
|
||||
m_journal.info << "Not relaying trusted proposal";
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1508,7 +1524,7 @@ SHAMapAddNode NetworkOPsImp::gotTXData (const boost::shared_ptr<Peer>& peer, uin
|
||||
|
||||
if (!consensus)
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Got TX data with no consensus object";
|
||||
m_journal.warning << "Got TX data with no consensus object";
|
||||
return SHAMapAddNode ();
|
||||
}
|
||||
|
||||
@@ -1519,7 +1535,7 @@ bool NetworkOPsImp::hasTXSet (const boost::shared_ptr<Peer>& peer, uint256 const
|
||||
{
|
||||
if (!haveConsensusObject ())
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Peer has TX set, not during consensus";
|
||||
m_journal.info << "Peer has TX set, not during consensus";
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1550,7 +1566,7 @@ void NetworkOPsImp::endConsensus (bool correctLCL)
|
||||
{
|
||||
if (it && (it->getClosedLedgerHash () == deadLedger))
|
||||
{
|
||||
WriteLog (lsTRACE, NetworkOPs) << "Killing obsolete peer status";
|
||||
m_journal.trace << "Killing obsolete peer status";
|
||||
it->cycleStatus ();
|
||||
}
|
||||
}
|
||||
@@ -1694,7 +1710,7 @@ NetworkOPsImp::transactionsSQL (std::string selection, const RippleAddress& acco
|
||||
% lexicalCastThrow <std::string> (offset)
|
||||
% lexicalCastThrow <std::string> (numberOfResults)
|
||||
);
|
||||
WriteLog (lsTRACE, NetworkOPs) << "txSQL query: " << sql;
|
||||
m_journal.trace << "txSQL query: " << sql;
|
||||
return sql;
|
||||
}
|
||||
|
||||
@@ -1731,7 +1747,7 @@ NetworkOPsImp::getAccountTxs (const RippleAddress& account, int32 minLedger, int
|
||||
if (rawMeta.getLength() == 0)
|
||||
{ // Work around a bug that could leave the metadata missing
|
||||
uint32 seq = static_cast<uint32>(db->getBigInt("LedgerSeq"));
|
||||
WriteLog(lsWARNING, NetworkOPs) << "Recovering ledger " << seq << ", txn " << txn->getID();
|
||||
m_journal.warning << "Recovering ledger " << seq << ", txn " << txn->getID();
|
||||
Ledger::pointer ledger = getLedgerBySeq(seq);
|
||||
if (ledger)
|
||||
ledger->pendSaveValidated(false, false);
|
||||
@@ -1903,7 +1919,7 @@ NetworkOPsImp::getTxsAccount (const RippleAddress& account, int32 minLedger, int
|
||||
if (rawMeta.getLength() == 0)
|
||||
{ // Work around a bug that could leave the metadata missing
|
||||
uint32 seq = static_cast<uint32>(db->getBigInt("LedgerSeq"));
|
||||
WriteLog(lsWARNING, NetworkOPs) << "Recovering ledger " << seq << ", txn " << txn->getID();
|
||||
m_journal.warning << "Recovering ledger " << seq << ", txn " << txn->getID();
|
||||
Ledger::pointer ledger = getLedgerBySeq(seq);
|
||||
if (ledger)
|
||||
ledger->pendSaveValidated(false, false);
|
||||
@@ -2049,7 +2065,7 @@ NetworkOPsImp::getLedgerAffectedAccounts (uint32 ledgerSeq)
|
||||
|
||||
bool NetworkOPsImp::recvValidation (SerializedValidation::ref val, const std::string& source)
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "recvValidation " << val->getLedgerHash () << " from " << source;
|
||||
m_journal.debug << "recvValidation " << val->getLedgerHash () << " from " << source;
|
||||
return getApp().getValidations ().addValidation (val, source);
|
||||
}
|
||||
|
||||
@@ -2270,7 +2286,7 @@ void NetworkOPsImp::pubProposedTransaction (Ledger::ref lpCurrent, SerializedTra
|
||||
}
|
||||
}
|
||||
AcceptedLedgerTx alt (stTxn, terResult);
|
||||
WriteLog (lsTRACE, NetworkOPs) << "pubProposed: " << alt.getJson ();
|
||||
m_journal.trace << "pubProposed: " << alt.getJson ();
|
||||
pubAccountTransaction (lpCurrent, AcceptedLedgerTx (stTxn, terResult), false);
|
||||
}
|
||||
|
||||
@@ -2326,7 +2342,7 @@ void NetworkOPsImp::pubLedger (Ledger::ref accepted)
|
||||
{
|
||||
BOOST_FOREACH (const AcceptedLedger::value_type & vt, alpAccepted->getMap ())
|
||||
{
|
||||
WriteLog (lsTRACE, NetworkOPs) << "pubAccepted: " << vt.second->getJson ();
|
||||
m_journal.trace << "pubAccepted: " << vt.second->getJson ();
|
||||
pubValidatedTransaction (lpAccepted, *vt.second);
|
||||
}
|
||||
}
|
||||
@@ -2485,7 +2501,7 @@ void NetworkOPsImp::pubAccountTransaction (Ledger::ref lpCurrent, const Accepted
|
||||
}
|
||||
}
|
||||
}
|
||||
WriteLog (lsINFO, NetworkOPs) << boost::str (boost::format ("pubAccountTransaction: iProposed=%d iAccepted=%d") % iProposed % iAccepted);
|
||||
m_journal.info << boost::str (boost::format ("pubAccountTransaction: iProposed=%d iAccepted=%d") % iProposed % iAccepted);
|
||||
|
||||
if (!notify.empty ())
|
||||
{
|
||||
@@ -2515,7 +2531,7 @@ void NetworkOPsImp::subAccount (InfoSub::ref isrListener, const boost::unordered
|
||||
// For the connection, monitor each account.
|
||||
BOOST_FOREACH (const RippleAddress & naAccountID, vnaAccountIDs)
|
||||
{
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("subAccount: account: %d") % naAccountID.humanAccountID ());
|
||||
m_journal.trace << boost::str (boost::format ("subAccount: account: %d") % naAccountID.humanAccountID ());
|
||||
|
||||
isrListener->insertSubAccountInfo (naAccountID, uLedgerIndex);
|
||||
}
|
||||
@@ -2756,11 +2772,11 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
const uint256 uBookEnd = Ledger::getQualityNext (uBookBase);
|
||||
uint256 uTipIndex = uBookBase;
|
||||
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uTakerPaysCurrencyID=%s uTakerPaysIssuerID=%s") % STAmount::createHumanCurrency (uTakerPaysCurrencyID) % RippleAddress::createHumanAccountID (uTakerPaysIssuerID));
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uTakerGetsCurrencyID=%s uTakerGetsIssuerID=%s") % STAmount::createHumanCurrency (uTakerGetsCurrencyID) % RippleAddress::createHumanAccountID (uTakerGetsIssuerID));
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uBookBase=%s") % uBookBase);
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uBookEnd=%s") % uBookEnd);
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uTipIndex=%s") % uTipIndex);
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uTakerPaysCurrencyID=%s uTakerPaysIssuerID=%s") % STAmount::createHumanCurrency (uTakerPaysCurrencyID) % RippleAddress::createHumanAccountID (uTakerPaysIssuerID));
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uTakerGetsCurrencyID=%s uTakerGetsIssuerID=%s") % STAmount::createHumanCurrency (uTakerGetsCurrencyID) % RippleAddress::createHumanAccountID (uTakerGetsIssuerID));
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uBookBase=%s") % uBookBase);
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uBookEnd=%s") % uBookEnd);
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uTipIndex=%s") % uTipIndex);
|
||||
|
||||
LedgerEntrySet lesActive (lpLedger, tapNONE, true);
|
||||
|
||||
@@ -2785,13 +2801,13 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
{
|
||||
bDirectAdvance = false;
|
||||
|
||||
WriteLog (lsTRACE, NetworkOPs) << "getBookPage: bDirectAdvance";
|
||||
m_journal.trace << "getBookPage: bDirectAdvance";
|
||||
|
||||
sleOfferDir = lesActive.entryCache (ltDIR_NODE, lpLedger->getNextLedgerIndex (uTipIndex, uBookEnd));
|
||||
|
||||
if (!sleOfferDir)
|
||||
{
|
||||
WriteLog (lsTRACE, NetworkOPs) << "getBookPage: bDone";
|
||||
m_journal.trace << "getBookPage: bDone";
|
||||
bDone = true;
|
||||
}
|
||||
else
|
||||
@@ -2801,8 +2817,8 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
|
||||
lesActive.dirFirst (uTipIndex, sleOfferDir, uBookEntry, uOfferIndex);
|
||||
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uTipIndex=%s") % uTipIndex);
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uOfferIndex=%s") % uOfferIndex);
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uTipIndex=%s") % uTipIndex);
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uOfferIndex=%s") % uOfferIndex);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2828,7 +2844,7 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
// Found in running balance table.
|
||||
|
||||
saOwnerFunds = umBalanceEntry->second;
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: saOwnerFunds=%s (cached)") % saOwnerFunds.getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: saOwnerFunds=%s (cached)") % saOwnerFunds.getFullText());
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -2836,7 +2852,7 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
|
||||
saOwnerFunds = lesActive.accountHolds (uOfferOwnerID, uTakerGetsCurrencyID, uTakerGetsIssuerID);
|
||||
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: saOwnerFunds=%s (new)") % saOwnerFunds.getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: saOwnerFunds=%s (new)") % saOwnerFunds.getFullText());
|
||||
if (saOwnerFunds.isNegative ())
|
||||
{
|
||||
// Treat negative funds as zero.
|
||||
@@ -2874,12 +2890,12 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
}
|
||||
else
|
||||
{
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: saTakerGets=%s") % saTakerGets.getFullText());
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: saTakerPays=%s") % saTakerPays.getFullText());
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: saOwnerFunds=%s") % saOwnerFunds.getFullText());
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: saDirRate=%s") % saDirRate.getText());
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: multiply=%s") % STAmount::multiply(saTakerGetsFunded, saDirRate).getFullText());
|
||||
// WriteLog (lsINFO, NetworkOPs) << boost::str(boost::format("getBookPage: multiply=%s") % STAmount::multiply(saTakerGetsFunded, saDirRate, saTakerPays).getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: saTakerGets=%s") % saTakerGets.getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: saTakerPays=%s") % saTakerPays.getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: saOwnerFunds=%s") % saOwnerFunds.getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: saDirRate=%s") % saDirRate.getText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: multiply=%s") % STAmount::multiply(saTakerGetsFunded, saDirRate).getFullText());
|
||||
// m_journal.info << boost::str(boost::format("getBookPage: multiply=%s") % STAmount::multiply(saTakerGetsFunded, saDirRate, saTakerPays).getFullText());
|
||||
|
||||
// Only provide, if not fully funded.
|
||||
|
||||
@@ -2909,7 +2925,7 @@ void NetworkOPsImp::getBookPage (Ledger::pointer lpLedger, const uint160& uTaker
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsTRACE, NetworkOPs) << boost::str (boost::format ("getBookPage: uOfferIndex=%s") % uOfferIndex);
|
||||
m_journal.trace << boost::str (boost::format ("getBookPage: uOfferIndex=%s") % uOfferIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2933,13 +2949,13 @@ void NetworkOPsImp::makeFetchPack (Job&, boost::weak_ptr<Peer> wPeer,
|
||||
{
|
||||
if (UptimeTimer::getInstance ().getElapsedSeconds () > (uUptime + 1))
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Fetch pack request got stale";
|
||||
m_journal.info << "Fetch pack request got stale";
|
||||
return;
|
||||
}
|
||||
|
||||
if (getApp().getFeeTrack ().isLoadedLocal ())
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Too busy to make fetch pack";
|
||||
m_journal.info << "Too busy to make fetch pack";
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -2986,13 +3002,13 @@ void NetworkOPsImp::makeFetchPack (Job&, boost::weak_ptr<Peer> wPeer,
|
||||
}
|
||||
while (wantLedger && (UptimeTimer::getInstance ().getElapsedSeconds () <= (uUptime + 1)));
|
||||
|
||||
WriteLog (lsINFO, NetworkOPs) << "Built fetch pack with " << reply.objects ().size () << " nodes";
|
||||
m_journal.info << "Built fetch pack with " << reply.objects ().size () << " nodes";
|
||||
PackedMessage::pointer msg = boost::make_shared<PackedMessage> (reply, protocol::mtGET_OBJECTS);
|
||||
peer->sendPacket (msg, false);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Exception building fetch pach";
|
||||
m_journal.warning << "Exception building fetch pach";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3017,7 +3033,7 @@ bool NetworkOPsImp::getFetchPack (uint256 const& hash, Blob& data)
|
||||
|
||||
if (hash != Serializer::getSHA512Half (data))
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Bad entry in fetch pack";
|
||||
m_journal.warning << "Bad entry in fetch pack";
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -3048,19 +3064,28 @@ void NetworkOPsImp::missingNodeInLedger (uint32 seq)
    uint256 hash = getApp().getLedgerMaster ().getHashBySeq (seq);
    if (hash.isZero())
    {
        WriteLog (lsWARNING, NetworkOPs) << "Missing a node in ledger " << seq << " cannot fetch";
        m_journal.warning << "Missing a node in ledger " << seq << " cannot fetch";
    }
    else
    {
        WriteLog (lsWARNING, NetworkOPs) << "Missing a node in ledger " << seq << " fetching";
        m_journal.warning << "Missing a node in ledger " << seq << " fetching";
        getApp().getInboundLedgers ().findCreate (hash, seq, false);
    }
}

//------------------------------------------------------------------------------

NetworkOPs* NetworkOPs::New (LedgerMaster& ledgerMaster)
NetworkOPs::NetworkOPs (Service& parent)
    : InfoSub::Source ("NetworkOPs", parent)
{
    ScopedPointer <NetworkOPs> object (new NetworkOPsImp (ledgerMaster));
}

//------------------------------------------------------------------------------

NetworkOPs* NetworkOPs::New (LedgerMaster& ledgerMaster,
    Service& parent, Journal journal)
{
    ScopedPointer <NetworkOPs> object (new NetworkOPsImp (
        ledgerMaster, parent, journal));
    return object.release ();
}
@@ -36,8 +36,12 @@ class LedgerConsensus;
    instances of rippled will need to be hardened to protect against hostile
    or unreliable servers.
*/
class NetworkOPs : public InfoSub::Source
class NetworkOPs
    : public InfoSub::Source
{
protected:
    explicit NetworkOPs (Service& parent);

public:
    enum Fault
    {
@@ -63,7 +67,8 @@ public:
public:
    // VFALCO TODO Make LedgerMaster a SharedPtr or a reference.
    //
    static NetworkOPs* New (LedgerMaster& ledgerMaster);
    static NetworkOPs* New (LedgerMaster& ledgerMaster,
        Service& parent, Journal journal);

    virtual ~NetworkOPs () { }
@@ -33,10 +33,11 @@ static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit);
class SqliteBackendFactory::Backend : public NodeStore::Backend
{
public:
    Backend (size_t keyBytes, std::string const& path)
    Backend (size_t keyBytes, std::string const& path, NodeStore::Scheduler& scheduler)
        : m_keyBytes (keyBytes)
        , m_name (path)
        , m_db (new DatabaseCon(path, s_nodeStoreDBInit, s_nodeStoreDBCount))
        , m_scheduler (scheduler)
    {
        String s;

@@ -164,6 +165,11 @@ public:
        return 0;
    }

    void stopAsync ()
    {
        m_scheduler.scheduledTasksStopped ();
    }

    //--------------------------------------------------------------------------

    void doBind (SqliteStatement& statement, NodeObject::ref object)
@@ -205,6 +211,7 @@ private:
    size_t const m_keyBytes;
    std::string const m_name;
    ScopedPointer <DatabaseCon> m_db;
    NodeStore::Scheduler& m_scheduler;
};

//------------------------------------------------------------------------------
@@ -232,5 +239,5 @@ NodeStore::Backend* SqliteBackendFactory::createInstance (
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new Backend (keyBytes, keyValues ["path"].toStdString ());
    return new Backend (keyBytes, keyValues ["path"].toStdString (), scheduler);
}
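The SQLite backend now keeps a reference to the NodeStore::Scheduler and gains a stopAsync () hook that calls scheduledTasksStopped (), telling the node store's shutdown sequence that this backend will queue no further asynchronous work. The snippet below is a tiny, invented illustration of that handshake; it is not the rippled NodeStore code.

#include <iostream>

// Invented stand-ins for a scheduler and a backend, illustrating the
// stopAsync() -> scheduledTasksStopped() shutdown handshake added above.
class ToyScheduler
{
public:
    // Called by a backend once it will schedule no further asynchronous work.
    void scheduledTasksStopped ()
    {
        std::cout << "scheduler: backend tasks drained\n";
        m_stopped = true;
    }

    bool stopped () const { return m_stopped; }

private:
    bool m_stopped = false;
};

class ToyBackend
{
public:
    explicit ToyBackend (ToyScheduler& scheduler)
        : m_scheduler (scheduler) { }

    // Asynchronous half of shutdown: flush or cancel pending writes, then
    // tell the scheduler that nothing else will be queued.
    void stopAsync ()
    {
        // (flush pending batches here)
        m_scheduler.scheduledTasksStopped ();
    }

private:
    ToyScheduler& m_scheduler;
};

int main ()
{
    ToyScheduler scheduler;
    ToyBackend backend (scheduler);

    backend.stopAsync ();
    return scheduler.stopped () ? 0 : 1;
}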
@@ -358,7 +358,7 @@ bool PathRequest::doUpdate (RippleLineCache::ref cache, bool fast)
    return true;
}

void PathRequest::updateAll (Ledger::ref ledger, bool newOnly)
void PathRequest::updateAll (Ledger::ref ledger, bool newOnly, CancelCallback shouldCancel)
{
    std::set<wptr> requests;

@@ -374,6 +374,9 @@ void PathRequest::updateAll (Ledger::ref ledger, bool newOnly)

    BOOST_FOREACH (wref wRequest, requests)
    {
        if (shouldCancel())
            break;

        bool remove = true;
        PathRequest::pointer pRequest = wRequest.lock ();

@@ -41,7 +41,7 @@ public:

    bool doUpdate (const boost::shared_ptr<RippleLineCache>&, bool fast); // update jvStatus

    static void updateAll (const boost::shared_ptr<Ledger>& ledger, bool newOnly);
    static void updateAll (const boost::shared_ptr<Ledger>& ledger, bool newOnly, CancelCallback shouldCancel);

private:
    void setValid ();
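Unlike the job loops earlier, which poll a member of the running Job, updateAll receives the cancellation test as a callback (built in LedgerMaster via job.getCancelCallback ()) and checks it once per request. A minimal stand-alone sketch of that shape, with the CancelCallback spelled as a std::function purely for illustration:

#include <atomic>
#include <functional>
#include <iostream>
#include <vector>

// Illustrative only: a batch updater that accepts a cancel callback,
// mirroring PathRequest::updateAll (ledger, newOnly, shouldCancel).
using CancelCallback = std::function<bool ()>;

void updateAll (std::vector<int> const& requests, CancelCallback shouldCancel)
{
    for (int request : requests)
    {
        if (shouldCancel ())
            break;                        // abandon the rest of the batch

        std::cout << "updated request " << request << '\n';
    }
}

int main ()
{
    std::atomic<bool> stopping {false};

    // The job queue would build this from the running Job; here it is a lambda.
    CancelCallback shouldCancel = [&stopping] { return stopping.load (); };

    updateAll ({1, 2, 3}, shouldCancel);
    stopping = true;
    updateAll ({4, 5, 6}, shouldCancel);  // exits immediately once stopping
}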
@@ -6,12 +6,15 @@

SETUP_LOG (PeerDoor)

class PeerDoorImp : public PeerDoor, LeakChecked <PeerDoorImp>
class PeerDoorImp
    : public PeerDoor
    , public LeakChecked <PeerDoorImp>
{
public:
    PeerDoorImp (Kind kind, std::string const& ip, int port,
    PeerDoorImp (Service& parent, Kind kind, std::string const& ip, int port,
        boost::asio::io_service& io_service, boost::asio::ssl::context& ssl_context)
        : m_kind (kind)
        : PeerDoor (parent)
        , m_kind (kind)
        , m_ssl_context (ssl_context)
        , mAcceptor (io_service, boost::asio::ip::tcp::endpoint (
            boost::asio::ip::address ().from_string (ip.empty () ? "0.0.0.0" : ip), port))
@@ -20,13 +23,20 @@ public:
        if (! ip.empty () && port != 0)
        {
            Log (lsINFO) << "Peer port: " << ip << " " << port;
            startListening ();

            async_accept ();
        }
    }

    ~PeerDoorImp ()
    {
    }

    //--------------------------------------------------------------------------

    void startListening ()
    // Initiating function for performing an asynchronous accept
    //
    void async_accept ()
    {
        bool const isInbound (true);
        bool const requirePROXYHandshake (m_kind == sslAndPROXYRequired);
@@ -37,40 +47,67 @@ public:
            isInbound, requirePROXYHandshake));

        mAcceptor.async_accept (new_connection->getNativeSocket (),
            boost::bind (&PeerDoorImp::handleConnect, this, new_connection,
                boost::asio::placeholders::error));
            boost::bind (&PeerDoorImp::handleAccept, this,
                boost::asio::placeholders::error,
                new_connection));
    }

    //--------------------------------------------------------------------------

    void handleConnect (Peer::pointer new_connection,
        boost::system::error_code const& error)
    // Called when the deadline timer wait completes
    //
    void handleTimer (boost::system::error_code ec)
    {
        async_accept ();
    }

    // Called when the accept socket wait completes
    //
    void handleAccept (boost::system::error_code ec, Peer::pointer new_connection)
    {
        bool delay = false;

        if (!error)
        if (! ec)
        {
            new_connection->connected (error);
            // VFALCO NOTE the error code doesnt seem to be used in connected()
            new_connection->connected (ec);
        }
        else
        {
            if (error == boost::system::errc::too_many_files_open)
            if (ec == boost::system::errc::too_many_files_open)
                delay = true;

            WriteLog (lsERROR, PeerDoor) << error;
            WriteLog (lsERROR, PeerDoor) << ec;
        }

        if (delay)
        {
            mDelayTimer.expires_from_now (boost::posix_time::milliseconds (500));
            mDelayTimer.async_wait (boost::bind (&PeerDoorImp::startListening, this));
            mDelayTimer.async_wait (boost::bind (&PeerDoorImp::handleTimer,
                this, boost::asio::placeholders::error));
        }
        else
        {
            startListening ();
            async_accept ();
        }
    }

    //--------------------------------------------------------------------------

    void onServiceStop ()
    {
        {
            boost::system::error_code ec;
            mDelayTimer.cancel (ec);
        }

        {
            boost::system::error_code ec;
            mAcceptor.cancel (ec);
        }

        serviceStopped ();
    }

private:
    Kind m_kind;
    boost::asio::ssl::context& m_ssl_context;
@@ -80,8 +117,15 @@ private:

//------------------------------------------------------------------------------

PeerDoor* PeerDoor::New (Kind kind, std::string const& ip, int port,
PeerDoor::PeerDoor (Service& parent)
    : AsyncService ("PeerDoor", parent)
{
}

//------------------------------------------------------------------------------

PeerDoor* PeerDoor::New (Service& parent, Kind kind, std::string const& ip, int port,
    boost::asio::io_service& io_service, boost::asio::ssl::context& ssl_context)
{
    return new PeerDoorImp (kind, ip, port, io_service, ssl_context);
    return new PeerDoorImp (parent, kind, ip, port, io_service, ssl_context);
}
@@ -7,10 +7,12 @@
#ifndef RIPPLE_PEERDOOR_H_INCLUDED
#define RIPPLE_PEERDOOR_H_INCLUDED

/** Handles incoming connections from peers.
*/
class PeerDoor : LeakChecked <PeerDoor>
/** Handles incoming connections from peers. */
class PeerDoor : public AsyncService
{
protected:
    explicit PeerDoor (Service& parent);

public:
    virtual ~PeerDoor () { }

@@ -20,7 +22,7 @@ public:
        sslAndPROXYRequired
    };

    static PeerDoor* New (Kind kind, std::string const& ip, int port,
    static PeerDoor* New (Service& parent, Kind kind, std::string const& ip, int port,
        boost::asio::io_service& io_service, boost::asio::ssl::context& ssl_context);

    //virtual boost::asio::ssl::context& getSSLContext () = 0;

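As an aside, the accept/retry flow introduced above (async_accept, handleAccept, handleTimer, and the cancellations in onServiceStop) follows a common boost::asio pattern: keep one accept operation pending, and when an accept fails with "too many open files", back off on a deadline timer before re-arming. The sketch below is illustrative only and is not part of this commit: plain TCP sockets stand in for Peer objects, the SSL context and Service plumbing are omitted, and every class and member name here is a placeholder.

// Illustrative sketch (not part of this commit): the accept-with-backoff
// pattern used by PeerDoorImp, with plain TCP sockets standing in for Peers.
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <iostream>

class AcceptLoop
{
public:
    AcceptLoop (boost::asio::io_service& io_service, unsigned short port)
        : m_io_service (io_service)
        , m_acceptor (io_service, boost::asio::ip::tcp::endpoint (
              boost::asio::ip::tcp::v4 (), port))
        , m_delayTimer (io_service)
    {
        async_accept ();
    }

    // Cancel pending waits, in the spirit of PeerDoorImp::onServiceStop ().
    void stop ()
    {
        boost::system::error_code ec;
        m_delayTimer.cancel (ec);
        m_acceptor.cancel (ec);
    }

private:
    typedef boost::shared_ptr <boost::asio::ip::tcp::socket> socket_ptr;

    // Keep one accept operation pending at all times.
    void async_accept ()
    {
        socket_ptr socket (new boost::asio::ip::tcp::socket (m_io_service));

        m_acceptor.async_accept (*socket,
            boost::bind (&AcceptLoop::handleAccept, this,
                boost::asio::placeholders::error, socket));
    }

    void handleAccept (boost::system::error_code ec, socket_ptr socket)
    {
        if (ec == boost::asio::error::operation_aborted)
            return;     // stop () cancelled the accept

        bool delay = false;

        if (! ec)
        {
            std::cout << "accepted " << socket->remote_endpoint () << std::endl;
        }
        else
        {
            // Running out of file descriptors is transient: back off briefly.
            if (ec == boost::system::errc::too_many_files_open)
                delay = true;

            std::cerr << ec.message () << std::endl;
        }

        if (delay)
        {
            m_delayTimer.expires_from_now (boost::posix_time::milliseconds (500));
            m_delayTimer.async_wait (boost::bind (&AcceptLoop::handleTimer,
                this, boost::asio::placeholders::error));
        }
        else
        {
            async_accept ();
        }
    }

    // Re-arm the accept once the back-off period has elapsed.
    void handleTimer (boost::system::error_code ec)
    {
        if (! ec)
            async_accept ();
    }

    boost::asio::io_service& m_io_service;
    boost::asio::ip::tcp::acceptor m_acceptor;
    boost::asio::deadline_timer m_delayTimer;
};
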
@@ -8,7 +8,8 @@ SETUP_LOG (Peers)

class PeersImp
    : public Peers
    , LeakChecked <PeersImp>
    , public Service
    , public LeakChecked <PeersImp>
{
public:
    enum
@@ -18,8 +19,11 @@ public:
        policyIntervalSeconds = 5
    };

    PeersImp (boost::asio::io_service& io_service, boost::asio::ssl::context& ssl_context)
        : m_io_service (io_service)
    PeersImp (Service& parent,
        boost::asio::io_service& io_service,
            boost::asio::ssl::context& ssl_context)
        : Service ("Peers", parent)
        , m_io_service (io_service)
        , m_ssl_context (ssl_context)
        , mPeerLock (this, "PeersImp", __FILE__, __LINE__)
        , mLastPeer (0)
@@ -907,8 +911,10 @@ void PeersImp::scanRefresh ()

//------------------------------------------------------------------------------

Peers* Peers::New (boost::asio::io_service& io_service, boost::asio::ssl::context& ssl_context)
Peers* Peers::New (Service& parent,
    boost::asio::io_service& io_service,
        boost::asio::ssl::context& ssl_context)
{
    return new PeersImp (io_service, ssl_context);
    return new PeersImp (parent, io_service, ssl_context);
}


@@ -12,8 +12,9 @@
class Peers
{
public:
    static Peers* New (boost::asio::io_service& io_service,
        boost::asio::ssl::context& context);
    static Peers* New (Service& parent,
        boost::asio::io_service& io_service,
            boost::asio::ssl::context& context);

    virtual ~Peers () { }


@@ -89,8 +89,9 @@ private:
    typedef boost::unordered_map<std::pair< std::string, int>, score> epScore;

public:
    UniqueNodeListImp ()
        : mFetchLock (this, "Fetch", __FILE__, __LINE__)
    explicit UniqueNodeListImp (Service& parent)
        : UniqueNodeList (parent)
        , mFetchLock (this, "Fetch", __FILE__, __LINE__)
        , mUNLLock (this, "UNL", __FILE__, __LINE__)
        , m_scoreTimer (this)
        , mFetchActive (0)
@@ -100,6 +101,16 @@ public:

    //--------------------------------------------------------------------------

    void onServiceStop ()
    {
        m_fetchTimer.cancel ();
        m_scoreTimer.cancel ();

        serviceStopped ();
    }

    //--------------------------------------------------------------------------

    void doScore ()
    {
        mtpScoreNext = boost::posix_time::ptime (boost::posix_time::not_a_date_time); // Timer not set.
@@ -1150,17 +1161,6 @@ private:

    //--------------------------------------------------------------------------

    // Begin scoring if timer was not cancelled.
    void scoreTimerHandler (const boost::system::error_code& err)
    {
        if (!err)
        {
            onDeadlineTimer (m_scoreTimer);
        }
    }

    //--------------------------------------------------------------------------

    // Start a timer to update scores.
    // <-- bNow: true, to force scoring for debugging.
    void scoreNext (bool bNow)
@@ -2052,9 +2052,6 @@ private:
                % getConfig ().VALIDATORS_BASE);
        }
    }

    //--------------------------------------------------------------------------

private:
    typedef RippleMutex FetchLockType;
    typedef FetchLockType::ScopedLockType ScopedFetchLockType;
@@ -2085,7 +2082,16 @@ private:
    std::map<RippleAddress, ClusterNodeStatus> m_clusterNodes;
};

UniqueNodeList* UniqueNodeList::New ()
//------------------------------------------------------------------------------

UniqueNodeList::UniqueNodeList (Service& parent)
    : Service ("UniqueNodeList", parent)
{
    return new UniqueNodeListImp ();
}

//------------------------------------------------------------------------------

UniqueNodeList* UniqueNodeList::New (Service& parent)
{
    return new UniqueNodeListImp (parent);
}

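The same stop protocol recurs in each of these classes: register with a parent Service by name at construction, cancel outstanding timers or handlers in onServiceStop, then acknowledge with serviceStopped. The sketch below only illustrates that contract; the stand-in Service base class is written for this example, so every detail of the base class is an assumption and not the Service/AsyncService implementation added elsewhere in this commit.

// Simplified stand-in (not the rippled Service class) illustrating the
// onServiceStop () / serviceStopped () contract used by UniqueNodeListImp,
// PeersImp, and the other components changed in this commit.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

class Service
{
public:
    explicit Service (std::string const& name, Service* parent = nullptr)
        : m_name (name)
        , m_stopped (false)
    {
        if (parent != nullptr)
            parent->m_children.push_back (this);
    }

    virtual ~Service () { }

    // Ask the children to stop first, then this service.
    void serviceStop ()
    {
        for (std::size_t i = 0; i < m_children.size (); ++i)
            m_children [i]->serviceStop ();

        onServiceStop ();
    }

    bool isServiceStopped () const { return m_stopped; }

protected:
    // Derived classes cancel pending work here, then acknowledge.
    virtual void onServiceStop () { serviceStopped (); }

    void serviceStopped ()
    {
        m_stopped = true;
        std::cout << m_name << " stopped" << std::endl;
    }

private:
    std::string m_name;
    bool m_stopped;
    std::vector <Service*> m_children;
};

// A component that owns timers, in the style of UniqueNodeListImp.
class NodeListLike : public Service
{
public:
    explicit NodeListLike (Service& parent)
        : Service ("NodeListLike", &parent) { }

protected:
    void onServiceStop ()
    {
        // Cancel m_fetchTimer / m_scoreTimer equivalents here, then report.
        serviceStopped ();
    }
};

int main ()
{
    Service root ("root");
    NodeListLike unl (root);
    root.serviceStop ();
}
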
@@ -7,8 +7,11 @@
#ifndef RIPPLE_UNIQUENODELIST_H_INCLUDED
#define RIPPLE_UNIQUENODELIST_H_INCLUDED

class UniqueNodeList
class UniqueNodeList : public Service
{
protected:
    explicit UniqueNodeList (Service& parent);

public:
    enum ValidatorSource
    {
@@ -26,7 +29,7 @@ public:

public:
    // VFALCO TODO make this not use boost::asio...
    static UniqueNodeList* New ();
    static UniqueNodeList* New (Service& parent);

    virtual ~UniqueNodeList () { }


@@ -11,6 +11,7 @@
#include <boost/bimap/list_of.hpp>
#include <boost/bimap/multiset_of.hpp>
#include <boost/bimap/unordered_set_of.hpp>
#include <boost/optional.hpp>

#include "ripple_app.h"

@@ -29,6 +30,9 @@ namespace ripple
// Application
//

# include "main/IoServicePool.h"
#include "main/IoServicePool.cpp"

# include "main/FatalErrorReporter.h"
#include "main/FatalErrorReporter.cpp"


@@ -28,7 +28,8 @@ public:
    WSDoorImp (InfoSub::Source& source,
        std::string const& strIp, int iPort, bool bPublic,
            boost::asio::ssl::context& ssl_context)
        : Thread ("websocket")
        : WSDoor (source)
        , Thread ("websocket")
        , m_source (source)
        , m_ssl_context (ssl_context)
        , m_endpointLock (this, "WSDoor", __FILE__, __LINE__)
@@ -41,15 +42,7 @@ public:

    ~WSDoorImp ()
    {
        {
            ScopedLockType lock (m_endpointLock, __FILE__, __LINE__);

            if (m_endpoint != nullptr)
                m_endpoint->stop ();
        }

        signalThreadShouldExit ();
        waitForThreadToExit ();
        stopThread ();
    }

private:
@@ -101,6 +94,23 @@ private:

        m_endpoint = nullptr;
    }

        serviceStopped ();
    }

    void onServiceStop ()
    {
        {
            ScopedLockType lock (m_endpointLock, __FILE__, __LINE__);

            // VFALCO NOTE we probably dont want to block here
            //             but websocketpp is deficient and broken.
            //
            if (m_endpoint != nullptr)
                m_endpoint->stop ();
        }

        signalThreadShouldExit ();
    }

private:
@@ -119,6 +129,13 @@ private:

//------------------------------------------------------------------------------

WSDoor::WSDoor (Service& parent)
    : Service ("WSDoor", parent)
{
}

//------------------------------------------------------------------------------

WSDoor* WSDoor::New (InfoSub::Source& source, std::string const& strIp,
    int iPort, bool bPublic, boost::asio::ssl::context& ssl_context)
{

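The WSDoorImp change above moves endpoint shutdown out of the destructor and into onServiceStop: the websocket endpoint is stopped while the endpoint lock is held, the worker thread is signalled to exit, and the thread's run loop reports serviceStopped when it finishes. A rough equivalent using standard threads is sketched below; it is not part of the commit, the websocketpp endpoint is reduced to a flag, and all names are placeholders.

// Rough sketch (not part of this commit) of the WSDoorImp stop sequence,
// using std::thread in place of the beast Thread and websocketpp endpoint.
#include <atomic>
#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>

class DoorLike
{
public:
    DoorLike ()
        : m_running (true)
        , m_thread (&DoorLike::run, this)
    {
    }

    // onServiceStop () equivalent: stop the endpoint, then signal the thread.
    void stop ()
    {
        {
            std::lock_guard <std::mutex> lock (m_endpointLock);
            m_endpointStopped = true;   // stands in for m_endpoint->stop ()
        }
        m_running = false;
    }

    // The destructor only has to join, since stop () already did the work.
    ~DoorLike ()
    {
        m_running = false;  // let the thread exit even if stop () was skipped
        if (m_thread.joinable ())
            m_thread.join ();
    }

private:
    void run ()
    {
        while (m_running)
            std::this_thread::sleep_for (std::chrono::milliseconds (10));

        // serviceStopped () would be reported here in the real class.
        std::cout << "websocket thread exiting" << std::endl;
    }

    std::mutex m_endpointLock;
    bool m_endpointStopped = false;
    std::atomic <bool> m_running;
    std::thread m_thread;
};

int main ()
{
    DoorLike door;
    door.stop ();
}
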
@@ -8,9 +8,14 @@
#define RIPPLE_WSDOOR_RIPPLEHEADER

/** Handles accepting incoming WebSocket connections. */
class WSDoor
class WSDoor : public Service
{
protected:
    explicit WSDoor (Service& parent);

public:
    virtual ~WSDoor () { }

    static WSDoor* New (InfoSub::Source& source, std::string const& strIp,
        int iPort, bool bPublic, boost::asio::ssl::context& ssl_context);
};
