Merge branch 'develop' of github.com:ripple/rippled into develop

JoelKatz
2013-07-29 14:18:17 -07:00
28 changed files with 1124 additions and 565 deletions

View File

@@ -33,8 +33,11 @@ SqliteStatement::~SqliteStatement ()
sqlite3_finalize (statement);
}
//------------------------------------------------------------------------------
SqliteDatabase::SqliteDatabase (const char* host)
: Database (host)
, m_thread ("sqlitedb")
, mWalQ (NULL)
, walRunning (false)
{
@@ -290,9 +293,13 @@ void SqliteDatabase::doHook (const char* db, int pages)
}
if (mWalQ)
{
mWalQ->addJob (jtWAL, std::string ("WAL:") + mHost, BIND_TYPE (&SqliteDatabase::runWal, this));
}
else
boost::thread (BIND_TYPE (&SqliteDatabase::runWal, this)).detach ();
{
m_thread.call (&SqliteDatabase::runWal, this);
}
}
void SqliteDatabase::runWal ()
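The hunk above changes how WAL checkpoints are scheduled: instead of detaching a fresh boost::thread on every hook, the work is either queued as a jtWAL job or handed to the database's own ThreadWithCallQueue member via m_thread.call. The sketch below is a minimal standard-C++ analogue of such a call-queue thread, illustrative only and not beast's ThreadWithCallQueue; an owner would post work with something like queue.call ([this] { runWal (); }) instead of constructing a thread per event.

// Illustrative analogue of a call-queue thread: queued calls run in FIFO
// order on one owned thread, so per-database work is serialized instead of
// spawning a detached thread per WAL hook.
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

class CallQueueThread
{
public:
    CallQueueThread () : m_stop (false), m_thread (&CallQueueThread::loop, this) { }

    ~CallQueueThread ()
    {
        {
            std::lock_guard <std::mutex> lock (m_mutex);
            m_stop = true;
        }
        m_wakeup.notify_one ();
        m_thread.join ();
    }

    void call (std::function <void ()> f)
    {
        {
            std::lock_guard <std::mutex> lock (m_mutex);
            m_queue.push_back (std::move (f));
        }
        m_wakeup.notify_one ();
    }

private:
    void loop ()
    {
        for (;;)
        {
            std::function <void ()> f;
            {
                std::unique_lock <std::mutex> lock (m_mutex);
                m_wakeup.wait (lock, [this] { return m_stop || ! m_queue.empty (); });
                if (m_stop && m_queue.empty ())
                    return;
                f = std::move (m_queue.front ());
                m_queue.pop_front ();
            }
            f (); // run outside the lock so call() never blocks on the work itself
        }
    }

    std::mutex m_mutex;
    std::condition_variable m_wakeup;
    std::deque <std::function <void ()>> m_queue;
    bool m_stop;
    std::thread m_thread;
};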

View File

@@ -57,6 +57,8 @@ public:
int getKBUsedAll ();
private:
ThreadWithCallQueue m_thread;
sqlite3* mConnection;
// VFALCO TODO Why do we need an "aux" connection? Should just use a second SqliteDatabase object.
sqlite3* mAuxConnection;

View File

@@ -20,6 +20,118 @@ class ApplicationImp
, LeakChecked <ApplicationImp>
{
public:
// RAII container for a boost::asio::io_service run by beast threads
class IoServiceThread
{
public:
IoServiceThread (String const& name,
int expectedConcurrency,
int numberOfExtraThreads = 0)
: m_name (name)
, m_service (expectedConcurrency)
, m_work (m_service)
{
m_threads.ensureStorageAllocated (numberOfExtraThreads);
for (int i = 0; i < numberOfExtraThreads; ++i)
m_threads.add (new ServiceThread (m_name, m_service));
}
~IoServiceThread ()
{
m_service.stop ();
// the dtor of m_threads will block until each thread exits.
}
// TEMPORARY HACK for compatibility with old code
void runExtraThreads ()
{
for (int i = 0; i < m_threads.size (); ++i)
m_threads [i]->start ();
}
// Run on the caller's thread.
// This will block until stop is issued.
void run ()
{
Thread const* const currentThread (Thread::getCurrentThread());
String previousThreadName;
if (currentThread != nullptr)
{
previousThreadName = currentThread->getThreadName ();
}
else
{
// we're on the main thread
previousThreadName = "main"; // for vanity
}
Thread::setCurrentThreadName (m_name);
m_service.run ();
Thread::setCurrentThreadName (previousThreadName);
}
void stop ()
{
m_service.stop ();
}
boost::asio::io_service& getService ()
{
return m_service;
}
operator boost::asio::io_service& ()
{
return m_service;
}
private:
class ServiceThread : Thread
{
public:
explicit ServiceThread (String const& name, boost::asio::io_service& service)
: Thread (name)
, m_service (service)
{
//startThread ();
}
~ServiceThread ()
{
m_service.stop ();
stopThread (-1); // wait forever
}
void start ()
{
startThread ();
}
void run ()
{
m_service.run ();
}
private:
boost::asio::io_service& m_service;
};
private:
String const m_name;
boost::asio::io_service m_service;
boost::asio::io_service::work m_work;
OwnedArray <ServiceThread> m_threads;
};
//--------------------------------------------------------------------------
static ApplicationImp* createInstance ()
{
return new ApplicationImp;
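IoServiceThread packages three standard Boost.Asio ingredients: an io_service built with a concurrency hint, a work object whose lifetime keeps io_service::run() from returning while the queue is idle, and optional extra threads that also call run(). The following self-contained sketch shows that underlying pattern in plain Boost.Asio; the names are illustrative and not rippled's.

#include <boost/asio.hpp>
#include <iostream>
#include <thread>

int main ()
{
    boost::asio::io_service service (2);           // concurrency hint, as with m_mainService
    boost::asio::io_service::work work (service);  // run() will not exit while this exists

    // One extra thread also services handlers, mirroring runExtraThreads().
    std::thread extra ([&service] { service.run (); });

    service.post ([] { std::cout << "handler ran" << std::endl; });
    service.post ([&service] { service.stop (); }); // stop() unblocks every run()

    service.run (); // run on the calling thread, as IoServiceThread::run() does
    extra.join ();
}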
@@ -40,14 +152,15 @@ public:
//
: SharedSingleton <Application> (SingletonLifetime::neverDestroyed)
#endif
, mIOService ((getConfig ().NODE_SIZE >= 2) ? 2 : 1)
, mIOWork (mIOService)
, m_mainService ("io",
(getConfig ().NODE_SIZE >= 2) ? 2 : 1,
(getConfig ().NODE_SIZE >= 2) ? 1 : 0)
, m_auxService ("auxio", 1, 1)
, mNetOps (new NetworkOPs (&mLedgerMaster))
, m_rpcServerHandler (*mNetOps)
, mTempNodeCache ("NodeCache", 16384, 90)
, mSLECache ("LedgerEntryCache", 4096, 120)
, mSNTPClient (mAuxService)
, mJobQueue (mIOService)
, mSNTPClient (m_auxService)
// VFALCO New stuff
, m_nodeStore (NodeStore::New (
getConfig ().nodeDatabase,
@@ -61,7 +174,7 @@ public:
, mValidations (IValidations::New ())
, mUNL (UniqueNodeList::New ())
, mProofOfWorkFactory (IProofOfWorkFactory::New ())
, mPeers (IPeers::New (mIOService))
, mPeers (IPeers::New (m_mainService))
, m_loadManager (ILoadManager::New ())
// VFALCO End new stuff
// VFALCO TODO replace all NULL with nullptr
@@ -73,7 +186,7 @@ public:
, mRPCDoor (NULL)
, mWSPublicDoor (NULL)
, mWSPrivateDoor (NULL)
, mSweepTimer (mAuxService)
, mSweepTimer (m_auxService)
, mShutdown (false)
{
// VFALCO TODO remove these once the call is thread safe.
@@ -119,7 +232,7 @@ public:
boost::asio::io_service& getIOService ()
{
return mIOService;
return m_mainService;
}
LedgerMaster& getLedgerMaster ()
@@ -258,7 +371,266 @@ public:
{
return mShutdown;
}
void setup ();
//--------------------------------------------------------------------------
static DatabaseCon* openDatabaseCon (const char* fileName,
const char* dbInit[],
int dbCount)
{
return new DatabaseCon (fileName, dbInit, dbCount);
}
void initSqliteDb (int index)
{
switch (index)
{
case 0: mRpcDB = openDatabaseCon ("rpc.db", RpcDBInit, RpcDBCount); break;
case 1: mTxnDB = openDatabaseCon ("transaction.db", TxnDBInit, TxnDBCount); break;
case 2: mLedgerDB = openDatabaseCon ("ledger.db", LedgerDBInit, LedgerDBCount); break;
case 3: mWalletDB = openDatabaseCon ("wallet.db", WalletDBInit, WalletDBCount); break;
};
}
// VFALCO TODO Is it really necessary to init the dbs in parallel?
void initSqliteDbs ()
{
int const count = 4;
ThreadGroup threadGroup (count);
ParallelFor (threadGroup).loop (count, &ApplicationImp::initSqliteDb, this);
}
#ifdef SIGINT
static void sigIntHandler (int)
{
doShutdown = true;
}
#endif
// VFALCO TODO Break this function up into many small initialization segments.
// Or better yet refactor these initializations into RAII classes
// which are members of the Application object.
//
void setup ()
{
// VFALCO NOTE: 0 means use heuristics to determine the thread count.
mJobQueue.setThreadCount (0, getConfig ().RUN_STANDALONE);
mSweepTimer.expires_from_now (boost::posix_time::seconds (10));
mSweepTimer.async_wait (BIND_TYPE (&ApplicationImp::sweep, this));
m_loadManager->startThread ();
#if ! BEAST_WIN32
#ifdef SIGINT
if (!getConfig ().RUN_STANDALONE)
{
struct sigaction sa;
memset (&sa, 0, sizeof (sa));
sa.sa_handler = &ApplicationImp::sigIntHandler;
sigaction (SIGINT, &sa, NULL);
}
#endif
#endif
assert (mTxnDB == NULL);
if (!getConfig ().DEBUG_LOGFILE.empty ())
{
// Let debug messages go to the file but only WARNING or higher to regular output (unless verbose)
Log::setLogFile (getConfig ().DEBUG_LOGFILE);
if (Log::getMinSeverity () > lsDEBUG)
LogPartition::setSeverity (lsDEBUG);
}
if (!getConfig ().RUN_STANDALONE)
mSNTPClient.init (getConfig ().SNTP_SERVERS);
initSqliteDbs ();
leveldb::Options options;
options.create_if_missing = true;
options.block_cache = leveldb::NewLRUCache (getConfig ().getSize (siHashNodeDBCache) * 1024 * 1024);
getApp().getLedgerDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(getConfig ().getSize (siLgrDBCache) * 1024)));
getApp().getTxnDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(getConfig ().getSize (siTxnDBCache) * 1024)));
mTxnDB->getDB ()->setupCheckpointing (&mJobQueue);
mLedgerDB->getDB ()->setupCheckpointing (&mJobQueue);
if (!getConfig ().RUN_STANDALONE)
updateTables ();
mFeatures->addInitialFeatures ();
if (getConfig ().START_UP == Config::FRESH)
{
WriteLog (lsINFO, Application) << "Starting new Ledger";
startNewLedger ();
}
else if ((getConfig ().START_UP == Config::LOAD) || (getConfig ().START_UP == Config::REPLAY))
{
WriteLog (lsINFO, Application) << "Loading specified Ledger";
if (!loadOldLedger (getConfig ().START_LEDGER, getConfig ().START_UP == Config::REPLAY))
{
getApp().stop ();
exit (-1);
}
}
else if (getConfig ().START_UP == Config::NETWORK)
{
// This should probably become the default once we have a stable network
if (!getConfig ().RUN_STANDALONE)
mNetOps->needNetworkLedger ();
startNewLedger ();
}
else
startNewLedger ();
mOrderBookDB.setup (getApp().getLedgerMaster ().getCurrentLedger ());
//
// Begin validation and ip maintenance.
// - LocalCredentials maintains local information: including identity and network connection persistence information.
//
m_localCredentials.start ();
//
// Set up UNL.
//
if (!getConfig ().RUN_STANDALONE)
getUNL ().nodeBootstrap ();
mValidations->tune (getConfig ().getSize (siValidationsSize), getConfig ().getSize (siValidationsAge));
m_nodeStore->tune (getConfig ().getSize (siNodeCacheSize), getConfig ().getSize (siNodeCacheAge));
mLedgerMaster.tune (getConfig ().getSize (siLedgerSize), getConfig ().getSize (siLedgerAge));
mSLECache.setTargetSize (getConfig ().getSize (siSLECacheSize));
mSLECache.setTargetAge (getConfig ().getSize (siSLECacheAge));
mLedgerMaster.setMinValidations (getConfig ().VALIDATION_QUORUM);
//
// Allow peer connections.
//
if (!getConfig ().RUN_STANDALONE)
{
try
{
mPeerDoor = PeerDoor::New (
getConfig ().PEER_IP,
getConfig ().PEER_PORT,
getConfig ().PEER_SSL_CIPHER_LIST,
m_mainService);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open peer service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "Peer interface: disabled";
}
//
// Allow RPC connections.
//
if (! getConfig ().getRpcIP().empty () && getConfig ().getRpcPort() != 0)
{
try
{
mRPCDoor = new RPCDoor (m_mainService, m_rpcServerHandler);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open RPC service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "RPC interface: disabled";
}
//
// Allow private WS connections.
//
if (!getConfig ().WEBSOCKET_IP.empty () && getConfig ().WEBSOCKET_PORT)
{
try
{
mWSPrivateDoor = new WSDoor (getConfig ().WEBSOCKET_IP, getConfig ().WEBSOCKET_PORT, false);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open private websocket service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "WS private interface: disabled";
}
//
// Allow public WS connections.
//
if (!getConfig ().WEBSOCKET_PUBLIC_IP.empty () && getConfig ().WEBSOCKET_PUBLIC_PORT)
{
try
{
mWSPublicDoor = new WSDoor (getConfig ().WEBSOCKET_PUBLIC_IP, getConfig ().WEBSOCKET_PUBLIC_PORT, true);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open public websocket service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "WS public interface: disabled";
}
//
// Begin connecting to network.
//
if (!getConfig ().RUN_STANDALONE)
mPeers->start ();
if (getConfig ().RUN_STANDALONE)
{
WriteLog (lsWARNING, Application) << "Running in standalone mode";
mNetOps->setStandAlone ();
}
else
{
// VFALCO NOTE the state timer resets the deadlock detector.
//
mNetOps->setStateTimer ();
}
}
void run ();
void stop ();
void sweep ();
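initSqliteDbs() swaps the four hand-rolled boost::thread/join pairs from the old ApplicationImp::setup() (still visible further down in this diff) for a ThreadGroup driven by ParallelFor. A rough standard-threads equivalent of the effect, with an illustrative stand-in for initSqliteDb, looks like this:

#include <cstdio>
#include <thread>
#include <vector>

// Stand-in for ApplicationImp::initSqliteDb (int index); illustrative only.
static void openDatabase (int index)
{
    std::printf ("opening database %d\n", index);
}

int main ()
{
    int const count = 4;
    std::vector <std::thread> threads;
    threads.reserve (count);

    for (int i = 0; i < count; ++i)
        threads.emplace_back (openDatabase, i); // one thread per database, like the old t1..t4

    for (std::thread& t : threads)
        t.join (); // setup() continues only after all four connections exist
}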
@@ -270,13 +642,12 @@ private:
bool loadOldLedger (const std::string&, bool);
private:
boost::asio::io_service mIOService;
boost::asio::io_service mAuxService;
// The lifetime of the io_service::work object informs the io_service
// of when the work starts and finishes. io_service::run() will not exit
// while the work object exists.
//
boost::asio::io_service::work mIOWork;
IoServiceThread m_mainService;
IoServiceThread m_auxService;
//boost::asio::io_service mIOService;
//boost::asio::io_service mAuxService;
//boost::asio::io_service::work mIOWork;
MasterLockType mMasterLock;
@@ -330,281 +701,24 @@ void ApplicationImp::stop ()
WriteLog (lsINFO, Application) << "Received shutdown request";
StopSustain ();
mShutdown = true;
mIOService.stop ();
m_mainService.stop ();
m_nodeStore = nullptr;
mValidations->flush ();
mAuxService.stop ();
m_auxService.stop ();
mJobQueue.shutdown ();
WriteLog (lsINFO, Application) << "Stopped: " << mIOService.stopped ();
//WriteLog (lsINFO, Application) << "Stopped: " << mIOService.stopped
mShutdown = false;
}
static void InitDB (DatabaseCon** dbCon, const char* fileName, const char* dbInit[], int dbCount)
{
*dbCon = new DatabaseCon (fileName, dbInit, dbCount);
}
#ifdef SIGINT
void sigIntHandler (int)
{
doShutdown = true;
}
#endif
// VFALCO TODO Figure this out; it looks like the wrong tool
static void runAux (boost::asio::io_service& svc)
{
setCallingThreadName ("aux");
svc.run ();
}
static void runIO (boost::asio::io_service& io)
{
setCallingThreadName ("io");
io.run ();
}
// VFALCO TODO Break this function up into many small initialization segments.
// Or better yet refactor these initializations into RAII classes
// which are members of the Application object.
//
void ApplicationImp::setup ()
{
// VFALCO NOTE: 0 means use heuristics to determine the thread count.
mJobQueue.setThreadCount (0, getConfig ().RUN_STANDALONE);
mSweepTimer.expires_from_now (boost::posix_time::seconds (10));
mSweepTimer.async_wait (BIND_TYPE (&ApplicationImp::sweep, this));
m_loadManager->startThread ();
#if ! BEAST_WIN32
#ifdef SIGINT
if (!getConfig ().RUN_STANDALONE)
{
struct sigaction sa;
memset (&sa, 0, sizeof (sa));
sa.sa_handler = sigIntHandler;
sigaction (SIGINT, &sa, NULL);
}
#endif
#endif
assert (mTxnDB == NULL);
if (!getConfig ().DEBUG_LOGFILE.empty ())
{
// Let debug messages go to the file but only WARNING or higher to regular output (unless verbose)
Log::setLogFile (getConfig ().DEBUG_LOGFILE);
if (Log::getMinSeverity () > lsDEBUG)
LogPartition::setSeverity (lsDEBUG);
}
boost::thread (BIND_TYPE (runAux, boost::ref (mAuxService))).detach ();
if (!getConfig ().RUN_STANDALONE)
mSNTPClient.init (getConfig ().SNTP_SERVERS);
//
// Construct databases.
//
boost::thread t1 (BIND_TYPE (&InitDB, &mRpcDB, "rpc.db", RpcDBInit, RpcDBCount));
boost::thread t2 (BIND_TYPE (&InitDB, &mTxnDB, "transaction.db", TxnDBInit, TxnDBCount));
boost::thread t3 (BIND_TYPE (&InitDB, &mLedgerDB, "ledger.db", LedgerDBInit, LedgerDBCount));
boost::thread t4 (BIND_TYPE (&InitDB, &mWalletDB, "wallet.db", WalletDBInit, WalletDBCount));
t1.join ();
t2.join ();
t3.join ();
t4.join ();
leveldb::Options options;
options.create_if_missing = true;
options.block_cache = leveldb::NewLRUCache (getConfig ().getSize (siHashNodeDBCache) * 1024 * 1024);
getApp().getLedgerDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(getConfig ().getSize (siLgrDBCache) * 1024)));
getApp().getTxnDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(getConfig ().getSize (siTxnDBCache) * 1024)));
mTxnDB->getDB ()->setupCheckpointing (&mJobQueue);
mLedgerDB->getDB ()->setupCheckpointing (&mJobQueue);
if (!getConfig ().RUN_STANDALONE)
updateTables ();
mFeatures->addInitialFeatures ();
if (getConfig ().START_UP == Config::FRESH)
{
WriteLog (lsINFO, Application) << "Starting new Ledger";
startNewLedger ();
}
else if ((getConfig ().START_UP == Config::LOAD) || (getConfig ().START_UP == Config::REPLAY))
{
WriteLog (lsINFO, Application) << "Loading specified Ledger";
if (!loadOldLedger (getConfig ().START_LEDGER, getConfig ().START_UP == Config::REPLAY))
{
getApp().stop ();
exit (-1);
}
}
else if (getConfig ().START_UP == Config::NETWORK)
{
// This should probably become the default once we have a stable network
if (!getConfig ().RUN_STANDALONE)
mNetOps->needNetworkLedger ();
startNewLedger ();
}
else
startNewLedger ();
mOrderBookDB.setup (getApp().getLedgerMaster ().getCurrentLedger ());
//
// Begin validation and ip maintenance.
// - LocalCredentials maintains local information: including identity and network connection persistence information.
//
m_localCredentials.start ();
//
// Set up UNL.
//
if (!getConfig ().RUN_STANDALONE)
getUNL ().nodeBootstrap ();
mValidations->tune (getConfig ().getSize (siValidationsSize), getConfig ().getSize (siValidationsAge));
m_nodeStore->tune (getConfig ().getSize (siNodeCacheSize), getConfig ().getSize (siNodeCacheAge));
mLedgerMaster.tune (getConfig ().getSize (siLedgerSize), getConfig ().getSize (siLedgerAge));
mSLECache.setTargetSize (getConfig ().getSize (siSLECacheSize));
mSLECache.setTargetAge (getConfig ().getSize (siSLECacheAge));
mLedgerMaster.setMinValidations (getConfig ().VALIDATION_QUORUM);
//
// Allow peer connections.
//
if (!getConfig ().RUN_STANDALONE)
{
try
{
mPeerDoor = PeerDoor::New (
getConfig ().PEER_IP,
getConfig ().PEER_PORT,
getConfig ().PEER_SSL_CIPHER_LIST,
mIOService);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open peer service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "Peer interface: disabled";
}
//
// Allow RPC connections.
//
if (! getConfig ().getRpcIP().empty () && getConfig ().getRpcPort() != 0)
{
try
{
mRPCDoor = new RPCDoor (mIOService, m_rpcServerHandler);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open RPC service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "RPC interface: disabled";
}
//
// Allow private WS connections.
//
if (!getConfig ().WEBSOCKET_IP.empty () && getConfig ().WEBSOCKET_PORT)
{
try
{
mWSPrivateDoor = WSDoor::createWSDoor (getConfig ().WEBSOCKET_IP, getConfig ().WEBSOCKET_PORT, false);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open private websocket service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "WS private interface: disabled";
}
//
// Allow public WS connections.
//
if (!getConfig ().WEBSOCKET_PUBLIC_IP.empty () && getConfig ().WEBSOCKET_PUBLIC_PORT)
{
try
{
mWSPublicDoor = WSDoor::createWSDoor (getConfig ().WEBSOCKET_PUBLIC_IP, getConfig ().WEBSOCKET_PUBLIC_PORT, true);
}
catch (const std::exception& e)
{
// Must run as directed or exit.
WriteLog (lsFATAL, Application) << boost::str (boost::format ("Can not open public websocket service: %s") % e.what ());
exit (3);
}
}
else
{
WriteLog (lsINFO, Application) << "WS public interface: disabled";
}
//
// Begin connecting to network.
//
if (!getConfig ().RUN_STANDALONE)
mPeers->start ();
if (getConfig ().RUN_STANDALONE)
{
WriteLog (lsWARNING, Application) << "Running in standalone mode";
mNetOps->setStandAlone ();
}
else
{
// VFALCO NOTE the state timer resets the deadlock detector.
//
mNetOps->setStateTimer ();
}
}
void ApplicationImp::run ()
{
if (getConfig ().NODE_SIZE >= 2)
{
boost::thread (BIND_TYPE (runIO, boost::ref (mIOService))).detach ();
}
// VFALCO TODO The unit tests crash if we try to
// run these threads in the IoService constructor
// so this hack makes them start later.
//
m_mainService.runExtraThreads ();
if (!getConfig ().RUN_STANDALONE)
{
@@ -614,7 +728,7 @@ void ApplicationImp::run ()
getApp().getLoadManager ().activateDeadlockDetector ();
}
mIOService.run (); // This blocks
m_mainService.run (); // This blocks until the io_service is stopped.
if (mWSPublicDoor)
mWSPublicDoor->stop ();

View File

@@ -145,7 +145,20 @@ public:
{
if (m_shouldLog)
{
#if BEAST_MSVC
if (beast_isRunningUnderDebugger ())
{
Logger::outputDebugString (message);
}
else
{
std::cout << message.toStdString () << std::endl;
}
#else
std::cout << message.toStdString () << std::endl;
#endif
}
}
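For reference, the BEAST_MSVC branch above routes log lines to the debugger's output window when a debugger is attached and to the console otherwise. The sketch below makes the same decision with raw Win32 calls; IsDebuggerPresent and OutputDebugStringA are an assumption about what the beast helpers wrap, not something this diff shows.

#include <iostream>
#include <string>

#if defined (_MSC_VER)
#include <windows.h>
#endif

static void writeLogLine (std::string const& message)
{
#if defined (_MSC_VER)
    if (IsDebuggerPresent ())
    {
        // Appears in the Visual Studio Output window rather than the console.
        OutputDebugStringA ((message + "\n").c_str ());
        return;
    }
#endif
    std::cout << message << std::endl;
}

int main ()
{
    writeLogLine ("hello from the logger");
}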

View File

@@ -21,9 +21,36 @@ SETUP_LOG (WSDoor)
//
// VFALCO NOTE NetworkOPs isn't used here...
//
void WSDoor::startListening ()
WSDoor::WSDoor (std::string const& strIp, int iPort, bool bPublic)
: Thread ("websocket")
, mPublic (bPublic)
, mIp (strIp)
, mPort (iPort)
{
setCallingThreadName ("websocket");
startThread ();
}
WSDoor::~WSDoor ()
{
{
CriticalSection::ScopedLockType lock (m_endpointLock);
if (m_endpoint != nullptr)
m_endpoint->stop ();
}
signalThreadShouldExit ();
waitForThreadToExit ();
}
void WSDoor::run ()
{
WriteLog (lsINFO, WSDoor) << boost::str (boost::format ("Websocket: %s: Listening: %s %d ")
% (mPublic ? "Public" : "Private")
% mIp
% mPort);
// Generate a single SSL context for use by all connections.
boost::shared_ptr<boost::asio::ssl::context> mCtx;
mCtx = boost::make_shared<boost::asio::ssl::context> (boost::asio::ssl::context::sslv23);
@@ -35,11 +62,13 @@ void WSDoor::startListening ()
SSL_CTX_set_tmp_dh_callback (mCtx->native_handle (), handleTmpDh);
// Construct a single handler for all requests.
websocketpp::server_autotls::handler::ptr handler (new WSServerHandler<websocketpp::server_autotls> (mCtx, mPublic));
// Construct a websocket server.
mSEndpoint = new websocketpp::server_autotls (handler);
{
CriticalSection::ScopedLockType lock (m_endpointLock);
m_endpoint = new websocketpp::server_autotls (handler);
}
// mEndpoint->alog().unset_level(websocketpp::log::alevel::ALL);
// mEndpoint->elog().unset_level(websocketpp::log::elevel::ALL);
@@ -47,7 +76,7 @@ void WSDoor::startListening ()
// Call the main-event-loop of the websocket server.
try
{
mSEndpoint->listen (
m_endpoint->listen (
boost::asio::ip::tcp::endpoint (
boost::asio::ip::address ().from_string (mIp), mPort));
}
@@ -60,7 +89,7 @@ void WSDoor::startListening ()
// https://github.com/zaphoyd/websocketpp/issues/98
try
{
mSEndpoint->get_io_service ().run ();
m_endpoint->get_io_service ().run ();
break;
}
catch (websocketpp::exception& e)
@@ -70,32 +99,18 @@ void WSDoor::startListening ()
}
}
delete mSEndpoint;
}
WSDoor* WSDoor::createWSDoor (const std::string& strIp, const int iPort, bool bPublic)
{
WSDoor* wdpResult = new WSDoor (strIp, iPort, bPublic);
WriteLog (lsINFO, WSDoor) <<
boost::str (boost::format ("Websocket: %s: Listening: %s %d ")
% (bPublic ? "Public" : "Private")
% strIp
% iPort);
wdpResult->mThread = new boost::thread (BIND_TYPE (&WSDoor::startListening, wdpResult));
return wdpResult;
delete m_endpoint;
}
void WSDoor::stop ()
{
if (mThread)
{
if (mSEndpoint)
mSEndpoint->stop ();
CriticalSection::ScopedLockType lock (m_endpointLock);
mThread->join ();
if (m_endpoint != nullptr)
m_endpoint->stop ();
}
signalThreadShouldExit ();
waitForThreadToExit ();
}
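The rewritten WSDoor owns its lifecycle through the Thread base class: the constructor calls startThread(), run() hosts the websocket event loop, and teardown stops the endpoint under m_endpointLock, signals the thread to exit, and waits for it. Stripped of the websocket details, that constructor-starts, destructor-joins shape looks roughly like the following standard-C++ sketch (illustrative names, and a polling flag where the real code stops a blocking endpoint):

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

class ListenerDoor
{
public:
    ListenerDoor () : m_shouldExit (false), m_thread (&ListenerDoor::run, this) { }

    ~ListenerDoor ()
    {
        m_shouldExit = true; // plays the role of signalThreadShouldExit()
        m_thread.join ();    // plays the role of waitForThreadToExit()
    }

private:
    void run ()
    {
        std::cout << "listening" << std::endl;
        while (! m_shouldExit) // the real run() blocks in the websocket loop instead
            std::this_thread::sleep_for (std::chrono::milliseconds (50));
        std::cout << "stopped" << std::endl;
    }

    std::atomic <bool> m_shouldExit;
    std::thread m_thread;
};

int main ()
{
    ListenerDoor door; // starts listening on construction
    std::this_thread::sleep_for (std::chrono::milliseconds (200));
}                      // destruction shuts the listener down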

View File

@@ -7,28 +7,24 @@
#ifndef RIPPLE_WSDOOR_RIPPLEHEADER
#define RIPPLE_WSDOOR_RIPPLEHEADER
class WSDoor : LeakChecked <WSDoor>
class WSDoor : protected Thread, LeakChecked <WSDoor>
{
private:
websocketpp::server_autotls* mSEndpoint;
public:
WSDoor (std::string const& strIp, int iPort, bool bPublic);
boost::thread* mThread;
~WSDoor ();
void stop ();
private:
void run ();
private:
ScopedPointer <websocketpp::server_autotls> m_endpoint;
CriticalSection m_endpointLock;
bool mPublic;
std::string mIp;
int mPort;
void startListening ();
public:
WSDoor (const std::string& strIp, int iPort, bool bPublic) : mSEndpoint (0), mThread (0), mPublic (bPublic), mIp (strIp), mPort (iPort)
{
;
}
void stop ();
static WSDoor* createWSDoor (const std::string& strIp, const int iPort, bool bPublic);
};
#endif

View File

@@ -60,8 +60,6 @@
#include <boost/unordered_set.hpp>
#include <boost/weak_ptr.hpp>
//#include "../ripple_sqlite/ripple_sqlite.h" // for SqliteDatabase.cpp
#include "../ripple_core/ripple_core.h"
#include "beast/modules/beast_db/beast_db.h"

View File

@@ -6,11 +6,9 @@
SETUP_LOG (JobQueue)
JobQueue::JobQueue (boost::asio::io_service& svc)
: mLastJob (0)
, mThreadCount (0)
, mShuttingDown (false)
, mIOService (svc)
JobQueue::JobQueue ()
: m_workers (*this, 0)
, mLastJob (0)
{
mJobLoads [ jtPUBOLDLEDGER ].setTargetLatency (10000, 15000);
mJobLoads [ jtVALIDATION_ut ].setTargetLatency (2000, 5000);
@@ -30,9 +28,13 @@ JobQueue::JobQueue (boost::asio::io_service& svc)
mJobLoads [ jtACCEPTLEDGER ].setTargetLatency (1000, 2500);
}
JobQueue::~JobQueue ()
{
}
void JobQueue::addJob (JobType type, const std::string& name, const FUNCTION_TYPE<void (Job&)>& jobFunc)
{
addLimitJob(type, name, 0, jobFunc);
addLimitJob (type, name, 0, jobFunc);
}
void JobQueue::addLimitJob (JobType type, const std::string& name, int limit, const FUNCTION_TYPE<void (Job&)>& jobFunc)
@@ -41,21 +43,26 @@ void JobQueue::addLimitJob (JobType type, const std::string& name, int limit, co
boost::mutex::scoped_lock sl (mJobLock);
if (type != jtCLIENT) // FIXME: Workaround incorrect client shutdown ordering
assert (mThreadCount != 0); // do not add jobs to a queue with no threads
// FIXME: Workaround incorrect client shutdown ordering
// do not add jobs to a queue with no threads
bassert (type == jtCLIENT || m_workers.getNumberOfThreads () > 0);
std::pair< std::set <Job>::iterator, bool > it =
mJobSet.insert (Job (type, name, limit, ++mLastJob, mJobLoads[type], jobFunc));
it.first->peekEvent().start(); // start timing how long it stays in the queue
++mJobCounts[type].first;
mJobCond.notify_one ();
m_workers.addTask ();
}
int JobQueue::getJobCount (JobType t)
{
boost::mutex::scoped_lock sl (mJobLock);
std::map< JobType, std::pair<int, int> >::iterator c = mJobCounts.find (t);
JobCounts::iterator c = mJobCounts.find (t);
return (c == mJobCounts.end ()) ? 0 : c->second.first;
}
@@ -63,7 +70,8 @@ int JobQueue::getJobCountTotal (JobType t)
{
boost::mutex::scoped_lock sl (mJobLock);
std::map< JobType, std::pair<int, int> >::iterator c = mJobCounts.find (t);
JobCounts::iterator c = mJobCounts.find (t);
return (c == mJobCounts.end ()) ? 0 : (c->second.first + c->second.second);
}
@@ -74,11 +82,13 @@ int JobQueue::getJobCountGE (JobType t)
boost::mutex::scoped_lock sl (mJobLock);
typedef std::map< JobType, std::pair<int, int> >::value_type jt_int_pair;
BOOST_FOREACH (const jt_int_pair & it, mJobCounts)
typedef JobCounts::value_type jt_int_pair;
if (it.first >= t)
ret += it.second.first;
BOOST_FOREACH (jt_int_pair const& it, mJobCounts)
{
if (it.first >= t)
ret += it.second.first;
}
return ret;
}
@@ -89,11 +99,15 @@ std::vector< std::pair<JobType, std::pair<int, int> > > JobQueue::getJobCounts (
std::vector< std::pair<JobType, std::pair<int, int> > > ret;
boost::mutex::scoped_lock sl (mJobLock);
ret.reserve (mJobCounts.size ());
typedef std::map< JobType, std::pair<int, int> >::value_type jt_int_pair;
typedef JobCounts::value_type jt_int_pair;
BOOST_FOREACH (const jt_int_pair & it, mJobCounts)
ret.push_back (it);
{
ret.push_back (it);
}
return ret;
}
@@ -101,9 +115,10 @@ std::vector< std::pair<JobType, std::pair<int, int> > > JobQueue::getJobCounts (
Json::Value JobQueue::getJson (int)
{
Json::Value ret (Json::objectValue);
boost::mutex::scoped_lock sl (mJobLock);
ret["threads"] = mThreadCount;
ret["threads"] = m_workers.getNumberOfThreads ();
Json::Value priorities = Json::arrayValue;
@@ -116,7 +131,7 @@ Json::Value JobQueue::getJson (int)
int jobCount, threadCount;
bool isOver;
mJobLoads[i].getCountAndLatency (count, latencyAvg, latencyPeak, isOver);
std::map< JobType, std::pair<int, int> >::iterator it = mJobCounts.find (static_cast<JobType> (i));
JobCounts::iterator it = mJobCounts.find (static_cast<JobType> (i));
if (it == mJobCounts.end ())
{
@@ -165,6 +180,7 @@ Json::Value JobQueue::getJson (int)
bool JobQueue::isOverloaded ()
{
int count = 0;
boost::mutex::scoped_lock sl (mJobLock);
for (int i = 0; i < NUM_JOB_TYPES; ++i)
@@ -174,16 +190,13 @@ bool JobQueue::isOverloaded ()
return count > 0;
}
// shut down the job queue without completing pending jobs
//
void JobQueue::shutdown ()
{
// shut down the job queue without completing pending jobs
WriteLog (lsINFO, JobQueue) << "Job queue shutting down";
boost::mutex::scoped_lock sl (mJobLock);
mShuttingDown = true;
mJobCond.notify_all ();
while (mThreadCount != 0)
mJobCond.wait (sl);
m_workers.pauseAllThreadsAndWait ();
}
// set the number of threads serving the job queue to precisely this number
@@ -195,7 +208,7 @@ void JobQueue::setThreadCount (int c, bool const standaloneMode)
}
else if (c == 0)
{
c = boost::thread::hardware_concurrency ();
c = SystemStats::getNumCpus ();
// VFALCO NOTE According to boost, hardware_concurrency cannot return
// negative numbers.
@@ -210,113 +223,83 @@ void JobQueue::setThreadCount (int c, bool const standaloneMode)
WriteLog (lsINFO, JobQueue) << "Auto-tuning to " << c << " validation/transaction/proposal threads";
}
// VFALCO TODO Split the function up. The lower part actually does the "do",
// The part above this comment figures out the value for numThreads
//
boost::mutex::scoped_lock sl (mJobLock);
while (mJobCounts[jtDEATH].first != 0)
{
mJobCond.wait (sl);
}
while (mThreadCount < c)
{
++mThreadCount;
boost::thread (BIND_TYPE (&JobQueue::threadEntry, this)).detach ();
}
while (mThreadCount > c)
{
if (mJobCounts[jtDEATH].first != 0)
{
mJobCond.wait (sl);
}
else
{
mJobSet.insert (Job (jtDEATH, 0));
++ (mJobCounts[jtDEATH].first);
}
}
mJobCond.notify_one (); // in case we sucked up someone else's signal
m_workers.setNumberOfThreads (c);
}
bool JobQueue::getJob(Job& job)
{
if (mJobSet.empty() || mShuttingDown)
return false;
bool gotJob = false;
std::set<Job>::iterator it = mJobSet.begin ();
while (1)
if (! mJobSet.empty ())
{
// Are we out of jobs?
if (it == mJobSet.end())
return false;
std::set<Job>::iterator it = mJobSet.begin ();
// Does this job have no limit?
if (it->getLimit() == 0)
break;
for (;;)
{
// VFALCO NOTE how can we be out of jobs if we just checked mJobSet.empty ()?
//
// Are we out of jobs?
if (it == mJobSet.end())
return false; // VFALCO TODO get rid of this return from the middle
// Is this job category below the limit?
if (mJobCounts[it->getType()].second < it->getLimit())
break;
// Does this job have no limit?
if (it->getLimit() == 0)
break;
// Try the next job, if any
++it;
// Is this job category below the limit?
if (mJobCounts[it->getType()].second < it->getLimit())
break;
// Try the next job, if any
++it;
}
job = *it;
mJobSet.erase (it);
gotJob = true;
}
job = *it;
mJobSet.erase (it);
return true;
return gotJob;
}
// do jobs until asked to stop
void JobQueue::threadEntry ()
void JobQueue::processTask ()
{
boost::mutex::scoped_lock sl (mJobLock);
while (1)
{
JobType type;
// This lock shouldn't be needed
boost::mutex::scoped_lock lock (mJobLock);
setCallingThreadName ("waiting");
JobType type (jtINVALID);
{
Job job;
while (!getJob(job))
bool const haveJob = getJob (job);
if (haveJob)
{
if (mShuttingDown)
{
--mThreadCount;
mJobCond.notify_all();
return;
}
mJobCond.wait (sl);
type = job.getType ();
// VFALCO TODO Replace with Atomic <>
--(mJobCounts[type].first);
++(mJobCounts[type].second);
lock.unlock ();
Thread::setCurrentThreadName (Job::toString (type));
WriteLog (lsTRACE, JobQueue) << "Doing " << Job::toString (type) << " job";
job.doJob ();
}
type = job.getType ();
-- (mJobCounts[type].first);
// must destroy job, here, without holding lock
}
if (type == jtDEATH)
{
--mThreadCount;
mJobCond.notify_all();
return;
}
++ (mJobCounts[type].second);
sl.unlock ();
setCallingThreadName (Job::toString (type));
WriteLog (lsTRACE, JobQueue) << "Doing " << Job::toString (type) << " job";
job.doJob ();
} // must destroy job without holding lock
sl.lock ();
-- (mJobCounts[type].second);
if (type != jtINVALID)
{
lock.lock ();
-- (mJobCounts[type].second);
}
}
}
// vim:ts=4
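After this change the JobQueue no longer manages threads or condition variables itself: it implements Workers::Callback (see the header diff below), calls m_workers.addTask() once per queued job, and lets Workers invoke processTask() on a pool thread. The sketch below reduces that split to a fixed pool in standard C++; it illustrates only the callback contract and is not beast's Workers, which also supports pausing and resizing, as setThreadCount() and shutdown() rely on.

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct TaskCallback
{
    virtual ~TaskCallback () { }
    virtual void processTask () = 0; // corresponds to JobQueue::processTask()
};

class WorkerPool
{
public:
    WorkerPool (TaskCallback& callback, int numberOfThreads)
        : m_callback (callback), m_pendingTasks (0), m_stopping (false)
    {
        for (int i = 0; i < numberOfThreads; ++i)
            m_threads.emplace_back (&WorkerPool::threadLoop, this);
    }

    ~WorkerPool ()
    {
        {
            std::lock_guard <std::mutex> lock (m_mutex);
            m_stopping = true; // pending tasks are abandoned, as in shutdown()
        }
        m_wakeup.notify_all ();
        for (std::thread& t : m_threads)
            t.join ();
    }

    // One addTask() produces exactly one processTask() callback, which is how
    // addLimitJob() wakes a worker without touching threads directly.
    void addTask ()
    {
        {
            std::lock_guard <std::mutex> lock (m_mutex);
            ++m_pendingTasks;
        }
        m_wakeup.notify_one ();
    }

private:
    void threadLoop ()
    {
        for (;;)
        {
            {
                std::unique_lock <std::mutex> lock (m_mutex);
                m_wakeup.wait (lock, [this] { return m_stopping || m_pendingTasks > 0; });
                if (m_stopping)
                    return;
                --m_pendingTasks;
            }
            m_callback.processTask (); // the callback picks the actual job to run
        }
    }

    TaskCallback& m_callback;
    int m_pendingTasks;
    bool m_stopping;
    std::mutex m_mutex;
    std::condition_variable m_wakeup;
    std::vector <std::thread> m_threads;
};

struct PrintCallback : TaskCallback
{
    void processTask () { std::printf ("processed one task\n"); }
};

int main ()
{
    PrintCallback callback;
    WorkerPool pool (callback, 2);
    for (int i = 0; i < 3; ++i)
        pool.addTask ();
    std::this_thread::sleep_for (std::chrono::milliseconds (100));
}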

View File

@@ -4,13 +4,17 @@
*/
//==============================================================================
#ifndef RIPPLE_JOBQUEUE_H
#define RIPPLE_JOBQUEUE_H
#ifndef RIPPLE_JOBQUEUE_H_INCLUDED
#define RIPPLE_JOBQUEUE_H_INCLUDED
class JobQueue
class JobQueue : private Workers::Callback
{
public:
explicit JobQueue (boost::asio::io_service&);
typedef std::map<JobType, std::pair<int, int > > JobCounts;
JobQueue ();
~JobQueue ();
// VFALCO TODO make convenience functions that allow the caller to not
// have to call bind.
@@ -46,22 +50,17 @@ public:
Json::Value getJson (int c = 0);
private:
void threadEntry ();
boost::mutex mJobLock;
boost::condition_variable mJobCond;
uint64 mLastJob;
std::set <Job> mJobSet;
LoadMonitor mJobLoads [NUM_JOB_TYPES];
int mThreadCount;
bool mShuttingDown;
boost::asio::io_service& mIOService;
std::map<JobType, std::pair<int, int > > mJobCounts;
bool getJob (Job& job);
void processTask ();
private:
Workers m_workers;
boost::mutex mJobLock; // VFALCO TODO Replace with CriticalSection
uint64 mLastJob;
std::set <Job> mJobSet;
LoadMonitor mJobLoads [NUM_JOB_TYPES];
JobCounts mJobCounts;
};
#endif