Mirror of https://github.com/XRPLF/rippled.git (synced 2025-11-27 14:35:52 +00:00)
Add ripple_main module
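The change running through every hunk below is mechanical: the file-local `SETUP_LOG();` declaration and the `cLog(severity)` macro are dropped in favour of `WriteLog (severity, Partition)`, which names the log partition explicitly at each call site. The following sketch only illustrates that call-site pattern; the severity enum, `LogLine` helper, and `WriteLog` macro here are simplified stand-ins assumed for the example, not rippled's actual Log.h.

```cpp
// Minimal sketch (assumed, not rippled's Log.h): a severity enum, a
// stream-style log line, and a WriteLog-like macro that tags each message
// with an explicitly named partition instead of a per-file SETUP_LOG().
#include <iostream>
#include <sstream>
#include <string>

enum LogSeverity { lsTRACE, lsDEBUG, lsINFO, lsWARNING, lsFATAL };

class LogLine
{
public:
    LogLine (LogSeverity severity, std::string const& partition)
    {
        static char const* const names[] = { "TRC", "DBG", "INF", "WRN", "FTL" };
        mStream << names[severity] << " [" << partition << "] ";
    }

    ~LogLine ()
    {
        // Flush the completed message when the temporary goes out of scope.
        std::cerr << mStream.str () << std::endl;
    }

    template <class T>
    LogLine& operator<< (T const& t)
    {
        mStream << t;
        return *this;
    }

private:
    std::ostringstream mStream;
};

// The partition name is stringized at the call site, so no file-local
// SETUP_LOG() object is needed.
#define WriteLog(severity, partition) LogLine (severity, #partition)

int main ()
{
    // Old style removed by the diff: cLog(lsINFO) << "Received shutdown request";
    WriteLog (lsINFO, Application) << "Received shutdown request";
    WriteLog (lsWARNING, JobQueue) << "Exception in IOThread";
}
```

Putting the partition name in the call itself makes every message grep-able by subsystem and lets severity be filtered per partition without a file-local static.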
@@ -24,8 +24,6 @@
 #include <boost/filesystem.hpp>
 #include <boost/thread.hpp>

-SETUP_LOG();
-
 LogPartition TaggedCachePartition("TaggedCache");
 LogPartition AutoSocketPartition("AutoSocket");
 Application* theApp = NULL;
@@ -80,7 +78,7 @@ bool Instance::running = true;

 void Application::stop()
 {
-cLog(lsINFO) << "Received shutdown request";
+WriteLog (lsINFO, Application) << "Received shutdown request";
 StopSustain();
 mShutdown = true;
 mIOService.stop();
@@ -94,7 +92,7 @@ void Application::stop()
 mHashNodeLDB = NULL;
 #endif

-cLog(lsINFO) << "Stopped: " << mIOService.stopped();
+WriteLog (lsINFO, Application) << "Stopped: " << mIOService.stopped();
 Instance::shutdown();
 }

@@ -172,7 +170,7 @@ void Application::setup()
 #ifdef USE_LEVELDB
 if (mHashedObjectStore.isLevelDB())
 {
-cLog(lsINFO) << "LevelDB used for nodes";
+WriteLog (lsINFO, Application) << "LevelDB used for nodes";
 leveldb::Options options;
 options.create_if_missing = true;
 options.block_cache = leveldb::NewLRUCache(theConfig.getSize(siHashNodeDBCache) * 1024 * 1024);
@@ -183,7 +181,7 @@ void Application::setup()
 leveldb::Status status = leveldb::DB::Open(options, (theConfig.DATA_DIR / "hashnode").string(), &mHashNodeLDB);
 if (!status.ok() || !mHashNodeLDB)
 {
-cLog(lsFATAL) << "Unable to open/create hash node db: "
+WriteLog (lsFATAL, Application) << "Unable to open/create hash node db: "
 << (theConfig.DATA_DIR / "hashnode").string()
 << " " << status.ToString();
 StopSustain();
@@ -193,7 +191,7 @@ void Application::setup()
 else
 #endif
 {
-cLog(lsINFO) << "SQLite used for nodes";
+WriteLog (lsINFO, Application) << "SQLite used for nodes";
 boost::thread t5(boost::bind(&InitDB, &mHashNodeDB, "hashnode.db", HashNodeDBInit, HashNodeDBCount));
 t5.join();
 }
@@ -206,13 +204,13 @@ void Application::setup()

 if (theConfig.START_UP == Config::FRESH)
 {
-cLog(lsINFO) << "Starting new Ledger";
+WriteLog (lsINFO, Application) << "Starting new Ledger";

 startNewLedger();
 }
 else if (theConfig.START_UP == Config::LOAD)
 {
-cLog(lsINFO) << "Loading specified Ledger";
+WriteLog (lsINFO, Application) << "Loading specified Ledger";

 if (!loadOldLedger(theConfig.START_LEDGER))
 {
@@ -271,14 +269,14 @@ void Application::setup()
 catch (const std::exception& e)
 {
 // Must run as directed or exit.
-cLog(lsFATAL) << boost::str(boost::format("Can not open peer service: %s") % e.what());
+WriteLog (lsFATAL, Application) << boost::str(boost::format("Can not open peer service: %s") % e.what());

 exit(3);
 }
 }
 else
 {
-cLog(lsINFO) << "Peer interface: disabled";
+WriteLog (lsINFO, Application) << "Peer interface: disabled";
 }

 //
@@ -293,14 +291,14 @@ void Application::setup()
 catch (const std::exception& e)
 {
 // Must run as directed or exit.
-cLog(lsFATAL) << boost::str(boost::format("Can not open RPC service: %s") % e.what());
+WriteLog (lsFATAL, Application) << boost::str(boost::format("Can not open RPC service: %s") % e.what());

 exit(3);
 }
 }
 else
 {
-cLog(lsINFO) << "RPC interface: disabled";
+WriteLog (lsINFO, Application) << "RPC interface: disabled";
 }

 //
@@ -315,14 +313,14 @@ void Application::setup()
 catch (const std::exception& e)
 {
 // Must run as directed or exit.
-cLog(lsFATAL) << boost::str(boost::format("Can not open private websocket service: %s") % e.what());
+WriteLog (lsFATAL, Application) << boost::str(boost::format("Can not open private websocket service: %s") % e.what());

 exit(3);
 }
 }
 else
 {
-cLog(lsINFO) << "WS private interface: disabled";
+WriteLog (lsINFO, Application) << "WS private interface: disabled";
 }

 //
@@ -337,14 +335,14 @@ void Application::setup()
 catch (const std::exception& e)
 {
 // Must run as directed or exit.
-cLog(lsFATAL) << boost::str(boost::format("Can not open public websocket service: %s") % e.what());
+WriteLog (lsFATAL, Application) << boost::str(boost::format("Can not open public websocket service: %s") % e.what());

 exit(3);
 }
 }
 else
 {
-cLog(lsINFO) << "WS public interface: disabled";
+WriteLog (lsINFO, Application) << "WS public interface: disabled";
 }

 //
@@ -356,7 +354,7 @@ void Application::setup()

 if (theConfig.RUN_STANDALONE)
 {
-cLog(lsWARNING) << "Running in standalone mode";
+WriteLog (lsWARNING, Application) << "Running in standalone mode";

 mNetOps.setStandAlone();
 }
@@ -380,7 +378,7 @@ void Application::run()
 if (mWSPrivateDoor)
 mWSPrivateDoor->stop();

-cLog(lsINFO) << "Done.";
+WriteLog (lsINFO, Application) << "Done.";
 }

 void Application::sweep()
@@ -389,7 +387,7 @@ void Application::sweep()
 boost::filesystem::space_info space = boost::filesystem::space(theConfig.DATA_DIR);
 if (space.available < (512 * 1024 * 1024))
 {
-cLog(lsFATAL) << "Remaining free disk space is less than 512MB";
+WriteLog (lsFATAL, Application) << "Remaining free disk space is less than 512MB";
 theApp->stop();
 }

@@ -428,8 +426,8 @@ void Application::startNewLedger()
 RippleAddress rootAddress = RippleAddress::createAccountPublic(rootGeneratorMaster, 0);

 // Print enough information to be able to claim root account.
-cLog(lsINFO) << "Root master seed: " << rootSeedMaster.humanSeed();
-cLog(lsINFO) << "Root account: " << rootAddress.humanAccountID();
+WriteLog (lsINFO, Application) << "Root master seed: " << rootSeedMaster.humanSeed();
+WriteLog (lsINFO, Application) << "Root account: " << rootAddress.humanAccountID();

 {
 Ledger::pointer firstLedger = boost::make_shared<Ledger>(rootAddress, SYSTEM_CURRENCY_START);
@@ -466,29 +464,29 @@ bool Application::loadOldLedger(const std::string& l)

 if (!loadLedger)
 {
-cLog(lsFATAL) << "No Ledger found?" << std::endl;
+WriteLog (lsFATAL, Application) << "No Ledger found?" << std::endl;
 return false;
 }
 loadLedger->setClosed();

-cLog(lsINFO) << "Loading ledger " << loadLedger->getHash() << " seq:" << loadLedger->getLedgerSeq();
+WriteLog (lsINFO, Application) << "Loading ledger " << loadLedger->getHash() << " seq:" << loadLedger->getLedgerSeq();

 if (loadLedger->getAccountHash().isZero())
 {
-cLog(lsFATAL) << "Ledger is empty.";
+WriteLog (lsFATAL, Application) << "Ledger is empty.";
 assert(false);
 return false;
 }

 if (!loadLedger->walkLedger())
 {
-cLog(lsFATAL) << "Ledger is missing nodes.";
+WriteLog (lsFATAL, Application) << "Ledger is missing nodes.";
 return false;
 }

 if (!loadLedger->assertSane())
 {
-cLog(lsFATAL) << "Ledger is not sane.";
+WriteLog (lsFATAL, Application) << "Ledger is not sane.";
 return false;
 }
 mLedgerMaster.setLedgerRangePresent(loadLedger->getLedgerSeq(), loadLedger->getLedgerSeq());
@@ -499,12 +497,12 @@ bool Application::loadOldLedger(const std::string& l)
 }
 catch (SHAMapMissingNode&)
 {
-cLog(lsFATAL) << "Data is missing for selected ledger";
+WriteLog (lsFATAL, Application) << "Data is missing for selected ledger";
 return false;
 }
 catch (boost::bad_lexical_cast&)
 {
-cLog(lsFATAL) << "Ledger specified '" << l << "' is not valid";
+WriteLog (lsFATAL, Application) << "Ledger specified '" << l << "' is not valid";
 return false;
 }
 return true;

@@ -5,7 +5,6 @@
 #include <boost/lexical_cast.hpp>

 #include "Log.h"
-SETUP_LOG();

 // Logic to handle incoming HTTP reqests

@@ -42,7 +41,7 @@ HTTPRequestAction HTTPRequest::consume(boost::asio::streambuf& buf)
 std::getline(is, line);
 boost::trim(line);

-// cLog(lsTRACE) << "HTTPRequest line: " << line;
+// WriteLog (lsTRACE, HTTPRequest) << "HTTPRequest line: " << line;

 if (eState == await_request)
 { // VERB URL PROTO

@@ -8,8 +8,6 @@
 #include "Config.h"
 #include "Application.h"

-SETUP_LOG();
-
 JobQueue::JobQueue(boost::asio::io_service& svc)
 : mLastJob(0), mThreadCount(0), mShuttingDown(false), mIOThreadCount(0), mMaxIOThreadCount(1), mIOService(svc)
 {
@@ -222,7 +220,7 @@ int JobQueue::isOverloaded()

 void JobQueue::shutdown()
 { // shut down the job queue without completing pending jobs
-cLog(lsINFO) << "Job queue shutting down";
+WriteLog (lsINFO, JobQueue) << "Job queue shutting down";
 boost::mutex::scoped_lock sl(mJobLock);
 mShuttingDown = true;
 mJobCond.notify_all();
@@ -242,7 +240,7 @@ void JobQueue::setThreadCount(int c)
 if (c > 4) // I/O will bottleneck
 c = 4;
 c += 2;
-cLog(lsINFO) << "Auto-tuning to " << c << " validation/transaction/proposal threads";
+WriteLog (lsINFO, JobQueue) << "Auto-tuning to " << c << " validation/transaction/proposal threads";
 }

 boost::mutex::scoped_lock sl(mJobLock);
@@ -281,7 +279,7 @@ void JobQueue::IOThread(boost::mutex::scoped_lock& sl)
 }
 catch (...)
 {
-cLog(lsWARNING) << "Exception in IOThread";
+WriteLog (lsWARNING, JobQueue) << "Exception in IOThread";
 }
 NameThread("waiting");
 sl.lock();
@@ -327,7 +325,7 @@ void JobQueue::threadEntry()
 ++(mJobCounts[type].second);
 sl.unlock();
 NameThread(Job::toString(type));
-cLog(lsTRACE) << "Doing " << Job::toString(type) << " job";
+WriteLog (lsTRACE, JobQueue) << "Doing " << Job::toString(type) << " job";
 job.doJob();
 } // must destroy job without holding lock
 sl.lock();

@@ -8,8 +8,6 @@
 #include "Config.h"
 #include "Application.h"

-SETUP_LOG();
-
 static volatile int* uptimePtr = NULL;

 int upTime()
@@ -281,7 +279,7 @@ bool LoadFeeTrack::raiseLocalFee()

 if (origFee == mLocalTxnLoadFee)
 return false;
-cLog(lsDEBUG) << "Local load fee raised from " << origFee << " to " << mLocalTxnLoadFee;
+WriteLog (lsDEBUG, LoadManager) << "Local load fee raised from " << origFee << " to " << mLocalTxnLoadFee;
 return true;
 }

@@ -304,7 +302,7 @@ bool LoadFeeTrack::lowerLocalFee()

 if (origFee == mLocalTxnLoadFee)
 return false;
-cLog(lsDEBUG) << "Local load fee lowered from " << origFee << " to " << mLocalTxnLoadFee;
+WriteLog (lsDEBUG, LoadManager) << "Local load fee lowered from " << origFee << " to " << mLocalTxnLoadFee;
 return true;
 }

@@ -334,7 +332,7 @@ int LoadManager::getUptime()

 static void LogDeadLock(int dlTime)
 {
-cLog(lsWARNING) << "Server stalled for " << dlTime << " seconds.";
+WriteLog (lsWARNING, LoadManager) << "Server stalled for " << dlTime << " seconds.";
 }

 void LoadManager::threadEntry()
@@ -368,7 +366,7 @@ void LoadManager::threadEntry()
 bool change;
 if (theApp->getJobQueue().isOverloaded())
 {
-cLog(lsINFO) << theApp->getJobQueue().getJson(0);
+WriteLog (lsINFO, LoadManager) << theApp->getJobQueue().getJson(0);
 change = theApp->getFeeTrack().raiseLocalFee();
 }
 else
@@ -381,7 +379,7 @@ void LoadManager::threadEntry()

 if ((when.is_negative()) || (when.total_seconds() > 1))
 {
-cLog(lsWARNING) << "time jump";
+WriteLog (lsWARNING, LoadManager) << "time jump";
 t = boost::posix_time::microsec_clock::universal_time();
 }
 else
@@ -392,24 +390,24 @@ void LoadManager::threadEntry()
 void LoadManager::logWarning(const std::string& source) const
 {
 if (source.empty())
-cLog(lsDEBUG) << "Load warning from empty source";
+WriteLog (lsDEBUG, LoadManager) << "Load warning from empty source";
 else
-cLog(lsINFO) << "Load warning: " << source;
+WriteLog (lsINFO, LoadManager) << "Load warning: " << source;
 }

 void LoadManager::logDisconnect(const std::string& source) const
 {
 if (source.empty())
-cLog(lsINFO) << "Disconnect for empty source";
+WriteLog (lsINFO, LoadManager) << "Disconnect for empty source";
 else
-cLog(lsWARNING) << "Disconnect for: " << source;
+WriteLog (lsWARNING, LoadManager) << "Disconnect for: " << source;
 }

 BOOST_AUTO_TEST_SUITE(LoadManager_test)

 BOOST_AUTO_TEST_CASE(LoadFeeTrack_test)
 {
-cLog(lsDEBUG) << "Running load fee track test";
+WriteLog (lsDEBUG, LoadManager) << "Running load fee track test";

 Config d; // get a default configuration object
 LoadFeeTrack l;

@@ -1,8 +1,6 @@
 #include "LoadMonitor.h"
 #include "Log.h"

-SETUP_LOG();
-
 void LoadMonitor::update()
 { // call with the mutex
 int now = upTime();
@@ -59,7 +57,7 @@ void LoadMonitor::addCountAndLatency(const std::string& name, int latency)
 {
 if (latency > 500)
 {
-cLog((latency > 1000) ? lsWARNING : lsINFO) << "Job: " << name << " ExecutionTime: " << latency;
+WriteLog ((latency > 1000) ? lsWARNING : lsINFO, LoadMonitor) << "Job: " << name << " ExecutionTime: " << latency;
 }
 if (latency == 1)
 latency = 0;

@@ -7,8 +7,6 @@
 #include "LedgerTiming.h"
 #include "Log.h"

-SETUP_LOG();
-
 typedef std::map<uint160, SerializedValidation::pointer>::value_type u160_val_pair;
 typedef boost::shared_ptr<ValidationSet> VSpointer;

@@ -47,12 +45,12 @@ bool ValidationCollection::addValidation(SerializedValidation::ref val, const st
 isCurrent = true;
 else
 {
-cLog(lsWARNING) << "Received stale validation now=" << now << ", close=" << valClose;
+WriteLog (lsWARNING, ValidationCollection) << "Received stale validation now=" << now << ", close=" << valClose;
 }
 }
 else
 {
-cLog(lsDEBUG) << "Node " << signer.humanNodePublic() << " not in UNL st=" << val->getSignTime() <<
+WriteLog (lsDEBUG, ValidationCollection) << "Node " << signer.humanNodePublic() << " not in UNL st=" << val->getSignTime() <<
 ", hash=" << val->getLedgerHash() << ", shash=" << val->getSigningHash() << " src=" << source;
 }

@@ -82,7 +80,7 @@ bool ValidationCollection::addValidation(SerializedValidation::ref val, const st
 }
 }

-cLog(lsDEBUG) << "Val for " << hash << " from " << signer.humanNodePublic()
+WriteLog (lsDEBUG, ValidationCollection) << "Val for " << hash << " from " << signer.humanNodePublic()
 << " added " << (val->isTrusted() ? "trusted/" : "UNtrusted/") << (isCurrent ? "current" : "stale");
 if (val->isTrusted())
 theApp->getLedgerMaster().checkAccept(hash);
@@ -120,7 +118,7 @@ void ValidationCollection::getValidationCount(const uint256& ledger, bool curren
 isTrusted = false;
 else
 {
-cLog(lsTRACE) << "VC: Untrusted due to time " << ledger;
+WriteLog (lsTRACE, ValidationCollection) << "VC: Untrusted due to time " << ledger;
 }
 }
 if (isTrusted)
@@ -129,7 +127,7 @@ void ValidationCollection::getValidationCount(const uint256& ledger, bool curren
 ++untrusted;
 }
 }
-cLog(lsTRACE) << "VC: " << ledger << "t:" << trusted << " u:" << untrusted;
+WriteLog (lsTRACE, ValidationCollection) << "VC: " << ledger << "t:" << trusted << " u:" << untrusted;
 }

 void ValidationCollection::getValidationTypes(const uint256& ledger, int& full, int& partial)
@@ -150,7 +148,7 @@ void ValidationCollection::getValidationTypes(const uint256& ledger, int& full,
 }
 }
 }
-cLog(lsTRACE) << "VC: " << ledger << "f:" << full << " p:" << partial;
+WriteLog (lsTRACE, ValidationCollection) << "VC: " << ledger << "f:" << full << " p:" << partial;
 }


@@ -262,7 +260,7 @@ ValidationCollection::getCurrentValidations(uint256 currentLedger, uint256 prior
 (valPriorLedger && (it->second->getLedgerHash() == priorLedger))))
 {
 countPreferred = true;
-cLog(lsDEBUG) << "Counting for " << currentLedger << " not " << it->second->getLedgerHash();
+WriteLog (lsDEBUG, ValidationCollection) << "Counting for " << currentLedger << " not " << it->second->getLedgerHash();
 }

 currentValidationCount& p = countPreferred ? ret[currentLedger] : ret[it->second->getLedgerHash()];
@@ -281,7 +279,7 @@ void ValidationCollection::flush()
 {
 bool anyNew = false;

-cLog(lsINFO) << "Flushing validations";
+WriteLog (lsINFO, ValidationCollection) << "Flushing validations";
 boost::mutex::scoped_lock sl(mValidationLock);
 BOOST_FOREACH(u160_val_pair& it, mCurrentValidations)
 {
@@ -298,7 +296,7 @@ void ValidationCollection::flush()
 boost::this_thread::sleep(boost::posix_time::milliseconds(100));
 sl.lock();
 }
-cLog(lsDEBUG) << "Validations flushed";
+WriteLog (lsDEBUG, ValidationCollection) << "Validations flushed";
 }

 void ValidationCollection::condWrite()

@@ -16,6 +16,7 @@ typedef boost::unordered_map<uint160, SerializedValidation::pointer> ValidationS

 typedef std::pair<int, uint160> currentValidationCount; // nodes validating and highest node ID validating

+// VFALCO: TODO, Rename this to "Validations"
 class ValidationCollection
 {
 protected: