Improve Journal logging framework:

* Allow partition log levels to be adjusted
* Cleanups
Author:    Howard Hinnant
Date:      2014-06-12 20:49:37 -07:00
Committer: Nik Bougalis
Parent:    488a44b88e
Commit:    23dc08c925
107 changed files with 751 additions and 1553 deletions
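The practical effect of the change is that the global LogPartition/LogSink machinery gives way to a Logs object that is injected into the application and queried by partition name. The sketch below strings together the Logs calls that appear in this diff; the function itself, the header path, and the Journal stream syntax are illustrative assumptions rather than part of the commit.

// Sketch only: exercises the Logs calls visible in this diff (journal, open,
// severity). The header location, the free function, and the Journal stream
// syntax are assumptions for illustration, not part of the commit.
#include <ripple/basics/log/Log.h>

void configureLogging (ripple::Logs& logs)
{
    logs.open ("debug.log");                          // send messages to a debug file

    if (logs.severity () > beast::Journal::kDebug)    // relax the threshold if it is stricter
        logs.severity (beast::Journal::kDebug);

    beast::Journal j (logs.journal ("Application"));  // partition looked up by name
    j.debug << "logging configured";                  // assumed stream usage
}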


@@ -17,10 +17,9 @@
*/
//==============================================================================
#include <fstream>
#include <beast/asio/io_latency_probe.h>
#include <beast/module/core/thread/DeadlineTimer.h>
#include <ripple/basics/log/Log.h>
#include <ripple/basics/utility/Sustain.h>
#include <ripple/common/seconds_clock.h>
#include <ripple/module/app/main/Tuning.h>
@@ -30,8 +29,6 @@
#include <ripple/nodestore/DummyScheduler.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/overlay/make_Overlay.h>
#include <beast/asio/io_latency_probe.h>
#include <beast/module/core/thread/DeadlineTimer.h>
#include <fstream>
namespace ripple {
@@ -41,46 +38,6 @@ static bool volatile doShutdown = false;
static int const MAJORITY_FRACTION (204);
//------------------------------------------------------------------------------
//
// Specializations for LogPartition names
// VFALCO NOTE This is temporary, until I refactor LogPartition
// and LogPartition::getJournal() to take a string
//
class ApplicationLog;
template <> char const* LogPartition::getPartitionName <ApplicationLog> () { return "Application"; }
class SiteFilesLog;
template <> char const* LogPartition::getPartitionName <SiteFilesLog> () { return "SiteFiles"; }
class ValidatorsLog;
template <> char const* LogPartition::getPartitionName <ValidatorsLog> () { return "Validators"; }
class JobQueueLog;
template <> char const* LogPartition::getPartitionName <JobQueueLog> () { return "JobQueue"; }
class NetworkOPsLog;
template <> char const* LogPartition::getPartitionName <NetworkOPsLog> () { return "NetworkOPs"; }
class RPCServiceManagerLog;
template <> char const* LogPartition::getPartitionName <RPCServiceManagerLog> () { return "RPCServiceManager"; }
class HTTPServerLog;
template <> char const* LogPartition::getPartitionName <HTTPServerLog> () { return "RPCServer"; }
class LoadManagerLog;
template <> char const* LogPartition::getPartitionName <LoadManagerLog> () { return "LoadManager"; }
class ResourceManagerLog;
template <> char const* LogPartition::getPartitionName <ResourceManagerLog> () { return "ResourceManager"; }
class PathRequestLog;
template <> char const* LogPartition::getPartitionName <PathRequestLog> () { return "PathRequest"; }
class RPCManagerLog;
template <> char const* LogPartition::getPartitionName <RPCManagerLog> () { return "RPCManager"; }
class AmendmentTableLog;
template <> char const* LogPartition::getPartitionName <AmendmentTableLog>() { return "AmendmentTable"; }
template <> char const* LogPartition::getPartitionName <CollectorManager> () { return "Collector"; }
struct TaggedCacheLog;
template <> char const* LogPartition::getPartitionName <TaggedCacheLog> () { return "TaggedCache"; }
//
//------------------------------------------------------------------------------
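The forty-odd lines deleted above existed only to map tag classes to partition names. With the string-keyed API a call site obtains the same journal without declaring a tag class or adding a template specialization, which is also what makes partitions addressable by name at run time. Condensed from this diff, the two call styles side by side (excerpts, not a compilable unit):

// Old style (removed): a forward-declared tag class plus one
// getPartitionName specialization per partition.
class NetworkOPsLog;
template <> char const* LogPartition::getPartitionName <NetworkOPsLog> () { return "NetworkOPs"; }
beast::Journal oldStyle (LogPartition::getJournal <NetworkOPsLog> ());

// New style: the partition is just a string key on the injected Logs object.
beast::Journal newStyle (m_logs.journal ("NetworkOPs"));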
// This hack lets the s_instance variable remain set during
// the call to ~Application
class ApplicationImpBase : public Application
@@ -182,6 +139,7 @@ private:
};
public:
Logs& m_logs;
beast::Journal m_journal;
Application::LockType m_masterMutex;
@@ -260,26 +218,26 @@ public:
//--------------------------------------------------------------------------
ApplicationImp ()
ApplicationImp (Logs& logs)
: RootStoppable ("Application")
, m_journal (LogPartition::getJournal <ApplicationLog> ())
, m_logs (logs)
, m_journal (m_logs.journal("Application"))
, m_nodeStoreManager (NodeStore::make_Manager (
std::move (make_Factories ())))
, m_tempNodeCache ("NodeCache", 16384, 90, get_seconds_clock (),
LogPartition::getJournal <TaggedCacheLog> ())
m_logs.journal("TaggedCache"))
, m_sleCache ("LedgerEntryCache", 4096, 120, get_seconds_clock (),
LogPartition::getJournal <TaggedCacheLog> ())
m_logs.journal("TaggedCache"))
, m_collectorManager (CollectorManager::New (
getConfig().insightSettings,
LogPartition::getJournal <CollectorManager> ()))
getConfig().insightSettings, m_logs.journal("Collector")))
, m_resourceManager (Resource::make_Manager (
m_collectorManager->collector(),
LogPartition::getJournal <ResourceManagerLog> ()))
m_collectorManager->collector(), m_logs.journal("Resource")))
, m_fullBelowCache (std::make_unique <FullBelowCache> (
"full_below", get_seconds_clock (), m_collectorManager->collector (),
@@ -291,7 +249,7 @@ public:
// almost everything is a Stoppable child of the JobQueue.
//
, m_jobQueue (make_JobQueue (m_collectorManager->group ("jobq"),
m_nodeStoreScheduler, LogPartition::getJournal <JobQueueLog> ()))
m_nodeStoreScheduler, m_logs.journal("JobQueue")))
// The io_service must be a child of the JobQueue since we call addJob
// in response to network data from peers and also client requests.
@@ -303,17 +261,17 @@ public:
//
, m_siteFiles (SiteFiles::Manager::New (
*this, LogPartition::getJournal <SiteFilesLog> ()))
*this, m_logs.journal("SiteFiles")))
, m_rpcManager (RPC::make_Manager (LogPartition::getJournal <RPCManagerLog> ()))
, m_rpcManager (RPC::make_Manager (m_logs.journal("RPCManager")))
, m_orderBookDB (*m_jobQueue)
, m_pathRequests ( new PathRequests (
LogPartition::getJournal <PathRequestLog> (), m_collectorManager->collector ()))
, m_pathRequests (new PathRequests (
m_logs.journal("PathRequest"), m_collectorManager->collector ()))
, m_ledgerMaster (LedgerMaster::New (
*m_jobQueue, LogPartition::getJournal <LedgerMaster> ()))
*m_jobQueue, m_logs.journal("LedgerMaster")))
// VFALCO NOTE must come before NetworkOPs to prevent a crash due
// to dependencies in the destructor.
@@ -323,18 +281,18 @@ public:
// VFALCO NOTE Does NetworkOPs depend on LedgerMaster?
, m_networkOPs (NetworkOPs::New (get_seconds_clock (), *m_ledgerMaster,
*m_jobQueue, LogPartition::getJournal <NetworkOPsLog> ()))
*m_jobQueue, m_logs.journal("NetworkOPs")))
// VFALCO NOTE LocalCredentials starts the deprecated UNL service
, m_deprecatedUNL (UniqueNodeList::New (*m_jobQueue))
, m_rpcHTTPServer (RPCHTTPServer::New (*m_networkOPs,
LogPartition::getJournal <HTTPServerLog> (), *m_jobQueue, *m_networkOPs, *m_resourceManager))
m_logs.journal("HTTPServer"), *m_jobQueue, *m_networkOPs, *m_resourceManager))
, m_rpcServerHandler (*m_networkOPs, *m_resourceManager) // passive object, not a Service
, m_nodeStore (m_nodeStoreManager->make_Database ("NodeStore.main", m_nodeStoreScheduler,
LogPartition::getJournal <NodeObject> (), 4, // four read threads for now
m_logs.journal("NodeObject"), 4, // four read threads for now
getConfig ().nodeDatabase, getConfig ().ephemeralNodeDatabase))
, m_sntpClient (SNTPClient::New (*this))
@@ -344,12 +302,12 @@ public:
, m_validators (add (Validators::Manager::New (
*this,
getConfig ().getModuleDatabasePath (),
LogPartition::getJournal <ValidatorsLog> ())))
m_logs.journal("Validators"))))
, m_amendmentTable (make_AmendmentTable (weeks(2), MAJORITY_FRACTION, // 204/256 about 80%
LogPartition::getJournal <AmendmentTableLog> ()))
m_logs.journal("AmendmentTable")))
, mFeeTrack (LoadFeeTrack::New (LogPartition::getJournal <LoadManagerLog> ()))
, mFeeTrack (LoadFeeTrack::New (m_logs.journal("LoadManager")))
, mHashRouter (IHashRouter::New (IHashRouter::getDefaultHoldTime ()))
@@ -357,7 +315,7 @@ public:
, mProofOfWorkFactory (ProofOfWorkFactory::New ())
, m_loadManager (LoadManager::New (*this, LogPartition::getJournal <LoadManagerLog> ()))
, m_loadManager (LoadManager::New (*this, m_logs.journal("LoadManager")))
, m_sweepTimer (this)
@@ -366,8 +324,7 @@ public:
, m_resolver (ResolverAsio::New (m_mainIoPool.getService (), beast::Journal ()))
, m_io_latency_sampler (m_collectorManager->collector()->make_event ("ios_latency"),
LogPartition::getJournal <ApplicationLog> (),
std::chrono::milliseconds (100), m_mainIoPool.getService())
m_logs.journal("Application"), std::chrono::milliseconds (100), m_mainIoPool.getService())
{
add (m_resourceManager.get ());
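Every initializer change above follows the same pattern: ApplicationImp keeps a reference to the injected Logs and hands each subsystem a journal looked up by partition name, rather than each subsystem pulling one out of the LogPartition templates. A minimal sketch of that pattern; AppSketch and Subsystem are hypothetical names used for illustration, not rippled classes:

// Hypothetical illustration of the injection pattern used in the constructor above.
class Subsystem
{
public:
    explicit Subsystem (beast::Journal journal) : journal_ (journal) { }
private:
    beast::Journal journal_;
};

class AppSketch
{
public:
    explicit AppSketch (ripple::Logs& logs)
        : logs_ (logs)
        , journal_ (logs_.journal ("Application"))     // the application's own partition
        , subsystem_ (logs_.journal ("Subsystem"))     // each part gets a named partition
    { }
private:
    ripple::Logs& logs_;        // non-owning, like ApplicationImp::m_logs
    beast::Journal journal_;
    Subsystem subsystem_;
};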
@@ -641,15 +598,10 @@ public:
if (!getConfig ().DEBUG_LOGFILE.empty ())
{
// Let debug messages go to the file but only WARNING or higher to regular output (unless verbose)
LogSink::get()->setLogFile (getConfig ().DEBUG_LOGFILE);
m_logs.open(getConfig ().DEBUG_LOGFILE);
if (LogSink::get()->getMinSeverity () > lsDEBUG)
LogPartition::setSeverity (lsDEBUG);
}
if (!getConfig().CONSOLE_LOG_OUTPUT.empty())
{
LogPartition::setConsoleOutput (getConfig().CONSOLE_LOG_OUTPUT);
if (m_logs.severity() > beast::Journal::kDebug)
m_logs.severity (beast::Journal::kDebug);
}
if (!getConfig ().RUN_STANDALONE)
@@ -1436,7 +1388,7 @@ static bool schemaHas (DatabaseCon* dbc, const std::string& dbName, int line, co
if (static_cast<int> (schema.size ()) <= line)
{
Log (lsFATAL) << "Schema for " << dbName << " has too few lines";
WriteLog (lsFATAL, Application) << "Schema for " << dbName << " has too few lines";
throw std::runtime_error ("bad schema");
}
@@ -1448,14 +1400,14 @@ static void addTxnSeqField ()
if (schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "TxnSeq"))
return;
Log (lsWARNING) << "Transaction sequence field is missing";
WriteLog (lsWARNING, Application) << "Transaction sequence field is missing";
Database* db = getApp().getTxnDB ()->getDB ();
std::vector< std::pair<uint256, int> > txIDs;
txIDs.reserve (300000);
Log (lsINFO) << "Parsing transactions";
WriteLog (lsINFO, Application) << "Parsing transactions";
int i = 0;
uint256 transID;
SQL_FOREACH (db, "SELECT TransID,TxnMeta FROM Transactions;")
@@ -1479,7 +1431,7 @@ static void addTxnSeqField ()
if (rawMeta.size () == 0)
{
txIDs.push_back (std::make_pair (transID, -1));
Log (lsINFO) << "No metadata for " << transID;
WriteLog (lsINFO, Application) << "No metadata for " << transID;
}
else
{
@@ -1488,17 +1440,19 @@ static void addTxnSeqField ()
}
if ((++i % 1000) == 0)
Log (lsINFO) << i << " transactions read";
{
WriteLog (lsINFO, Application) << i << " transactions read";
}
}
Log (lsINFO) << "All " << i << " transactions read";
WriteLog (lsINFO, Application) << "All " << i << " transactions read";
db->executeSQL ("BEGIN TRANSACTION;");
Log (lsINFO) << "Dropping old index";
WriteLog (lsINFO, Application) << "Dropping old index";
db->executeSQL ("DROP INDEX AcctTxIndex;");
Log (lsINFO) << "Altering table";
WriteLog (lsINFO, Application) << "Altering table";
db->executeSQL ("ALTER TABLE AccountTransactions ADD COLUMN TxnSeq INTEGER;");
boost::format fmt ("UPDATE AccountTransactions SET TxnSeq = %d WHERE TransID = '%s';");
@@ -1508,10 +1462,12 @@ static void addTxnSeqField ()
db->executeSQL (boost::str (fmt % t.second % to_string (t.first)));
if ((++i % 1000) == 0)
Log (lsINFO) << i << " transactions updated";
{
WriteLog (lsINFO, Application) << i << " transactions updated";
}
}
Log (lsINFO) << "Building new index";
WriteLog (lsINFO, Application) << "Building new index";
db->executeSQL ("CREATE INDEX AcctTxIndex ON AccountTransactions(Account, LedgerSeq, TxnSeq, TransID);");
db->executeSQL ("END TRANSACTION;");
}
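The remaining hunks in this file retire the partition-less Log (severity) stream in favor of the WriteLog (severity, partition) macro, so these messages also land in a named partition whose level can be tuned. Both forms, as they appear in this diff:

// Old: global stream, no partition attached to the message.
Log (lsINFO) << "Parsing transactions";

// New: the message is tagged with the Application partition and is therefore
// subject to that partition's log level.
WriteLog (lsINFO, Application) << "Parsing transactions";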
@@ -1520,7 +1476,7 @@ void ApplicationImp::updateTables ()
{
if (getConfig ().nodeDatabase.size () <= 0)
{
Log (lsFATAL) << "The [node_db] configuration setting has been updated and must be set";
WriteLog (lsFATAL, Application) << "The [node_db] configuration setting has been updated and must be set";
StopSustain ();
exit (1);
}
@@ -1532,7 +1488,7 @@ void ApplicationImp::updateTables ()
if (schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "PRIMARY"))
{
Log (lsFATAL) << "AccountTransactions database should not have a primary key";
WriteLog (lsFATAL, Application) << "AccountTransactions database should not have a primary key";
StopSustain ();
exit (1);
}
@@ -1542,7 +1498,7 @@ void ApplicationImp::updateTables ()
NodeStore::DummyScheduler scheduler;
std::unique_ptr <NodeStore::Database> source (
m_nodeStoreManager->make_Database ("NodeStore.import", scheduler,
LogPartition::getJournal <NodeObject> (), 0,
deprecatedLogs().journal("NodeObject"), 0,
getConfig ().importNodeDatabase));
WriteLog (lsWARNING, NodeObject) <<
@@ -1565,9 +1521,10 @@ Application::Application ()
{
}
std::unique_ptr <Application> make_Application ()
std::unique_ptr <Application>
make_Application (Logs& logs)
{
return std::make_unique <ApplicationImp> ();
return std::make_unique <ApplicationImp> (logs);
}
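One consequence of the new signature is that the caller now owns the Logs instance and must keep it alive for as long as the application exists, because ApplicationImp stores it as Logs& m_logs. A hypothetical caller-side sketch, assuming Logs is default-constructible (the real call site is outside this file):

int runServer ()
{
    ripple::Logs logs;                            // assumed constructor; real setup not shown
    auto app = ripple::make_Application (logs);   // new signature introduced by this commit

    // `logs` must outlive `app`: the application holds it by reference.
    // ... configure and run the application here ...

    return 0;
}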
Application& getApp ()