Mirror of https://github.com/Xahau/xahaud.git
Remove dead code, fix import.
@@ -13,37 +13,11 @@ NodeStore::NodeStore (String backendParameters, String fastBackendParameters, in
    , mWriteGeneration (0)
    , mWriteLoad (0)
    , mWritePending (false)
    , mLevelDB (false)
    , mEphemeralDB (false)
{
    if (fastBackendParameters.isNotEmpty ())
        m_fastBackend = createBackend (fastBackendParameters);

    mWriteSet.reserve (128);

    // VFALCO TODO Eliminate usage of theConfig
    //             This can be done by passing required parameters through
    //             the backendParameters string.
    //
    if (theConfig.NODE_DB == "leveldb" || theConfig.NODE_DB == "LevelDB")
    {
        mLevelDB = true;
    }
    else if (theConfig.NODE_DB == "SQLite" || theConfig.NODE_DB == "sqlite")
    {
        mLevelDB = false;
    }
    else
    {
        WriteLog (lsFATAL, NodeObject) << "Incorrect database selection";
        assert (false);
    }

    if (!theConfig.LDB_EPHEMERAL.empty ())
    {
        // VFALCO NOTE This is cryptic
        mEphemeralDB = true;
    }
}

void NodeStore::addBackendFactory (BackendFactory& factory)

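The VFALCO TODO above proposes dropping the theConfig dependency by passing the settings a backend needs through the backendParameters string itself. A minimal sketch of what parsing such a string into key/value pairs could look like; the '|' separator, the keys, and the parseBackendParameters name are illustrative assumptions, not part of the codebase:

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Hypothetical helper: split "type=leveldb|path=/var/db/hashnode|cache_mb=256"
// into a key/value map so a backend can configure itself without theConfig.
std::map<std::string, std::string> parseBackendParameters (std::string const& params)
{
    std::map<std::string, std::string> result;
    std::istringstream stream (params);
    std::string field;

    while (std::getline (stream, field, '|'))
    {
        std::string::size_type const eq = field.find ('=');

        if (eq != std::string::npos)
            result[field.substr (0, eq)] = field.substr (eq + 1);
    }

    return result;
}

int main ()
{
    // Example: look up the backend type and path from the parameter string.
    auto const p = parseBackendParameters ("type=leveldb|path=/var/db/hashnode");
    std::cout << p.at ("type") << " " << p.at ("path") << std::endl;
    return 0;
}
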
@@ -51,12 +25,6 @@ void NodeStore::addBackendFactory (BackendFactory& factory)
    s_factories.add (&factory);
}

// DEPRECATED
bool NodeStore::isLevelDB ()
{
    return mLevelDB;
}

float NodeStore::getCacheHitRate ()
{
    return mCache.getHitRate ();

@@ -121,7 +89,6 @@ bool NodeStore::store (NodeObjectType type, uint32 index,

void NodeStore::bulkWrite (Job&)
{
    assert (mLevelDB);
    int setSize = 0;

    while (1)

@@ -83,8 +83,6 @@ public:
    */
    static void addBackendFactory (BackendFactory& factory);

    bool isLevelDB ();

    float getCacheHitRate ();

    bool store (NodeObjectType type, uint32 index, Blob const& data,

@@ -121,8 +119,6 @@ private:

    std::vector< boost::shared_ptr<NodeObject> > mWriteSet;
    bool mWritePending;
    bool mLevelDB;
    bool mEphemeralDB;
};

#endif

@@ -85,8 +85,8 @@ public:
    boost::filesystem::path     DEBUG_LOGFILE;
    boost::filesystem::path     VALIDATORS_FILE;    // As specified in rippled.cfg.
    std::string                 NODE_DB;            // Database to use for nodes
    std::string                 LDB_EPHEMERAL;      // Database for temporary storage
    bool                        LDB_IMPORT;         // Import into LevelDB
    std::string                 FASTNODE_DB;        // Database for temporary storage
    std::string                 DB_IMPORT;          // Import from old DB
    bool                        ELB_SUPPORT;        // Support Amazon ELB

    std::string                 VALIDATORS_SITE;    // Where to find validators.txt on the Internet.

@@ -2401,14 +2401,6 @@ Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, Sco
    if (dbKB > 0)
        ret["dbKBLedger"] = dbKB;

    if (!getApp().getNodeStore ().isLevelDB ())
    {
        dbKB = getApp().getHashNodeDB ()->getDB ()->getKBUsedDB ();

        if (dbKB > 0)
            ret["dbKBHashNode"] = dbKB;
    }

    dbKB = getApp().getTxnDB ()->getDB ()->getKBUsedDB ();

    if (dbKB > 0)

@@ -256,7 +256,7 @@ public:
    void sweep ();

private:
    void updateTables (bool);
    void updateTables ();
    void startNewLedger ();
    bool loadOldLedger (const std::string&);

@@ -445,53 +445,6 @@ void Application::setup ()
    options.create_if_missing = true;
    options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);

    if (theConfig.NODE_SIZE >= 2)
        options.filter_policy = leveldb::NewBloomFilterPolicy (10);

    if (theConfig.LDB_IMPORT)
        options.write_buffer_size = 32 << 20;

    if (m_nodeStore.isLevelDB ())
    {
        WriteLog (lsINFO, Application) << "LevelDB used for nodes";
        leveldb::Status status = leveldb::DB::Open (options, (theConfig.DATA_DIR / "hashnode").string (), &mHashNodeLDB);

        if (!status.ok () || !mHashNodeLDB)
        {
            WriteLog (lsFATAL, Application) << "Unable to open/create hash node db: "
                                            << (theConfig.DATA_DIR / "hashnode").string ()
                                            << " " << status.ToString ();
            StopSustain ();
            exit (3);
        }
    }
    else
    {
        WriteLog (lsINFO, Application) << "SQLite used for nodes";
        boost::thread t5 (BIND_TYPE (&InitDB, &mHashNodeDB, "hashnode.db", HashNodeDBInit, HashNodeDBCount));
        t5.join ();
    }

    if (!theConfig.LDB_EPHEMERAL.empty ())
    {
        leveldb::Status status = leveldb::DB::Open (options, theConfig.LDB_EPHEMERAL, &mEphemeralLDB);

        if (!status.ok () || !mEphemeralLDB)
        {
            WriteLog (lsFATAL, Application) << "Unable to open/create ephemeral db: "
                                            << theConfig.LDB_EPHEMERAL << " " << status.ToString ();
            StopSustain ();
            exit (3);
        }
    }

    if (!m_nodeStore.isLevelDB ())
    {
        getApp().getHashNodeDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
                (theConfig.getSize (siHashNodeDBCache) * 1024)));
        getApp().getHashNodeDB ()->getDB ()->setupCheckpointing (&mJobQueue);
    }

    getApp().getLedgerDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
            (theConfig.getSize (siLgrDBCache) * 1024)));
    getApp().getTxnDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %

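A note on the PRAGMA statements above: SQLite treats a negative cache_size as a size in kibibytes rather than a page count, which is why the configured megabyte value is multiplied by 1024 and negated. A standalone sketch of building that statement with boost::format; the 256 MB figure is only an example value, the real one comes from the node's configuration:

#include <boost/format.hpp>
#include <iostream>
#include <string>

int main ()
{
    // Example cache size in megabytes (illustrative, not a rippled default).
    int const cacheSizeMB = 256;

    // A negative cache_size makes SQLite size its page cache in KiB.
    std::string const pragma = boost::str (
        boost::format ("PRAGMA cache_size=-%d;") % (cacheSizeMB * 1024));

    std::cout << pragma << std::endl;   // prints: PRAGMA cache_size=-262144;
    return 0;
}
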
@@ -501,7 +454,7 @@ void Application::setup ()
    mLedgerDB->getDB ()->setupCheckpointing (&mJobQueue);

    if (!theConfig.RUN_STANDALONE)
        updateTables (theConfig.LDB_IMPORT);
        updateTables ();

    mFeatures->addInitialFeatures ();

@@ -980,7 +933,7 @@ static void addTxnSeqField ()
    db->executeSQL ("END TRANSACTION;");
}

void Application::updateTables (bool ldbImport)
void Application::updateTables ()
{
    // perform any needed table updates
    assert (schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "TransID"));

@@ -994,27 +947,8 @@ void Application::updateTables (bool ldbImport)
        exit (1);
    }

    if (getApp().getNodeStore ().isLevelDB ())
    {
        boost::filesystem::path hashPath = theConfig.DATA_DIR / "hashnode.db";

        if (boost::filesystem::exists (hashPath))
        {
            if (theConfig.LDB_IMPORT)
            {
                Log (lsWARNING) << "Importing SQLite -> LevelDB";
                getApp().getNodeStore ().import (hashPath.string ());
                Log (lsWARNING) << "Remove or rename the hashnode.db file";
            }
            else
            {
                Log (lsWARNING) << "SQLite hashnode database exists. Please either remove or import";
                Log (lsWARNING) << "To import, start with the '--import' option. Otherwise, remove hashnode.db";
                StopSustain ();
                exit (1);
            }
        }
    }
    if (!theConfig.DB_IMPORT.empty())
        getApp().getNodeStore().import(theConfig.DB_IMPORT);
}

//------------------------------------------------------------------------------

@@ -174,7 +174,7 @@ int rippleMain (int argc, char** argv)
    ("start", "Start from a fresh Ledger.")
    ("net", "Get the initial ledger from the network.")
    ("fg", "Run in the foreground.")
    ("import", "Import SQLite node DB into LevelDB.")
    ("import", po::value<std::string> (), "Import old DB into new DB.")
    ;

    // Interpret positional arguments as --parameters.

@@ -271,7 +271,8 @@ int rippleMain (int argc, char** argv)

    if (vm.count ("start")) theConfig.START_UP = Config::FRESH;

    if (vm.count ("import")) theConfig.LDB_IMPORT = true;
    if (vm.count ("import"))
        theConfig.DB_IMPORT = vm["import"].as<std::string> ();

    if (vm.count ("ledger"))
    {

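The two rippleMain hunks above turn --import from a boolean switch into a string-valued option whose argument lands in theConfig.DB_IMPORT. A self-contained sketch of that pattern with boost::program_options; the option wiring mirrors the diff, but the surrounding main and variable names are illustrative only:

#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main (int argc, char** argv)
{
    // "--import" now takes a path argument instead of acting as a flag.
    po::options_description desc ("General Options");
    desc.add_options ()
        ("import", po::value<std::string> (), "Import old DB into new DB.")
    ;

    po::variables_map vm;
    po::store (po::parse_command_line (argc, argv, desc), vm);
    po::notify (vm);

    std::string dbImport;

    if (vm.count ("import"))
        dbImport = vm["import"].as<std::string> ();   // e.g. run with: --import /path/to/old/db

    if (!dbImport.empty ())
        std::cout << "Would import from: " << dbImport << std::endl;

    return 0;
}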