Mirror of https://github.com/Xahau/xahaud.git, synced 2025-11-20 02:25:53 +00:00
Replaces StringPairArray with Section in Config.
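For orientation before the hunks: a minimal sketch, not part of the diff, of the configuration access pattern this commit moves to. It assumes only the Section container plus the get<> and get_if_exists helpers that the BasicConfig.h hunks below introduce; the function and variable names here are illustrative.

    // Illustrative only: populate a Section and read it back with the new helpers.
    #include <ripple/basics/BasicConfig.h>

    #include <cstdint>
    #include <string>

    void exampleSectionAccess ()
    {
        ripple::Section params ("node_db");
        params.set ("type", "memory");
        params.set ("path", "main");

        // Typed read with an explicit fallback for a missing key.
        auto const type = ripple::get<std::string> (params, "type", "none");

        // Conditional read: the target keeps its prior value unless the key
        // is present and converts cleanly; this replaces the old
        // isNotEmpty()/getIntValue() pattern removed throughout this diff.
        std::uint32_t onlineDelete = 0;
        ripple::get_if_exists (params, "online_delete", onlineDelete);
        (void) type;
    }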
@@ -20,6 +20,7 @@
 #include <BeastConfig.h>
+#include <ripple/basics/StringUtilities.h>
 #include <ripple/app/data/Database.h>
 #include <cstring>

 namespace ripple {

@@ -20,6 +20,7 @@
 #include <BeastConfig.h>
 #include <ripple/app/data/DatabaseCon.h>
 #include <ripple/app/data/SqliteDatabase.h>
+#include <ripple/core/ConfigSections.h>

 namespace ripple {

@@ -54,8 +55,8 @@ setup_DatabaseCon (Config const& c)
 {
     DatabaseCon::Setup setup;

-    if (c.nodeDatabase["online_delete"].isNotEmpty())
-        setup.onlineDelete = c.nodeDatabase["online_delete"].getIntValue();
+    auto const& sec = c.section (ConfigSection::nodeDatabase ());
+    get_if_exists (sec, "online_delete", setup.onlineDelete);
     setup.startUp = c.START_UP;
     setup.standAlone = c.RUN_STANDALONE;
     setup.dataDir = c.legacy ("database_path");

@@ -337,7 +337,7 @@ public:
             m_logs.journal("TaggedCache"))

         , m_collectorManager (CollectorManager::New (
-            getConfig().insightSettings, m_logs.journal("Collector")))
+            getConfig().section (SECTION_INSIGHT), m_logs.journal("Collector")))

         , family_ (*m_nodeStore, *m_collectorManager)

@@ -1457,7 +1457,7 @@ static void addTxnSeqField ()

 void ApplicationImp::updateTables ()
 {
-    if (getConfig ().nodeDatabase.size () <= 0)
+    if (getConfig ().section (ConfigSection::nodeDatabase ()).empty ())
     {
         WriteLog (lsFATAL, Application) << "The [node_db] configuration setting has been updated and must be set";
         exitWithCode(1);

@@ -1480,7 +1480,7 @@ void ApplicationImp::updateTables ()
         std::unique_ptr <NodeStore::Database> source =
             NodeStore::Manager::instance().make_Database ("NodeStore.import", scheduler,
                 deprecatedLogs().journal("NodeObject"), 0,
-                getConfig ().importNodeDatabase);
+                getConfig ()[ConfigSection::importNodeDatabase ()]);

         WriteLog (lsWARNING, NodeObject) <<
             "Node import from '" << source->getName () << "' to '"

@@ -30,17 +30,17 @@ public:
     beast::insight::Collector::ptr m_collector;
     std::unique_ptr <beast::insight::Groups> m_groups;

-    CollectorManagerImp (beast::StringPairArray const& params,
+    CollectorManagerImp (Section const& params,
         beast::Journal journal)
         : m_journal (journal)
     {
-        std::string const& server (params ["server"].toStdString());
+        std::string const& server = get<std::string> (params, "server");

         if (server == "statsd")
        {
             beast::IP::Endpoint const address (beast::IP::Endpoint::from_string (
-                params ["address"].toStdString ()));
-            std::string const& prefix (params ["prefix"].toStdString ());
+                get<std::string> (params, "address")));
+            std::string const& prefix (get<std::string> (params, "prefix"));

             m_collector = beast::insight::StatsDCollector::New (address, prefix, journal);
         }
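One behavioural note on the CollectorManagerImp hunk above: get<std::string> returns a default-constructed value when the key is absent, so an empty [insight] section falls through the server == "statsd" test and no StatsD collector is created, matching the old StringPairArray behaviour. A hedged sketch (statsdConfigured is a hypothetical helper, not part of the commit):

    #include <ripple/basics/BasicConfig.h>

    #include <string>

    // Hypothetical helper mirroring the branch in CollectorManagerImp above.
    bool statsdConfigured (ripple::Section const& insight)
    {
        // An absent "server" key yields an empty string, skipping the StatsD branch.
        return ripple::get<std::string> (insight, "server") == "statsd";
    }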
@@ -73,7 +73,7 @@ CollectorManager::~CollectorManager ()
 {
 }

-CollectorManager* CollectorManager::New (beast::StringPairArray const& params,
+CollectorManager* CollectorManager::New (Section const& params,
     beast::Journal journal)
 {
     return new CollectorManagerImp (params, journal);

@@ -20,7 +20,7 @@
 #ifndef RIPPLE_APP_MAIN_COLLECTORMANAGER_H_INCLUDED
 #define RIPPLE_APP_MAIN_COLLECTORMANAGER_H_INCLUDED

-#include <beast/module/core/text/StringPairArray.h>
+#include <ripple/basics/BasicConfig.h>
 #include <beast/Insight.h>

 namespace ripple {

@@ -29,7 +29,7 @@ namespace ripple {
 class CollectorManager
 {
 public:
-    static CollectorManager* New (beast::StringPairArray const& params,
+    static CollectorManager* New (Section const& params,
         beast::Journal journal);
     virtual ~CollectorManager () = 0;
     virtual beast::insight::Collector::ptr const& collector () = 0;

@@ -163,9 +163,11 @@ static
 void
 setupConfigForUnitTests (Config* config)
 {
-    config->nodeDatabase = parseDelimitedKeyValueString ("type=memory|path=main");
-    config->ephemeralNodeDatabase = beast::StringPairArray ();
-    config->importNodeDatabase = beast::StringPairArray ();
+    config->overwrite (ConfigSection::nodeDatabase (), "type", "memory");
+    config->overwrite (ConfigSection::nodeDatabase (), "path", "main");
+
+    config->deprecatedClearSection (ConfigSection::tempNodeDatabase ());
+    config->deprecatedClearSection (ConfigSection::importNodeDatabase ());
 }

 static int runShutdownTests ()
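The overwrite and deprecatedClearSection calls used here are declared in the BasicConfig.h hunk further down, with implementations in the BasicConfig.cpp hunk. A sketch of my reading of the semantics (exampleOverwrite is illustrative): overwrite upserts a single key into the named section, and deprecatedClearSection swaps an existing section for a fresh empty one rather than erasing the map entry, so later section() lookups still succeed.

    #include <ripple/basics/BasicConfig.h>

    // Illustrative only; assumes the declarations shown in this commit.
    void exampleOverwrite (ripple::BasicConfig& config)
    {
        config.overwrite ("node_db", "type", "memory");  // set (or create) the key
        config.overwrite ("node_db", "type", "nudb");    // replace its value
        config.deprecatedClearSection ("node_db");       // drop all key/value pairs
    }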
@@ -43,8 +43,8 @@ public:
     std::uint32_t deleteInterval = 0;
     bool advisoryDelete = false;
     std::uint32_t ledgerHistory = 0;
-    beast::StringPairArray nodeDatabase;
-    beast::StringPairArray ephemeralNodeDatabase;
+    Section nodeDatabase;
+    Section ephemeralNodeDatabase;
     std::string databasePath;
     std::uint32_t deleteBatch = 100;
     std::uint32_t backOff = 100;

@@ -21,6 +21,7 @@
 #include <ripple/app/misc/SHAMapStoreImp.h>
 #include <ripple/app/ledger/LedgerMaster.h>
 #include <ripple/app/main/Application.h>
+#include <ripple/core/ConfigSections.h>
 #include <boost/format.hpp>
 #include <beast/cxx14/memory.h> // <memory>

@@ -437,7 +438,7 @@ void
 SHAMapStoreImp::dbPaths()
 {
     boost::filesystem::path dbPath =
-            setup_.nodeDatabase["path"].toStdString();
+            get<std::string>(setup_.nodeDatabase, "path");

     if (boost::filesystem::exists (dbPath))
     {

@@ -488,7 +489,7 @@ SHAMapStoreImp::dbPaths()
                 << "remove the files matching "
                 << stateDbPathName.string()
                 << " and contents of the directory "
-                << setup_.nodeDatabase["path"].toStdString()
+                << get<std::string>(setup_.nodeDatabase, "path")
                 << std::endl;

         throw std::runtime_error ("state db error");

@@ -499,7 +500,7 @@ std::shared_ptr <NodeStore::Backend>
 SHAMapStoreImp::makeBackendRotating (std::string path)
 {
     boost::filesystem::path newPath;
-    NodeStore::Parameters parameters = setup_.nodeDatabase;
+    Section parameters = setup_.nodeDatabase;

     if (path.size())
     {

@@ -507,7 +508,7 @@ SHAMapStoreImp::makeBackendRotating (std::string path)
     }
     else
     {
-        boost::filesystem::path p = parameters["path"].toStdString();
+        boost::filesystem::path p = get<std::string>(parameters, "path");
         p /= dbPrefix_;
         p += ".%%%%";
         newPath = boost::filesystem::unique_path (p);

@@ -693,20 +694,20 @@ setup_SHAMapStore (Config const& c)
 {
     SHAMapStore::Setup setup;

-    if (c.nodeDatabase["online_delete"].isNotEmpty())
-        setup.deleteInterval = c.nodeDatabase["online_delete"].getIntValue();
-    if (c.nodeDatabase["advisory_delete"].isNotEmpty() && setup.deleteInterval)
-        setup.advisoryDelete = c.nodeDatabase["advisory_delete"].getIntValue();
+    auto const& sec = c.section (ConfigSection::nodeDatabase ());
+    get_if_exists (sec, "online_delete", setup.deleteInterval);
+
+    if (setup.deleteInterval)
+        get_if_exists (sec, "advisory_delete", setup.advisoryDelete);
+
     setup.ledgerHistory = c.LEDGER_HISTORY;
-    setup.nodeDatabase = c.nodeDatabase;
-    setup.ephemeralNodeDatabase = c.ephemeralNodeDatabase;
+    setup.nodeDatabase = c[ConfigSection::nodeDatabase ()];
+    setup.ephemeralNodeDatabase = c[ConfigSection::tempNodeDatabase ()];
     setup.databasePath = c.legacy("database_path");
-    if (c.nodeDatabase["delete_batch"].isNotEmpty())
-        setup.deleteBatch = c.nodeDatabase["delete_batch"].getIntValue();
-    if (c.nodeDatabase["backOff"].isNotEmpty())
-        setup.backOff = c.nodeDatabase["backOff"].getIntValue();
-    if (c.nodeDatabase["age_threshold"].isNotEmpty())
-        setup.ageThreshold = c.nodeDatabase["age_threshold"].getIntValue();
+
+    get_if_exists (sec, "delete_batch", setup.deleteBatch);
+    get_if_exists (sec, "backOff", setup.backOff);
+    get_if_exists (sec, "age_threshold", setup.ageThreshold);

     return setup;
 }
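A note on the setup_SHAMapStore hunk above: get_if_exists writes to its target only when the key is present and convertible, so the member defaults declared in the SHAMapStore.h hunk (deleteBatch = 100, backOff = 100) survive an absent key, preserving the old isNotEmpty() guard behaviour. Sketch (illustrative only):

    #include <ripple/basics/BasicConfig.h>

    #include <cstdint>

    // Illustrative only: defaults survive a miss.
    void exampleDefaults (ripple::Section const& sec)
    {
        std::uint32_t deleteBatch = 100;  // default from SHAMapStore::Setup
        ripple::get_if_exists (sec, "delete_batch", deleteBatch);
        // Still 100 unless [node_db] contains delete_batch=<n>.
    }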
@@ -249,11 +249,11 @@ public:
     }

     std::unique_ptr <NodeStore::Backend> createInstance (
-        size_t, NodeStore::Parameters const& keyValues,
+        size_t, Section const& keyValues,
         NodeStore::Scheduler&, beast::Journal)
     {
         return std::make_unique <SqliteBackend> (
-            keyValues ["path"].toStdString (),
+            get<std::string>(keyValues, "path"),
             getConfig ().getSize(siHashNodeDBCache) * 1024);
     }
 };

@@ -182,6 +182,11 @@
     overwrite (std::string const& section, std::string const& key,
         std::string const& value);

+    /** Remove all the key/value pairs from the section.
+    */
+    void
+    deprecatedClearSection (std::string const& section);
+
     /**
      *  Set a value that is not a key/value pair.
      *

@@ -232,7 +237,7 @@ set (T& target, std::string const& name, Section const& section)
         target = boost::lexical_cast <T> (result.first);
         return true;
     }
-    catch(...)
+    catch (boost::bad_lexical_cast&)
     {
     }
     return false;

@@ -256,7 +261,7 @@ set (T& target, T const& defaultValue,
         target = boost::lexical_cast <T> (result.first);
         return true;
     }
-    catch(...)
+    catch (boost::bad_lexical_cast&)
     {
         target = defaultValue;
     }

@@ -280,12 +285,46 @@ get (Section const& section,
     {
         return boost::lexical_cast <T> (result.first);
     }
-    catch(...)
+    catch (boost::bad_lexical_cast&)
     {
     }
     return defaultValue;
 }

+template <class T>
+bool
+get_if_exists (Section const& section,
+    std::string const& name, T& v)
+{
+    auto const result = section.find (name);
+    if (! result.second)
+        return false;
+    try
+    {
+        v = boost::lexical_cast <T> (result.first);
+        return true;
+    }
+    catch (boost::bad_lexical_cast&)
+    {
+    }
+    return false;
+}
+
+template <>
+inline
+bool
+get_if_exists<bool> (Section const& section,
+    std::string const& name, bool& v)
+{
+    int intVal = 0;
+    if (get_if_exists (section, name, intVal))
+    {
+        v = bool (intVal);
+        return true;
+    }
+    return false;
+}
+
 } // ripple

 #endif
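A usage sketch for the helpers added above (nothing here is in the commit itself). The bool specialization routes through the int overload, so a value such as advisory_delete=1 reads as true, while a non-numeric value throws inside lexical_cast and is reported as absent:

    #include <ripple/basics/BasicConfig.h>

    #include <cassert>

    void exampleGetIfExists ()
    {
        ripple::Section sec ("node_db");
        sec.set ("advisory_delete", "1");

        bool advisory = false;
        assert (ripple::get_if_exists (sec, "advisory_delete", advisory));
        assert (advisory);  // "1" -> 1 -> true

        int missing = 42;
        assert (! ripple::get_if_exists (sec, "no_such_key", missing));
        assert (missing == 42);  // untouched on a miss
    }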
@@ -24,7 +24,6 @@
 #include <ripple/basics/Blob.h>
 #include <ripple/basics/strHex.h>
 #include <boost/format.hpp>
-#include <beast/module/core/text/StringPairArray.h>
 #include <sstream>
 #include <string>

@@ -96,16 +95,6 @@ bool parseIpPort (std::string const& strSource, std::string& strIP, int& iPort);
 bool parseUrl (std::string const& strUrl, std::string& strScheme,
     std::string& strDomain, int& iPort, std::string& strPath);

-/** Create a Parameters from a String.
-
-    Parameter strings have the format:
-
-    <key>=<value>['|'<key>=<value>]
-*/
-extern beast::StringPairArray
-parseDelimitedKeyValueString (
-    beast::String s, beast::beast_wchar delimiter='|');
-
 } // ripple

 #endif

@@ -115,6 +115,14 @@ BasicConfig::overwrite (std::string const& section, std::string const& key,
     result.first->second.set (key, value);
 }

+void
+BasicConfig::deprecatedClearSection (std::string const& section)
+{
+    auto i = map_.find(section);
+    if (i != map_.end())
+        i->second = Section(section);
+}
+
 void
 BasicConfig::legacy(std::string const& section, std::string value)
 {

@@ -221,45 +221,4 @@ bool parseUrl (std::string const& strUrl, std::string& strScheme, std::string& s

     return bMatch;
 }

-beast::StringPairArray parseDelimitedKeyValueString (beast::String parameters,
-    beast::beast_wchar delimiter)
-{
-    beast::StringPairArray keyValues;
-
-    while (parameters.isNotEmpty ())
-    {
-        beast::String pair;
-
-        {
-            int const delimiterPos = parameters.indexOfChar (delimiter);
-
-            if (delimiterPos != -1)
-            {
-                pair = parameters.substring (0, delimiterPos);
-
-                parameters = parameters.substring (delimiterPos + 1);
-            }
-            else
-            {
-                pair = parameters;
-
-                parameters = beast::String::empty;
-            }
-        }
-
-        int const equalPos = pair.indexOfChar ('=');
-
-        if (equalPos != -1)
-        {
-            beast::String const key = pair.substring (0, equalPos);
-            beast::String const value = pair.substring (equalPos + 1, pair.length ());
-
-            keyValues.set (key, value);
-        }
-    }
-
-    return keyValues;
-}
-
 } // ripple
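With the delimited-string parser removed above, parameters that used to travel as "type=memory|path=main" strings become explicit Section entries, as the unit-test hunks later in this commit show. An equivalent sketch (memoryBackendParams is illustrative):

    #include <ripple/basics/BasicConfig.h>

    // Illustrative Section equivalent of
    // parseDelimitedKeyValueString ("type=memory|path=main").
    ripple::Section memoryBackendParams ()
    {
        ripple::Section params ("node_db");
        params.set ("type", "memory");
        params.set ("path", "main");
        return params;
    }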
@@ -27,7 +27,6 @@
 #include <beast/http/URL.h>
 #include <beast/net/IPEndpoint.h>
 #include <beast/module/core/files/File.h>
-#include <beast/module/core/text/StringPairArray.h>
 #include <beast/utility/ci_char_traits.h>
 #include <boost/filesystem.hpp>
 #include <boost/lexical_cast.hpp>

@@ -52,14 +51,6 @@ countSectionEntries (IniFileSections& secSource, std::string const& strSection);
 IniFileSections::mapped_type*
 getIniFileSection (IniFileSections& secSource, std::string const& strSection);

-/** Parse a section of lines as a key/value array.
-    Each line is in the form <key>=<value>.
-    Spaces are considered part of the key and value.
-*/
-// DEPRECATED
-beast::StringPairArray
-parseKeyValueSection (IniFileSections& secSource, std::string const& strSection);
-
 //------------------------------------------------------------------------------

 enum SizedItemName

@@ -164,40 +155,7 @@ public:

     //--------------------------------------------------------------------------

-    /** Parameters for the insight collection module */
-    beast::StringPairArray insightSettings;
-
-    /** Parameters for the main NodeStore database.
-
-        This is 1 or more strings of the form <key>=<value>
-        The 'type' and 'path' keys are required, see rippled-example.cfg
-
-        @see Database
-    */
-    beast::StringPairArray nodeDatabase;
-
-    /** Parameters for the ephemeral NodeStore database.
-
-        This is an auxiliary database for the NodeStore, usually placed
-        on a separate faster volume. However, the volume data may not persist
-        between launches. Use of the ephemeral database is optional.
-
-        The format is the same as that for @ref nodeDatabase
-
-        @see Database
-    */
-    beast::StringPairArray ephemeralNodeDatabase;
-
-    /** Parameters for importing an old database in to the current node database.
-        If this is not empty, then it specifies the key/value parameters for
-        another node database from which to import all data into the current
-        node database specified by @ref nodeDatabase.
-        The format of this string is in the form:
-            <key>'='<value>['|'<key>'='value]
-        @see parseDelimitedKeyValueString
-    */
     bool doImport;
-    beast::StringPairArray importNodeDatabase;

     //
     //

@@ -147,36 +147,6 @@ bool getSingleSection (IniFileSections& secSource,
     return bSingle;
 }

-beast::StringPairArray
-parseKeyValueSection (IniFileSections& secSource, std::string const& strSection)
-{
-    beast::StringPairArray result;
-
-    typedef IniFileSections::mapped_type Entries;
-
-    Entries* const entries = getIniFileSection (secSource, strSection);
-
-    if (entries != nullptr)
-    {
-        for (Entries::const_iterator iter = entries->begin ();
-            iter != entries->end (); ++iter)
-        {
-            std::string const line (iter->c_str ());
-
-            int const equalPos = line.find ('=');
-
-            if (equalPos != std::string::npos)
-            {
-                result.set (
-                    line.substr (0, equalPos),
-                    line.substr (equalPos + 1));
-            }
-        }
-    }
-
-    return result;
-}
-
 /** Parses a set of strings into IP::Endpoint
     Strings which fail to parse are not included in the output. If a stream is
     provided, human readable diagnostic error messages are written for each
@@ -469,17 +439,6 @@ void Config::loadFromString (std::string const& fileContents)
             parsedAddresses.cbegin (), parsedAddresses.cend ());
     }

-    insightSettings = parseKeyValueSection (secConfig, SECTION_INSIGHT);
-
-    nodeDatabase = parseKeyValueSection (
-        secConfig, ConfigSection::nodeDatabase ());
-
-    ephemeralNodeDatabase = parseKeyValueSection (
-        secConfig, ConfigSection::tempNodeDatabase ());
-
-    importNodeDatabase = parseKeyValueSection (
-        secConfig, ConfigSection::importNodeDatabase ());
-
     if (getSingleSection (secConfig, SECTION_NODE_SIZE, strTemp))
     {
         if (strTemp == "tiny")
@@ -48,7 +48,7 @@ public:
     */
     virtual
     std::unique_ptr <Backend>
-    createInstance (size_t keyBytes, Parameters const& parameters,
+    createInstance (size_t keyBytes, Section const& parameters,
         Scheduler& scheduler, beast::Journal journal) = 0;
 };

@@ -56,7 +56,7 @@ public:
     /** Create a backend. */
     virtual
     std::unique_ptr <Backend>
-    make_Backend (Parameters const& parameters,
+    make_Backend (Section const& parameters,
         Scheduler& scheduler, beast::Journal journal) = 0;

     /** Construct a NodeStore database.

@@ -87,8 +87,8 @@ public:
     std::unique_ptr <Database>
     make_Database (std::string const& name, Scheduler& scheduler,
         beast::Journal journal, int readThreads,
-        Parameters const& backendParameters,
-        Parameters fastBackendParameters = Parameters()) = 0;
+        Section const& backendParameters,
+        Section fastBackendParameters = Section()) = 0;

     virtual
     std::unique_ptr <DatabaseRotating>
@@ -21,7 +21,7 @@
 #define RIPPLE_NODESTORE_TYPES_H_INCLUDED

 #include <ripple/nodestore/NodeObject.h>
-#include <beast/module/core/text/StringPairArray.h>
+#include <ripple/basics/BasicConfig.h>
 #include <vector>

 namespace ripple {

@@ -48,11 +48,6 @@ enum Status

 /** A batch of NodeObjects to write at once. */
 typedef std::vector <NodeObject::Ptr> Batch;

-/** A list of key/value parameter pairs passed to the backend. */
-// VFALCO TODO Use std::string, pair, vector
-typedef beast::StringPairArray Parameters;
-
 }
 }
@@ -51,7 +51,7 @@ public:
     std::unique_ptr <Backend>
     createInstance (
         size_t keyBytes,
-        Parameters const& keyValues,
+        Section const& keyValues,
         Scheduler& scheduler,
         beast::Journal journal);

@@ -82,9 +82,9 @@ private:
     MemoryDB* db_;

 public:
-    MemoryBackend (size_t keyBytes, Parameters const& keyValues,
+    MemoryBackend (size_t keyBytes, Section const& keyValues,
         Scheduler& scheduler, beast::Journal journal)
-        : name_ (keyValues ["path"].toStdString ())
+        : name_ (get<std::string>(keyValues, "path"))
         , journal_ (journal)
     {
         if (name_.empty())

@@ -187,7 +187,7 @@ MemoryFactory::getName() const
 std::unique_ptr <Backend>
 MemoryFactory::createInstance (
     size_t keyBytes,
-    Parameters const& keyValues,
+    Section const& keyValues,
     Scheduler& scheduler,
     beast::Journal journal)
 {
@@ -62,11 +62,11 @@ public:
     std::atomic <bool> deletePath_;
     Scheduler& scheduler_;

-    NuDBBackend (int keyBytes, Parameters const& keyValues,
+    NuDBBackend (int keyBytes, Section const& keyValues,
         Scheduler& scheduler, beast::Journal journal)
         : journal_ (journal)
         , keyBytes_ (keyBytes)
-        , name_ (keyValues ["path"].toStdString ())
+        , name_ (get<std::string>(keyValues, "path"))
         , deletePath_(false)
         , scheduler_ (scheduler)
     {

@@ -259,7 +259,7 @@ public:
     std::unique_ptr <Backend>
     createInstance (
         size_t keyBytes,
-        Parameters const& keyValues,
+        Section const& keyValues,
         Scheduler& scheduler,
         beast::Journal journal)
     {
@@ -111,7 +111,7 @@ public:
     std::unique_ptr <Backend>
     createInstance (
         size_t,
-        Parameters const&,
+        Section const&,
         Scheduler&, beast::Journal)
     {
         return std::make_unique <NullBackend> ();
@@ -99,16 +99,15 @@ public:
     std::string m_name;
     std::unique_ptr <rocksdb::DB> m_db;

-    RocksDBBackend (int keyBytes, Parameters const& keyValues,
+    RocksDBBackend (int keyBytes, Section const& keyValues,
         Scheduler& scheduler, beast::Journal journal, RocksDBEnv* env)
         : m_deletePath (false)
         , m_journal (journal)
         , m_keyBytes (keyBytes)
         , m_scheduler (scheduler)
         , m_batch (*this, scheduler)
-        , m_name (keyValues ["path"].toStdString ())
     {
-        if (m_name.empty())
+        if (!get_if_exists(keyValues, "path", m_name))
             throw std::runtime_error ("Missing path in RocksDBFactory backend");

         rocksdb::Options options;

@@ -116,51 +115,45 @@ public:
         options.create_if_missing = true;
         options.env = env;

-        if (keyValues["cache_mb"].isEmpty())
+        if (!keyValues.exists ("cache_mb"))
         {
             table_options.block_cache = rocksdb::NewLRUCache (getConfig ().getSize (siHashNodeDBCache) * 1024 * 1024);
         }
         else
         {
-            table_options.block_cache = rocksdb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
+            table_options.block_cache = rocksdb::NewLRUCache (get<int>(keyValues, "cache_mb") * 1024L * 1024L);
         }

-        if (keyValues["filter_bits"].isEmpty())
+        if (!keyValues.exists ("filter_bits"))
         {
             if (getConfig ().NODE_SIZE >= 2)
                 table_options.filter_policy.reset (rocksdb::NewBloomFilterPolicy (10));
         }
-        else if (keyValues["filter_bits"].getIntValue() != 0)
+        else if (auto const v = get<int>(keyValues, "filter_bits"))
         {
-            table_options.filter_policy.reset (rocksdb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue()));
+            table_options.filter_policy.reset (rocksdb::NewBloomFilterPolicy (v));
         }

-        if (! keyValues["open_files"].isEmpty())
-        {
-            options.max_open_files = keyValues["open_files"].getIntValue();
-        }
+        get_if_exists (keyValues, "open_files", options.max_open_files);

-        if (! keyValues["file_size_mb"].isEmpty())
+        if (keyValues.exists ("file_size_mb"))
         {
-            options.target_file_size_base = 1024 * 1024 * keyValues["file_size_mb"].getIntValue();
+            options.target_file_size_base = 1024 * 1024 * get<int>(keyValues,"file_size_mb");
             options.max_bytes_for_level_base = 5 * options.target_file_size_base;
             options.write_buffer_size = 2 * options.target_file_size_base;
         }

-        if (! keyValues["file_size_mult"].isEmpty())
-        {
-            options.target_file_size_multiplier = keyValues["file_size_mult"].getIntValue();
-        }
+        get_if_exists (keyValues, "file_size_mult", options.target_file_size_multiplier);

-        if (! keyValues["bg_threads"].isEmpty())
+        if (keyValues.exists ("bg_threads"))
         {
             options.env->SetBackgroundThreads
-                (keyValues["bg_threads"].getIntValue(), rocksdb::Env::LOW);
+                (get<int>(keyValues, "bg_threads"), rocksdb::Env::LOW);
         }

-        if (! keyValues["high_threads"].isEmpty())
+        if (keyValues.exists ("high_threads"))
         {
-            auto const highThreads = keyValues["high_threads"].getIntValue();
+            auto const highThreads = get<int>(keyValues, "high_threads");
             options.env->SetBackgroundThreads (highThreads, rocksdb::Env::HIGH);

             // If we have high-priority threads, presumably we want to

@@ -169,29 +162,22 @@ public:
             options.max_background_flushes = highThreads;
         }

-        if (! keyValues["compression"].isEmpty ())
+        if (keyValues.exists ("compression") &&
+            (get<int>(keyValues, "compression") == 0))
         {
-            if (keyValues["compression"].getIntValue () == 0)
-            {
-                options.compression = rocksdb::kNoCompression;
-            }
+            options.compression = rocksdb::kNoCompression;
         }

-        if (! keyValues["block_size"].isEmpty ())
-        {
-            table_options.block_size = keyValues["block_size"].getIntValue ();
-        }
+        get_if_exists (keyValues, "block_size", table_options.block_size);

-        if (! keyValues["universal_compaction"].isEmpty ())
+        if (keyValues.exists ("universal_compaction") &&
+            (get<int>(keyValues, "universal_compaction") != 0))
         {
-            if (keyValues["universal_compaction"].getIntValue () != 0)
-            {
-                options.compaction_style = rocksdb:: kCompactionStyleUniversal;
-                options.min_write_buffer_number_to_merge = 2;
-                options.max_write_buffer_number = 6;
-                options.write_buffer_size = 6 * options.target_file_size_base;
-            }
+            options.compaction_style = rocksdb::kCompactionStyleUniversal;
+            options.min_write_buffer_number_to_merge = 2;
+            options.max_write_buffer_number = 6;
+            options.write_buffer_size = 6 * options.target_file_size_base;
         }

         options.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -400,7 +386,7 @@ public:
     std::unique_ptr <Backend>
     createInstance (
         size_t keyBytes,
-        Parameters const& keyValues,
+        Section const& keyValues,
         Scheduler& scheduler,
         beast::Journal journal)
     {

@@ -95,12 +95,12 @@ public:
     std::string m_name;
     std::unique_ptr <rocksdb::DB> m_db;

-    RocksDBQuickBackend (int keyBytes, Parameters const& keyValues,
+    RocksDBQuickBackend (int keyBytes, Section const& keyValues,
         Scheduler& scheduler, beast::Journal journal, RocksDBQuickEnv* env)
         : m_deletePath (false)
         , m_journal (journal)
         , m_keyBytes (keyBytes)
-        , m_name (keyValues ["path"].toStdString ())
+        , m_name (get<std::string>(keyValues, "path"))
     {
         if (m_name.empty())
             throw std::runtime_error ("Missing path in RocksDBQuickFactory backend");

@@ -110,14 +110,9 @@ public:
         std::string style("level");
         std::uint64_t threads=4;

-        if (!keyValues["budget"].isEmpty())
-            budget = keyValues["budget"].getIntValue();
-
-        if (!keyValues["style"].isEmpty())
-            style = keyValues["style"].toStdString();
-
-        if (!keyValues["threads"].isEmpty())
-            threads = keyValues["threads"].getIntValue();
+        get_if_exists (keyValues, "budget", budget);
+        get_if_exists (keyValues, "style", style);
+        get_if_exists (keyValues, "threads", threads);

         // Set options

@@ -163,18 +158,11 @@ public:
         // options.memtable_factory.reset(
         //     rocksdb::NewHashCuckooRepFactory(options.write_buffer_size));

-        if (! keyValues["open_files"].isEmpty())
-        {
-            options.max_open_files = keyValues["open_files"].getIntValue();
-        }
+        get_if_exists (keyValues, "open_files", options.max_open_files);

-        if (! keyValues["compression"].isEmpty ())
-        {
-            if (keyValues["compression"].getIntValue () == 0)
-            {
-                options.compression = rocksdb::kNoCompression;
-            }
-        }
+        if (keyValues.exists ("compression") &&
+            (get<int>(keyValues, "compression") == 0))
+            options.compression = rocksdb::kNoCompression;

         rocksdb::DB* db = nullptr;

@@ -385,7 +373,7 @@ public:
     std::unique_ptr <Backend>
     createInstance (
         size_t keyBytes,
-        Parameters const& keyValues,
+        Section const& keyValues,
         Scheduler& scheduler,
         beast::Journal journal)
     {
@@ -55,13 +55,13 @@ ManagerImp::~ManagerImp()

 std::unique_ptr <Backend>
 ManagerImp::make_Backend (
-    Parameters const& parameters,
+    Section const& parameters,
     Scheduler& scheduler,
     beast::Journal journal)
 {
     std::unique_ptr <Backend> backend;

-    std::string const type (parameters ["type"].toStdString ());
+    std::string const type (get<std::string>(parameters, "type"));

     if (! type.empty ())
     {

@@ -91,8 +91,8 @@ ManagerImp::make_Database (
     Scheduler& scheduler,
     beast::Journal journal,
     int readThreads,
-    Parameters const& backendParameters,
-    Parameters fastBackendParameters)
+    Section const& backendParameters,
+    Section fastBackendParameters)
 {
     std::unique_ptr <Backend> backend (make_Backend (
         backendParameters, scheduler, journal));

@@ -167,11 +167,8 @@ std::unique_ptr <Backend>
 make_Backend (Section const& config,
     Scheduler& scheduler, beast::Journal journal)
 {
-    beast::StringPairArray v;
-    for (auto const& _ : config)
-        v.set (_.first, _.second);
     return Manager::instance().make_Backend (
-        v, scheduler, journal);
+        config, scheduler, journal);
 }

 }
@@ -57,7 +57,7 @@ public:

     std::unique_ptr <Backend>
     make_Backend (
-        Parameters const& parameters,
+        Section const& parameters,
         Scheduler& scheduler,
         beast::Journal journal) override;

@@ -67,8 +67,8 @@ public:
         Scheduler& scheduler,
         beast::Journal journal,
         int readThreads,
-        Parameters const& backendParameters,
-        Parameters fastBackendParameters) override;
+        Section const& backendParameters,
+        Section fastBackendParameters) override;

     std::unique_ptr <DatabaseRotating>
     make_DatabaseRotating (
@@ -38,10 +38,10 @@ public:

         testcase ("Backend type=" + type);

-        beast::StringPairArray params;
+        Section params;
         beast::UnitTestUtilities::TempDirectory path ("node_db");
         params.set ("type", type);
-        params.set ("path", path.getFullPathName ());
+        params.set ("path", path.getFullPathName ().toStdString ());

         // Create a batch
         Batch batch;

@@ -35,9 +35,9 @@ public:
         DummyScheduler scheduler;

         beast::UnitTestUtilities::TempDirectory node_db ("node_db");
-        beast::StringPairArray srcParams;
+        Section srcParams;
         srcParams.set ("type", srcBackendType);
-        srcParams.set ("path", node_db.getFullPathName ());
+        srcParams.set ("path", node_db.getFullPathName ().toStdString ());

         // Create a batch
         Batch batch;

@@ -61,9 +61,9 @@ public:

         // Set up the destination database
         beast::UnitTestUtilities::TempDirectory dest_db ("dest_db");
-        beast::StringPairArray destParams;
+        Section destParams;
         destParams.set ("type", destBackendType);
-        destParams.set ("path", dest_db.getFullPathName ());
+        destParams.set ("path", dest_db.getFullPathName ().toStdString ());

         std::unique_ptr <Database> dest = Manager::instance().make_Database (
             "test", scheduler, j, 2, destParams);

@@ -101,16 +101,16 @@ public:
         testcase (s);

         beast::UnitTestUtilities::TempDirectory node_db ("node_db");
-        beast::StringPairArray nodeParams;
+        Section nodeParams;
         nodeParams.set ("type", type);
-        nodeParams.set ("path", node_db.getFullPathName ());
+        nodeParams.set ("path", node_db.getFullPathName ().toStdString ());

         beast::UnitTestUtilities::TempDirectory temp_db ("temp_db");
-        beast::StringPairArray tempParams;
+        Section tempParams;
         if (useEphemeralDatabase)
         {
             tempParams.set ("type", type);
-            tempParams.set ("path", temp_db.getFullPathName ());
+            tempParams.set ("path", temp_db.getFullPathName ().toStdString ());
         }

         // Create a batch

@@ -164,7 +164,7 @@ public:
         {
             // Verify the ephemeral db
             std::unique_ptr <Database> db = Manager::instance().make_Database ("test",
-                scheduler, j, 2, tempParams, beast::StringPairArray ());
+                scheduler, j, 2, tempParams, Section ());

             // Read it back in
             Batch copy;
@@ -50,11 +50,12 @@ public:
     TestFamily (beast::Journal j)
         : treecache_ ("TreeNodeCache", 65536, 60, clock_, j)
         , fullbelow_ ("full_below", clock_)
-        , db_(NodeStore::Manager::instance().make_Database(
-            "test", scheduler_, j, 1,
-            parseDelimitedKeyValueString(
-                "type=memory|Path=SHAMap_test")))
     {
+        Section testSection;
+        testSection.set("type", "memory");
+        testSection.set("Path", "SHAMap_test");
+        db_ = NodeStore::Manager::instance ().make_Database (
+            "test", scheduler_, j, 1, testSection);
     }

     beast::manual_clock <std::chrono::steady_clock>