Mirror of https://github.com/Xahau/xahaud.git

NodeStore improvements:

* Add Backend::verify API for doing consistency checks
* Add Database::close so the caller can catch exceptions
* Improved Timing test for NodeStore creates a simulated workload

Committed by: Edward Hennis
Parent: 67b9cf9e82
Commit: 749e083e6e

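The headline changes are a new Backend::verify hook and an explicit Database::close. The sketch below is illustrative only, not code from this commit: it assumes a `db` obtained from NodeStore::Manager::make_Database and shows the calling pattern the new close() is meant to enable, where shutdown errors can be caught instead of being lost inside a destructor.

    // Hypothetical caller-side shutdown path (assumed names, not from the diff).
    try
    {
        db->close();    // new in this commit: closes the primary and (if set) fast backends
    }
    catch (std::exception const& e)
    {
        std::cerr << "NodeStore close failed: " << e.what() << "\n";
    }
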
@@ -794,6 +794,8 @@
<ClCompile Include="..\..\src\beast\beast\net\tests\IPEndpoint.test.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\random\xor_shift_engine.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\SmartPtr.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\smart_ptr\SharedObject.h">
@@ -916,7 +918,8 @@
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\unit_test\suite_list.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\unit_test\thread.h" />
<ClInclude Include="..\..\src\beast\beast\unit_test\thread.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\utility\ci_char_traits.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\utility\Debug.h">

@@ -163,6 +163,9 @@
<Filter Include="beast\net\tests">
<UniqueIdentifier>{EADD6FA3-A535-01B1-8B05-B6363E6AE41E}</UniqueIdentifier>
</Filter>
<Filter Include="beast\random">
<UniqueIdentifier>{94B0990A-9ABE-B1EC-C220-83FD8C2F529F}</UniqueIdentifier>
</Filter>
<Filter Include="beast\smart_ptr">
<UniqueIdentifier>{C8013957-E624-4A24-C0F8-CBAAC144AF09}</UniqueIdentifier>
</Filter>
@@ -1431,6 +1434,9 @@
<ClCompile Include="..\..\src\beast\beast\net\tests\IPEndpoint.test.cpp">
<Filter>beast\net\tests</Filter>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\random\xor_shift_engine.h">
<Filter>beast\random</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\SmartPtr.h">
<Filter>beast</Filter>
</ClInclude>
@@ -1596,6 +1602,9 @@
<ClInclude Include="..\..\src\beast\beast\unit_test\suite_list.h">
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\unit_test\thread.h">
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\utility\ci_char_traits.h">
<Filter>beast\utility</Filter>
</ClInclude>
@@ -5443,9 +5452,4 @@
<Filter>websocket\src</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\src\beast\beast\unit_test\thread.h">
<Filter>beast\unit_test</Filter>
</ClInclude>
</ItemGroup>
</Project>

@@ -277,6 +277,8 @@ def config_env(toolchain, variant, env):
'-std=c++11',
'-Wno-invalid-offsetof'])

env.Append(CPPDEFINES=['_FILE_OFFSET_BITS=64'])

if Beast.system.osx:
env.Append(CPPDEFINES={
'BEAST_COMPILE_OBJECTIVE_CPP': 1,
@@ -304,11 +306,10 @@ def config_env(toolchain, variant, env):
'boost_program_options',
'boost_regex',
'boost_system',
'boost_thread'
]
# We prefer static libraries for boost
if env.get('BOOST_ROOT'):
# Need to add boost_thread. Not needed when dynamic linking is used.
boost_libs += ['boost_thread']
static_libs = ['%s/stage/lib/lib%s.a' % (env['BOOST_ROOT'], l) for
l in boost_libs]
if all(os.path.exists(f) for f in static_libs):

@@ -149,7 +149,7 @@ static
void
setupConfigForUnitTests (Config* config)
{
config->nodeDatabase = parseDelimitedKeyValueString ("type=memory");
config->nodeDatabase = parseDelimitedKeyValueString ("type=memory|path=main");
config->ephemeralNodeDatabase = beast::StringPairArray ();
config->importNodeDatabase = beast::StringPairArray ();
}

@@ -73,6 +73,13 @@ public:
return m_name;
}

void
close() override
{
// VFALCO how do we do this?
assert(false);
}

//--------------------------------------------------------------------------

NodeStore::Status fetch (void const* key, NodeObject::Ptr* pObject)
@@ -221,6 +228,11 @@ public:
return type;
}

void
verify() override
{
}

private:
std::string const m_name;
std::unique_ptr <DatabaseCon> m_db;

@@ -20,6 +20,7 @@
#ifndef RIPPLE_BASICS_BASICCONFIG_H_INCLUDED
#define RIPPLE_BASICS_BASICCONFIG_H_INCLUDED

#include <beast/container/const_container.h>
#include <beast/utility/ci_char_traits.h>
#include <boost/lexical_cast.hpp>
#include <map>
@@ -37,16 +38,18 @@ using IniFileSections = std::map<std::string, std::vector<std::string>>;
A configuration file contains zero or more sections.
*/
class Section
: public beast::const_container <
std::map <std::string, std::string, beast::ci_less>>
{
private:
std::string name_;
std::vector <std::string> lines_;
std::vector <std::string> values_;
std::map <std::string, std::string, beast::ci_less> map_;

public:
/** Create an empty section. */
Section (std::string const& name);
explicit
Section (std::string const& name = "");

/** Returns the name of this section. */
std::string const&
@@ -55,13 +58,6 @@ public:
return name_;
}

/** Returns the number of key/value pairs. */
std::size_t
keys() const
{
return map_.size();
}

/** Returns all the lines in the section.
This includes everything.
*/

@@ -32,7 +32,7 @@ Section::Section (std::string const& name)
void
Section::set (std::string const& key, std::string const& value)
{
auto const result = map_.emplace (key, value);
auto const result = cont().emplace (key, value);
if (! result.second)
result.first->second = value;
}
@@ -68,14 +68,14 @@ Section::append (std::vector <std::string> const& lines)
bool
Section::exists (std::string const& name) const
{
return map_.find (name) != map_.end();
return cont().find (name) != cont().end();
}

std::pair <std::string, bool>
Section::find (std::string const& name) const
{
auto const iter = map_.find (name);
if (iter == map_.end())
auto const iter = cont().find (name);
if (iter == cont().end())
return {{}, false};
return {iter->second, true};
}
@@ -83,7 +83,7 @@ Section::find (std::string const& name) const
std::ostream&
operator<< (std::ostream& os, Section const& section)
{
for (auto const& kv : section.map_)
for (auto const& kv : section.cont())
os << kv.first << "=" << kv.second << "\n";
return os;
}
@@ -113,7 +113,7 @@ BasicConfig::remap (std::string const& legacy_section,
auto const iter = map_.find (legacy_section);
if (iter == map_.end())
return;
if (iter->second.keys() != 0)
if (iter->second.size() != 0)
return;
if (iter->second.lines().size() != 1)
return;

@@ -50,6 +50,11 @@ public:
*/
virtual std::string getName() = 0;

/** Close the backend.
This allows the caller to catch exceptions.
*/
virtual void close() = 0;

/** Fetch a single object.
If the object is not found or an error is encountered, the
result will indicate the condition.
@@ -87,6 +92,9 @@ public:

/** Remove contents on disk upon destruction. */
virtual void setDeletePath() = 0;

/** Perform consistency checks on database .*/
virtual void verify() = 0;
};

}

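Since close() and verify() are pure virtuals on Backend, every concrete backend in this commit gains both overrides. A minimal conforming stub, using a hypothetical class name and mirroring what the Null and Memory backends further down actually do, looks roughly like this (sketch only, not part of the diff):

    // Smallest set of new overrides a Backend subclass now needs.
    class ExampleBackend : public Backend
    {
    public:
        void
        close() override
        {
            // Release the database handle; the disk-backed backends also
            // remove the on-disk path here when setDeletePath() was called.
        }

        void
        verify() override
        {
            // Consistency checks; left empty by the backends in this commit.
        }

        // ... getName, fetch, store, storeBatch, for_each, etc. omitted
    };
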
@@ -55,6 +55,11 @@ public:
*/
virtual std::string getName () const = 0;

/** Close the database.
This allows the caller to catch exceptions.
*/
virtual void close() = 0;

/** Fetch an object.
If the object is known to be not in the database, isn't found in the
database during the fetch, or failed to load correctly during the fetch,

@@ -22,6 +22,8 @@

#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/DatabaseRotating.h>
#include <ripple/basics/BasicConfig.h>
#include <beast/utility/Journal.h>

namespace ripple {
namespace NodeStore {
@@ -98,6 +100,13 @@ public:
beast::Journal journal) = 0;
};

//------------------------------------------------------------------------------

/** Create a Backend. */
std::unique_ptr <Backend>
make_Backend (Section const& config,
Scheduler& scheduler, beast::Journal journal);

}
}

@@ -101,12 +101,7 @@ public:

~HyperDBBackend ()
{
if (m_deletePath)
{
m_db.reset();
boost::filesystem::path dir = m_name;
boost::filesystem::remove_all (dir);
}
close();
}

std::string
@@ -115,6 +110,20 @@ public:
return m_name;
}

void
close() override
{
if (m_db)
{
m_db.reset();
if (m_deletePath)
{
boost::filesystem::path dir = m_name;
boost::filesystem::remove_all (dir);
}
}
}

//--------------------------------------------------------------------------

Status
@@ -251,6 +260,11 @@ public:
{
storeBatch (batch);
}

void
verify() override
{
}
};

//------------------------------------------------------------------------------

@@ -108,12 +108,7 @@ public:

~LevelDBBackend()
{
if (m_deletePath)
{
m_db.reset();
boost::filesystem::path dir = m_name;
boost::filesystem::remove_all (dir);
}
close();
}

std::string
@@ -122,6 +117,20 @@ public:
return m_name;
}

void
close() override
{
if (m_db)
{
m_db.reset();
if (m_deletePath)
{
boost::filesystem::path dir = m_name;
boost::filesystem::remove_all (dir);
}
}
}

//--------------------------------------------------------------------------

Status
@@ -258,6 +267,11 @@ public:
{
storeBatch (batch);
}

void
verify() override
{
}
};

//------------------------------------------------------------------------------

@@ -20,37 +20,93 @@
#include <BeastConfig.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <beast/utility/ci_char_traits.h>
#include <beast/cxx14/memory.h> // <memory>
#include <map>
#include <mutex>

namespace ripple {
namespace NodeStore {

struct MemoryDB
{
std::mutex mutex;
bool open = false;
std::map <uint256 const, NodeObject::Ptr> table;
};

class MemoryFactory : public Factory
{
private:
std::mutex mutex_;
std::map <std::string, MemoryDB, beast::ci_less> map_;

public:
MemoryFactory();
~MemoryFactory();

std::string
getName() const;

std::unique_ptr <Backend>
createInstance (
size_t keyBytes,
Parameters const& keyValues,
Scheduler& scheduler,
beast::Journal journal);

MemoryDB&
open (std::string const& path)
{
std::lock_guard<std::mutex> _(mutex_);
auto const result = map_.emplace (std::piecewise_construct,
std::make_tuple(path), std::make_tuple());
MemoryDB& db = result.first->second;
if (db.open)
throw std::runtime_error("already open");
return db;
}
};

static MemoryFactory memoryFactory;

//------------------------------------------------------------------------------

class MemoryBackend : public Backend
{
public:
typedef std::map <uint256 const, NodeObject::Ptr> Map;
beast::Journal m_journal;
size_t const m_keyBytes;
Map m_map;
Scheduler& m_scheduler;
private:
using Map = std::map <uint256 const, NodeObject::Ptr>;

std::string name_;
beast::Journal journal_;
MemoryDB* db_;

public:
MemoryBackend (size_t keyBytes, Parameters const& keyValues,
Scheduler& scheduler, beast::Journal journal)
: m_journal (journal)
, m_keyBytes (keyBytes)
, m_scheduler (scheduler)
: name_ (keyValues ["path"].toStdString ())
, journal_ (journal)
{
if (name_.empty())
throw std::runtime_error ("Missing path in Memory backend");
db_ = &memoryFactory.open(name_);
}

~MemoryBackend ()
{
close();
}

std::string
getName ()
{
return "memory";
return name_;
}

void
close() override
{
db_ = nullptr;
}

//--------------------------------------------------------------------------
@@ -60,29 +116,23 @@ public:
{
uint256 const hash (uint256::fromVoid (key));

Map::iterator iter = m_map.find (hash);
std::lock_guard<std::mutex> _(db_->mutex);

if (iter != m_map.end ())
Map::iterator iter = db_->table.find (hash);
if (iter == db_->table.end())
{
pObject->reset();
return notFound;
}
*pObject = iter->second;
}
else
{
pObject->reset ();
}

return ok;
}

void
store (NodeObject::ref object)
{
Map::iterator iter = m_map.find (object->getHash ());

if (iter == m_map.end ())
{
m_map.insert (std::make_pair (object->getHash (), object));
}
std::lock_guard<std::mutex> _(db_->mutex);
db_->table.emplace (object->getHash(), object);
}

void
@@ -95,56 +145,55 @@ public:
void
for_each (std::function <void(NodeObject::Ptr)> f)
{
for (auto const& e : m_map)
for (auto const& e : db_->table)
f (e.second);
}

int
getWriteLoad ()
getWriteLoad()
{
return 0;
}

void
setDeletePath() override {}
setDeletePath() override
{
}

void
verify() override
{
}
};

//------------------------------------------------------------------------------

class MemoryFactory : public Factory
MemoryFactory::MemoryFactory()
{
public:
MemoryFactory()
{
Manager::instance().insert(*this);
}
}

~MemoryFactory()
{
MemoryFactory::~MemoryFactory()
{
Manager::instance().erase(*this);
}
}

std::string
getName () const
{
std::string
MemoryFactory::getName() const
{
return "Memory";
}
}

std::unique_ptr <Backend>
createInstance (
std::unique_ptr <Backend>
MemoryFactory::createInstance (
size_t keyBytes,
Parameters const& keyValues,
Scheduler& scheduler,
beast::Journal journal)
{
{
return std::make_unique <MemoryBackend> (
keyBytes, keyValues, scheduler, journal);
}
};

//------------------------------------------------------------------------------

static MemoryFactory memoryFactory;
}

}
}

@@ -42,6 +42,11 @@ public:
return std::string ();
}

void
close() override
{
}

Status
fetch (void const*, NodeObject::Ptr*)
{
@@ -70,7 +75,14 @@ public:
}

void
setDeletePath() override {}
setDeletePath() override
{
}

void
verify() override
{
}

private:
};

@@ -174,12 +174,7 @@ public:

~RocksDBBackend ()
{
if (m_deletePath)
{
m_db.reset();
boost::filesystem::path dir = m_name;
boost::filesystem::remove_all (dir);
}
close();
}

std::string
@@ -188,6 +183,20 @@ public:
return m_name;
}

void
close() override
{
if (m_db)
{
m_db.reset();
if (m_deletePath)
{
boost::filesystem::path dir = m_name;
boost::filesystem::remove_all (dir);
}
}
}

//--------------------------------------------------------------------------

Status
@@ -330,6 +339,11 @@ public:
{
storeBatch (batch);
}

void
verify() override
{
}
};

//------------------------------------------------------------------------------

@@ -109,6 +109,21 @@ public:
return m_backend->getName ();
}

void
close() override
{
if (m_backend)
{
m_backend->close();
m_backend = nullptr;
}
if (m_fastBackend)
{
m_fastBackend->close();
m_fastBackend = nullptr;
}
}

//------------------------------------------------------------------------------

bool asyncFetch (uint256 const& hash, NodeObject::pointer& object)

@@ -85,6 +85,13 @@ public:
return getWritableBackend()->getName();
}

void
close() override
{
// VFALCO TODO How do we close everything?
assert(false);
}

std::int32_t getWriteLoad() const override
{
return getWritableBackend()->getWriteLoad();

@@ -21,6 +21,7 @@
#include <ripple/nodestore/impl/ManagerImp.h>
#include <ripple/nodestore/impl/DatabaseImp.h>
#include <ripple/nodestore/impl/DatabaseRotatingImp.h>
#include <ripple/basics/StringUtilities.h>
#include <beast/utility/ci_char_traits.h>
#include <beast/cxx14/memory.h> // <memory>
#include <stdexcept>
@@ -160,5 +161,18 @@ Manager::instance()
return ManagerImp::instance();
}

//------------------------------------------------------------------------------

std::unique_ptr <Backend>
make_Backend (Section const& config,
Scheduler& scheduler, beast::Journal journal)
{
beast::StringPairArray v;
for (auto const& _ : config)
v.set (_.first, _.second);
return Manager::instance().make_Backend (
v, scheduler, journal);
}

}
}

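The new free make_Backend lets a caller go straight from a BasicConfig Section to a Backend. A hedged usage sketch follows; the key names follow the "type=memory|path=..." convention used by the tests in this commit, while DummyScheduler and the default-constructed beast::Journal are borrowed from the test code and are assumptions here, not part of this change.

    // Illustrative only: build a Section by hand and ask for a Backend.
    Section config ("node_db");
    config.set ("type", "memory");
    config.set ("path", "example");          // the memory backend now requires a path

    NodeStore::DummyScheduler scheduler;
    beast::Journal journal;
    auto backend = NodeStore::make_Backend (config, scheduler, journal);

    backend->verify();   // consistency checks (a no-op for most backends here)
    backend->close();    // explicit close so exceptions reach the caller
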
@@ -38,7 +38,8 @@ NodeObject::NodeObject (
mData = std::move (data);
}

NodeObject::Ptr NodeObject::createObject (
NodeObject::Ptr
NodeObject::createObject (
NodeObjectType type,
LedgerIndex ledgerIndex,
Blob&& data,

@@ -100,7 +100,7 @@ public:
testBackend ("hyperleveldb", seedValue);
#endif

#if RIPPLE_ROCKSDB_AVAILABLE
#if RIPPLE_ROCKSDB_AVAILABLE
testBackend ("rocksdb", seedValue);
#endif

(File diff suppressed because it is too large.)

@@ -121,7 +121,7 @@ public:
TreeNodeCache treeNodeCache ("test.tree_node_cache", 65536, 60, clock, j);
NodeStore::DummyScheduler scheduler;
auto db = NodeStore::Manager::instance().make_Database (
"test", scheduler, j, 0, parseDelimitedKeyValueString("type=memory"));
"test", scheduler, j, 0, parseDelimitedKeyValueString("type=memory|path=FetchPack"));

std::shared_ptr <Table> t1 (std::make_shared <Table> (
smtFREE, fullBelowCache, treeNodeCache, *db, Handler(), beast::Journal()));

@@ -62,7 +62,7 @@ public:
TreeNodeCache treeNodeCache ("test.tree_node_cache", 65536, 60, clock, j);
NodeStore::DummyScheduler scheduler;
auto db = NodeStore::Manager::instance().make_Database (
"test", scheduler, j, 0, parseDelimitedKeyValueString("type=memory"));
"test", scheduler, j, 0, parseDelimitedKeyValueString("type=memory|Path=SHAMap_test"));

// h3 and h4 differ only in the leaf, same terminal node (level 19)
uint256 h1, h2, h3, h4, h5;

@@ -114,7 +114,7 @@ public:
TreeNodeCache treeNodeCache ("test.tree_node_cache", 65536, 60, clock, j);
NodeStore::DummyScheduler scheduler;
auto db = NodeStore::Manager::instance().make_Database (
"test", scheduler, j, 1, parseDelimitedKeyValueString("type=memory"));
"test", scheduler, j, 1, parseDelimitedKeyValueString("type=memory|path=SHAMapSync_test"));

SHAMap source (smtFREE, fullBelowCache, treeNodeCache,
*db, Handler(), beast::Journal());

@@ -38,3 +38,5 @@
#include <ripple/nodestore/tests/Basics.test.cpp>
#include <ripple/nodestore/tests/Database.test.cpp>
#include <ripple/nodestore/tests/Timing.test.cpp>

@@ -56,7 +56,8 @@ exports.servers = {
'admin = allow',
'protocol = ws'),

'node_db': 'type=memory'
'node_db': lines('type=memory',
'path=integration')
}
};