Add NuDB backend:

The NuDB database backend is a high-performance key/value store. It is
offered as an alternative to RocksDB on Mac and Linux deployments and is
the preferred backend for Windows deployments. The LevelDB backend is
deprecated on all platforms.

This commit includes the following changes:

* Add a Backend::verify API for performing consistency checks
* Add Database::close so the caller can catch exceptions
* Improve the NodeStore Timing test so that it creates a simulated workload
Author: Vinnie Falco
Date:   2015-01-12 13:07:38 -08:00
parent 2a3f2ca28d
commit 94629edb9b
10 changed files with 635 additions and 19 deletions
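
The new interface members can be exercised at a call site roughly as follows. This sketch is illustrative only and is not part of the commit; it relies on the Backend members visible in the NuDBFactory.cpp diff below (verify() and close()), and the include path is assumed.

#include <ripple/nodestore/Backend.h>

// Hypothetical caller: "backend" is any Backend instance, for example one
// produced by NuDBFactory::createInstance in the diff below.
void checkThenClose (ripple::NodeStore::Backend& backend)
{
    // New in this commit: run the backend's consistency checks.
    backend.verify();

    // Close explicitly so that any exception surfaces here instead of
    // escaping from the destructor.
    backend.close();
}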

View File

@@ -807,6 +807,71 @@
<ClCompile Include="..\..\src\beast\beast\net\tests\IPEndpoint.test.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\nudb.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\create.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\arena.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\bucket.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\buffers.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\bulkio.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\cache.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\config.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\field.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\format.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\gentex.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\pool.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\posix_file.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\stream.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\win32_file.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\error.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\file.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\mode.h">
</ClInclude>
<ClCompile Include="..\..\src\beast\beast\nudb\nudb.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<None Include="..\..\src\beast\beast\nudb\README.md">
</None>
<ClInclude Include="..\..\src\beast\beast\nudb\recover.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\store.h">
</ClInclude>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\callgrind_test.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\nudb\tests\common.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\tests\fail_file.h">
</ClInclude>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\recover_test.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\store_test.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\verify_test.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\nudb\verify.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\visit.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\random\rngfill.h">
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\random\xor_shift_engine.h">
@@ -2450,6 +2515,9 @@
<ClCompile Include="..\..\src\ripple\nodestore\backend\MemoryFactory.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\ripple\nodestore\backend\NuDBFactory.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\ripple\nodestore\backend\NullFactory.cpp">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
@@ -3324,8 +3392,8 @@
<ClCompile Include="..\..\src\ripple\unity\net.cpp">
</ClCompile>
<ClCompile Include="..\..\src\ripple\unity\nodestore.cpp">
<AdditionalIncludeDirectories Condition="'$(Configuration)|$(Platform)'=='debug|x64'">..\..\src\leveldb\include;..\..\src\rocksdb2\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories Condition="'$(Configuration)|$(Platform)'=='release|x64'">..\..\src\leveldb\include;..\..\src\rocksdb2\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories Condition="'$(Configuration)|$(Platform)'=='debug|x64'">..\..\src\leveldb\include;..\..\src\rocksdb2\include;..\..\src\snappy\config;..\..\src\snappy\snappy;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories Condition="'$(Configuration)|$(Platform)'=='release|x64'">..\..\src\leveldb\include;..\..\src\rocksdb2\include;..\..\src\snappy\config;..\..\src\snappy\snappy;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<ClCompile Include="..\..\src\ripple\unity\overlay.cpp">
</ClCompile>

View File

@@ -169,6 +169,15 @@
<Filter Include="beast\net\tests">
<UniqueIdentifier>{EADD6FA3-A535-01B1-8B05-B6363E6AE41E}</UniqueIdentifier>
</Filter>
<Filter Include="beast\nudb">
<UniqueIdentifier>{D20F5803-9150-FD61-2E8F-973EB53E8674}</UniqueIdentifier>
</Filter>
<Filter Include="beast\nudb\detail">
<UniqueIdentifier>{5CE9DE06-3D47-E45D-4D84-C2FEF2E1D821}</UniqueIdentifier>
</Filter>
<Filter Include="beast\nudb\tests">
<UniqueIdentifier>{ACEF9E32-BEA2-86E3-BDE3-E772564EA55B}</UniqueIdentifier>
</Filter>
<Filter Include="beast\random">
<UniqueIdentifier>{94B0990A-9ABE-B1EC-C220-83FD8C2F529F}</UniqueIdentifier>
</Filter>
@@ -1461,6 +1470,96 @@
<ClCompile Include="..\..\src\beast\beast\net\tests\IPEndpoint.test.cpp">
<Filter>beast\net\tests</Filter>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\nudb.h">
<Filter>beast</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\create.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\arena.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\bucket.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\buffers.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\bulkio.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\cache.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\config.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\field.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\format.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\gentex.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\pool.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\posix_file.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\stream.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\detail\win32_file.h">
<Filter>beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\error.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\file.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\mode.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClCompile Include="..\..\src\beast\beast\nudb\nudb.cpp">
<Filter>beast\nudb</Filter>
</ClCompile>
<None Include="..\..\src\beast\beast\nudb\README.md">
<Filter>beast\nudb</Filter>
</None>
<ClInclude Include="..\..\src\beast\beast\nudb\recover.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\store.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\callgrind_test.cpp">
<Filter>beast\nudb\tests</Filter>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\nudb\tests\common.h">
<Filter>beast\nudb\tests</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\tests\fail_file.h">
<Filter>beast\nudb\tests</Filter>
</ClInclude>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\recover_test.cpp">
<Filter>beast\nudb\tests</Filter>
</ClCompile>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\store_test.cpp">
<Filter>beast\nudb\tests</Filter>
</ClCompile>
<ClCompile Include="..\..\src\beast\beast\nudb\tests\verify_test.cpp">
<Filter>beast\nudb\tests</Filter>
</ClCompile>
<ClInclude Include="..\..\src\beast\beast\nudb\verify.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\nudb\visit.h">
<Filter>beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\beast\random\rngfill.h">
<Filter>beast\random</Filter>
</ClInclude>
@@ -3498,6 +3597,9 @@
<ClCompile Include="..\..\src\ripple\nodestore\backend\MemoryFactory.cpp">
<Filter>ripple\nodestore\backend</Filter>
</ClCompile>
<ClCompile Include="..\..\src\ripple\nodestore\backend\NuDBFactory.cpp">
<Filter>ripple\nodestore\backend</Filter>
</ClCompile>
<ClCompile Include="..\..\src\ripple\nodestore\backend\NullFactory.cpp">
<Filter>ripple\nodestore\backend</Filter>
</ClCompile>

View File

@@ -616,8 +616,9 @@ for tu_style in ['classic', 'unity']:
*list_sources('src/ripple/nodestore', '.cpp'),
CPPPATH=[
'src/leveldb/include',
#'src/hyperleveldb/include', # hyper
'src/rocksdb2/include',
'src/snappy/snappy',
'src/snappy/config',
])
else:
object_builder.add_source_files(
@@ -645,9 +646,10 @@ for tu_style in ['classic', 'unity']:
object_builder.add_source_files(
'src/ripple/unity/nodestore.cpp',
CPPPATH=[
'src/leveldb/include',
#'src/hyperleveldb/include', # hyper
'src/leveldb/include',
'src/rocksdb2/include',
'src/snappy/snappy',
'src/snappy/config',
])
git_commit_tag = {}

View File

@@ -665,10 +665,11 @@
#
# Choices for 'type' (not case-sensitive)
#   RocksDB         Use Facebook's RocksDB database (preferred)
#   HyperLevelDB    Use an improved version of LevelDB
#   SQLite          Use SQLite
#   LevelDB         Use Google's LevelDB database (deprecated)
#   none            Use no backend
#   NuDB            Use Ripple Labs' NuDB (Windows preferred)
#   HyperLevelDB    (Deprecated)
#   SQLite          (Deprecated)
#   LevelDB         (Deprecated)
#   none            (No backend)
#
# Required keys:
#   path            Location to store the database (all types)
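
An illustrative [node_db] stanza selecting the new backend might look like the following; the path is a placeholder for a deployment-specific location and is not part of this commit:

[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb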

View File

@@ -0,0 +1,438 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <BeastConfig.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <beast/nudb.h>
#include <beast/nudb/visit.h>
#include <snappy.h>
#include <boost/filesystem.hpp>
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdint>
#include <exception>
#include <memory>
namespace ripple {
namespace NodeStore {
class NuDBBackend
    : public Backend
{
public:
    enum
    {
        // This needs to be tuned for the
        // distribution of data sizes.
        arena_alloc_size = 16 * 1024 * 1024,

        // Version 1
        // No compression
        //
        typeOne = 1,

        // Version 2
        // Snappy compression
        typeTwo = 2,

        currentType = typeTwo
    };

    beast::Journal journal_;
    size_t const keyBytes_;
    std::string const name_;
    beast::nudb::store db_;
    std::atomic <bool> deletePath_;
    Scheduler& scheduler_;

    NuDBBackend (int keyBytes, Parameters const& keyValues,
        Scheduler& scheduler, beast::Journal journal)
        : journal_ (journal)
        , keyBytes_ (keyBytes)
        , name_ (keyValues ["path"].toStdString ())
        , deletePath_(false)
        , scheduler_ (scheduler)
    {
        if (name_.empty())
            throw std::runtime_error (
                "nodestore: Missing path in NuDB backend");
        auto const folder = boost::filesystem::path (name_);
        boost::filesystem::create_directories (folder);
        auto const dp = (folder / "nudb.dat").string();
        auto const kp = (folder / "nudb.key").string ();
        auto const lp = (folder / "nudb.log").string ();
        using beast::nudb::make_salt;
        beast::nudb::create (dp, kp, lp,
            currentType, make_salt(), keyBytes,
            beast::nudb::block_size(kp),
            0.50);
        try
        {
            if (! db_.open (dp, kp, lp,
                    arena_alloc_size))
                throw std::runtime_error(
                    "nodestore: open failed");
            if (db_.appnum() != currentType)
                throw std::runtime_error(
                    "nodestore: unknown appnum");
        }
        catch (std::exception const& e)
        {
            // log and terminate?
            std::cerr << e.what();
            std::terminate();
        }
    }

    ~NuDBBackend ()
    {
        close();
    }

    std::string
    getName()
    {
        return name_;
    }

    void
    close() override
    {
        if (db_.is_open())
        {
            db_.close();
            if (deletePath_)
            {
                boost::filesystem::remove_all (name_);
            }
        }
    }
    //--------------------------------------------------------------------------

    class Buffer
    {
    private:
        std::size_t size_ = 0;
        std::size_t capacity_ = 0;
        std::unique_ptr <std::uint8_t[]> buf_;

    public:
        Buffer() = default;
        Buffer (Buffer const&) = delete;
        Buffer& operator= (Buffer const&) = delete;

        explicit
        Buffer (std::size_t n)
        {
            resize (n);
        }

        std::size_t
        size() const
        {
            return size_;
        }

        std::size_t
        capacity() const
        {
            return capacity_;
        }

        void*
        get()
        {
            return buf_.get();
        }

        void
        resize (std::size_t n)
        {
            if (capacity_ < n)
            {
                capacity_ = beast::nudb::detail::ceil_pow2(n);
                buf_.reset (new std::uint8_t[capacity_]);
            }
            size_ = n;
        }

        // Meet the requirements of BufferFactory
        void*
        operator() (std::size_t n)
        {
            resize(n);
            return get();
        }
    };
    //--------------------------------------------------------------------------

    Status
    fetch1 (void const* key,
        std::shared_ptr <NodeObject>* pno)
    {
        pno->reset();
        std::size_t bytes;
        std::unique_ptr <std::uint8_t[]> data;
        if (! db_.fetch (key,
            [&data, &bytes](std::size_t n)
            {
                bytes = n;
                data.reset(new std::uint8_t[bytes]);
                return data.get();
            }))
            return notFound;
        DecodedBlob decoded (key, data.get(), bytes);
        if (! decoded.wasOk ())
            return dataCorrupt;
        *pno = decoded.createObject();
        return ok;
    }

    void
    insert1 (void const* key, void const* data,
        std::size_t size)
    {
        db_.insert (key, data, size);
    }

    //--------------------------------------------------------------------------

    Status
    fetch2 (void const* key,
        std::shared_ptr <NodeObject>* pno)
    {
        pno->reset();
        std::size_t actual;
        std::unique_ptr <char[]> compressed;
        if (! db_.fetch (key,
            [&](std::size_t n)
            {
                actual = n;
                compressed.reset(
                    new char[n]);
                return compressed.get();
            }))
            return notFound;
        std::size_t size;
        if (! snappy::GetUncompressedLength(
                (char const*)compressed.get(),
                actual, &size))
            return dataCorrupt;
        std::unique_ptr <char[]> data (new char[size]);
        snappy::RawUncompress (compressed.get(),
            actual, data.get());
        DecodedBlob decoded (key, data.get(), size);
        if (! decoded.wasOk ())
            return dataCorrupt;
        *pno = decoded.createObject();
        return ok;
    }

    void
    insert2 (void const* key, void const* data,
        std::size_t size)
    {
        std::unique_ptr<char> buf (
            new char[snappy::MaxCompressedLength(size)]);
        std::size_t actual;
        snappy::RawCompress ((char const*)data, size,
            buf.get(), &actual);
        db_.insert (key, buf.get(), actual);
    }

    //--------------------------------------------------------------------------

    Status
    fetch (void const* key, NodeObject::Ptr* pno)
    {
        Buffer b1;
        if (! db_.fetch (key, b1))
            return notFound;
        switch (db_.appnum())
        {
        case typeOne: return fetch1 (key, pno);
        case typeTwo: return fetch2 (key, pno);
        }
        throw std::runtime_error(
            "nodestore: unknown appnum");
        return notFound;
    }

    void
    do_insert (std::shared_ptr <NodeObject> const& no)
    {
        EncodedBlob e;
        e.prepare (no);
        switch (db_.appnum())
        {
        case typeOne: return insert1 (e.getKey(), e.getData(), e.getSize());
        case typeTwo: return insert2 (e.getKey(), e.getData(), e.getSize());
        }
        throw std::runtime_error(
            "nodestore: unknown appnum");
    }
    void
    store (std::shared_ptr <NodeObject> const& no) override
    {
        BatchWriteReport report;
        report.writeCount = 1;
        auto const start =
            std::chrono::steady_clock::now();
        do_insert (no);
        report.elapsed = std::chrono::duration_cast <
            std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - start);
        scheduler_.onBatchWrite (report);
    }

    void
    storeBatch (Batch const& batch) override
    {
        BatchWriteReport report;
        EncodedBlob encoded;
        report.writeCount = batch.size();
        auto const start =
            std::chrono::steady_clock::now();
        for (auto const& e : batch)
            do_insert (e);
        report.elapsed = std::chrono::duration_cast <
            std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - start);
        scheduler_.onBatchWrite (report);
    }

    void
    for_each (std::function <void(NodeObject::Ptr)> f)
    {
        auto const dp = db_.dat_path();
        auto const kp = db_.key_path();
        auto const lp = db_.log_path();
        auto const appnum = db_.appnum();
        db_.close();
        beast::nudb::visit (dp,
            [&](
                void const* key, std::size_t key_bytes,
                void const* data, std::size_t size)
            {
                switch (appnum)
                {
                case typeOne:
                {
                    DecodedBlob decoded (key, data, size);
                    if (! decoded.wasOk ())
                        return false;
                    f (decoded.createObject());
                    break;
                }
                case typeTwo:
                {
                    std::size_t actual;
                    if (! snappy::GetUncompressedLength(
                            (char const*)data, size, &actual))
                        return false;
                    std::unique_ptr <char[]> buf (new char[actual]);
                    if (! snappy::RawUncompress ((char const*)data,
                            size, buf.get()))
                        return false;
                    DecodedBlob decoded (key, buf.get(), actual);
                    if (! decoded.wasOk ())
                        return false;
                    f (decoded.createObject());
                    break;
                }
                }
                return true;
            });
        db_.open (dp, kp, lp,
            arena_alloc_size);
    }

    int
    getWriteLoad ()
    {
        return 0;
    }

    void
    setDeletePath() override
    {
        deletePath_ = true;
    }

    void
    verify() override
    {
        auto const dp = db_.dat_path();
        auto const kp = db_.key_path();
        auto const lp = db_.log_path();
        db_.close();
        beast::nudb::verify (dp, kp);
        db_.open (dp, kp, lp,
            arena_alloc_size);
    }
};
//------------------------------------------------------------------------------

class NuDBFactory : public Factory
{
public:
    NuDBFactory()
    {
        Manager::instance().insert(*this);
    }

    ~NuDBFactory()
    {
        Manager::instance().erase(*this);
    }

    std::string
    getName() const
    {
        return "NuDB";
    }

    std::unique_ptr <Backend>
    createInstance (
        size_t keyBytes,
        Parameters const& keyValues,
        Scheduler& scheduler,
        beast::Journal journal)
    {
        return std::make_unique <NuDBBackend> (
            keyBytes, keyValues, scheduler, journal);
    }
};

static NuDBFactory nuDBFactory;

}
}
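
The backend above drives a beast::nudb::store through create, open, insert, fetch, and close, plus the offline verify pass. Below is a minimal standalone sketch of that same API; the file names, the 32-byte key size, the appnum value, and the 8 MB arena size are illustrative choices, not taken from this commit.

#include <beast/nudb.h>

#include <array>
#include <cstdint>
#include <memory>
#include <string>

void nudb_sketch()
{
    std::string const dp = "example.dat";   // data file
    std::string const kp = "example.key";   // key file
    std::string const lp = "example.log";   // log file
    std::size_t const key_size = 32;        // fixed-length keys

    // Create the three files (assumes they do not already exist):
    // appnum, salt, key size, block size, load factor
    beast::nudb::create (dp, kp, lp, 1,
        beast::nudb::make_salt(), key_size,
        beast::nudb::block_size (kp), 0.50);

    beast::nudb::store db;
    if (! db.open (dp, kp, lp, 8 * 1024 * 1024))
        return;

    std::array <std::uint8_t, 32> key {};
    std::string const value = "hello";
    db.insert (key.data(), value.data(), value.size());

    // fetch takes a buffer factory: a callable that receives the stored
    // value's size and returns storage for it
    std::unique_ptr <std::uint8_t[]> buf;
    db.fetch (key.data(),
        [&buf](std::size_t n)
        {
            buf.reset (new std::uint8_t[n]);
            return buf.get();
        });

    db.close();
    beast::nudb::verify (dp, kp);            // offline consistency check
}

The buffer-factory form of fetch leaves allocation policy to the caller, which is exactly what NuDBBackend's Buffer helper above provides ("Meet the requirements of BufferFactory").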

View File

@@ -94,6 +94,8 @@ public:
{
int const seedValue = 50;
testBackend ("nudb", seedValue);
testBackend ("leveldb", seedValue);
#if RIPPLE_HYPERLEVELDB_AVAILABLE

View File

@@ -26,7 +26,7 @@
namespace ripple {
namespace NodeStore {
class NodeStoreDatabase_test : public TestBase
class Database_test : public TestBase
{
public:
void testImport (std::string const& destBackendType,
@@ -182,6 +182,8 @@ public:
void runBackendTests (bool useEphemeralDatabase, std::int64_t const seedValue)
{
testNodeStore ("nudb", useEphemeralDatabase, true, seedValue);
testNodeStore ("leveldb", useEphemeralDatabase, true, seedValue);
#if RIPPLE_HYPERLEVELDB_AVAILABLE
@@ -201,8 +203,10 @@ public:
void runImportTests (std::int64_t const seedValue)
{
testImport ("leveldb", "leveldb", seedValue);
testImport ("nudb", "nudb", seedValue);
testImport ("leveldb", "leveldb", seedValue);
#if RIPPLE_HYPERLEVELDB_AVAILABLE
testImport ("hyperleveldb", "hyperleveldb", seedValue);
#endif
@@ -232,7 +236,7 @@ public:
}
};
BEAST_DEFINE_TESTSUITE(NodeStoreDatabase,ripple_core,ripple);
BEAST_DEFINE_TESTSUITE(Database,NodeStore,ripple);
}
}

View File

@@ -147,7 +147,7 @@ public:
missingNodePercent = 20
};
std::size_t const default_repeat = 1;
std::size_t const default_repeat = 3;
#ifndef NDEBUG
std::size_t const default_items = 10000;
#else
@@ -708,10 +708,7 @@ public:
*/
std::string default_args =
#ifdef _MSC_VER
"type=leveldb"
#endif
//"type=nudb"
"type=nudb"
#if RIPPLE_ROCKSDB_AVAILABLE
";type=rocksdb,open_files=2000,filter_bits=12,cache_mb=256,"
"file_size_mb=8,file_size_mult=2"

View File

@@ -40,6 +40,7 @@
#include <beast/http/HTTP.unity.cpp>
#include <beast/insight/Insight.unity.cpp>
#include <beast/net/Net.unity.cpp>
//#include <beast/nudb/nudb.cpp>
#include <beast/streams/streams.unity.cpp>
#include <beast/strings/Strings.unity.cpp>
#include <beast/threads/Threads.unity.cpp>

View File

@@ -19,9 +19,12 @@
#include <BeastConfig.h>
#include <beast/nudb/nudb.cpp>
#include <ripple/nodestore/backend/HyperDBFactory.cpp>
#include <ripple/nodestore/backend/LevelDBFactory.cpp>
#include <ripple/nodestore/backend/MemoryFactory.cpp>
#include <ripple/nodestore/backend/NuDBFactory.cpp>
#include <ripple/nodestore/backend/NullFactory.cpp>
#include <ripple/nodestore/backend/RocksDBFactory.cpp>
@@ -38,5 +41,3 @@
#include <ripple/nodestore/tests/Basics.test.cpp>
#include <ripple/nodestore/tests/Database.test.cpp>
#include <ripple/nodestore/tests/Timing.test.cpp>