Use NuDB context with backends

commit a988b3224f (parent 89b3bf0796)
Author: Miguel Portilla, 2019-02-28 16:40:53 -05:00
Committed by: Nik Bougalis
14 changed files with 215 additions and 143 deletions


@@ -11,8 +11,8 @@ env:
# to boost's .tar.gz.
- LCOV_ROOT=$HOME/lcov
- GDB_ROOT=$HOME/gdb
- BOOST_ROOT=$HOME/boost_1_67_0
- BOOST_URL='http://sourceforge.net/projects/boost/files/boost/1.67.0/boost_1_67_0.tar.gz'
- BOOST_ROOT=$HOME/boost_1_70_0
- BOOST_URL='http://sourceforge.net/projects/boost/files/boost/1.70.0/boost_1_70_0.tar.gz'
addons:
apt:


@@ -17,7 +17,7 @@ need these software components
| [Visual Studio 2017](README.md#install-visual-studio-2017)| 15.5.4 |
| [Git for Windows](README.md#install-git-for-windows)| 2.16.1 |
| [OpenSSL Library](README.md#install-openssl) | 1.0.2n |
| [Boost library](README.md#build-boost) | 1.67.0 |
| [Boost library](README.md#build-boost) | 1.70.0 |
| [CMake for Windows](README.md#optional-install-cmake-for-windows)* | 3.12 |
\* Only needed if not using the integrated CMake in VS 2017 and prefer generating dedicated project/solution files.
@@ -78,13 +78,13 @@ to get the correct 32-/64-bit variant.
### Build Boost
Boost 1.67 or later is required.
Boost 1.70 or later is required.
After [downloading boost](http://www.boost.org/users/download/) and unpacking it
to `c:\lib`. As of this writing, the most recent version of boost is 1.68.0,
which will unpack into a directory named `boost_1_68_0`. We recommended either
to `c:\lib`. As of this writing, the most recent version of boost is 1.70.0,
which will unpack into a directory named `boost_1_70_0`. We recommended either
renaming this directory to `boost`, or creating a junction link `mklink /J boost
boost_1_68_0`, so that you can more easily switch between versions.
boost_1_70_0`, so that you can more easily switch between versions.
Next, open **Developer Command Prompt** and type the following commands
@@ -214,7 +214,7 @@ execute the following commands within your `rippled` cloned repository:
```
mkdir build\cmake
cd build\cmake
cmake ..\.. -G"Visual Studio 15 2017 Win64" -DBOOST_ROOT="C:\lib\boost_1_68_0" -DOPENSSL_ROOT="C:\lib\OpenSSL-Win64"
cmake ..\.. -G"Visual Studio 15 2017 Win64" -DBOOST_ROOT="C:\lib\boost_1_70_0" -DOPENSSL_ROOT="C:\lib\OpenSSL-Win64"
```
Now launch Visual Studio 2017 and select **File | Open | Project/Solution**.
Navigate to the `build\cmake` folder created above and select the `rippled.sln`


@@ -25,14 +25,14 @@ protobuf will give errors.
### Build Boost
Boost 1.67 or later is required. We recommend downloading and compiling boost
Boost 1.70 or later is required. We recommend downloading and compiling boost
with the following process: After changing to the directory where
you wish to download and compile boost, run
```
$ wget https://dl.bintray.com/boostorg/release/1.68.0/source/boost_1_68_0.tar.gz
$ tar -xzf boost_1_68_0.tar.gz
$ cd boost_1_68_0
$ wget https://dl.bintray.com/boostorg/release/1.70.0/source/boost_1_70_0.tar.gz
$ tar -xzf boost_1_70_0.tar.gz
$ cd boost_1_70_0
$ ./bootstrap.sh
$ ./b2 headers
$ ./b2 -j<Num Parallel>
@@ -81,14 +81,14 @@ git checkout develop
If you didn't persistently set the `BOOST_ROOT` environment variable to the
directory in which you compiled boost, then you should set it temporarily.
For example, you built Boost in your home directory `~/boost_1_68_0`, you
For example, you built Boost in your home directory `~/boost_1_70_0`, you
would do for any shell in which you want to build:
```
export BOOST_ROOT=~/boost_1_68_0
export BOOST_ROOT=~/boost_1_70_0
```
Alternatively, you can add `DBOOST_ROOT=~/boost_1_68_0` to the command line when
Alternatively, you can add `DBOOST_ROOT=~/boost_1_70_0` to the command line when
invoking `cmake`.
### Generate and Build


@@ -60,11 +60,11 @@ brew install git cmake pkg-config protobuf openssl ninja
### Build Boost
Boost 1.67 or later is required.
Boost 1.70 or later is required.
We want to compile boost with clang/libc++
Download [a release](https://dl.bintray.com/boostorg/release/1.68.0/source/boost_1_68_0.tar.bz2)
Download [a release](https://dl.bintray.com/boostorg/release/1.70.0/source/boost_1_70_0.tar.bz2)
Extract it to a folder, making note of where, open a terminal, then:
@@ -120,11 +120,11 @@ If you didn't persistently set the `BOOST_ROOT` environment variable to the
root of the extracted directory above, then you should set it temporarily.
For example, assuming your username were `Abigail` and you extracted Boost
1.68.0 in `/Users/Abigail/Downloads/boost_1_68_0`, you would do for any
1.70.0 in `/Users/Abigail/Downloads/boost_1_70_0`, you would do for any
shell in which you want to build:
```
export BOOST_ROOT=/Users/Abigail/Downloads/boost_1_68_0
export BOOST_ROOT=/Users/Abigail/Downloads/boost_1_70_0
```
### Generate and Build


@@ -714,7 +714,7 @@ if (static AND NOT APPLE)
else ()
set (Boost_USE_STATIC_RUNTIME OFF)
endif ()
find_package (Boost 1.67 REQUIRED
find_package (Boost 1.70 REQUIRED
COMPONENTS
chrono
context
@@ -749,15 +749,6 @@ target_link_libraries (ripple_boost
Boost::system
Boost::thread)
# workaround for xcode 10.2 and boost < 1.69
# once we require Boost 1.69 or higher, this can be removed
# see: https://github.com/boostorg/asio/commit/43874d5
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND
CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.0.1.10010043 AND
Boost_VERSION LESS 106900)
target_compile_definitions (opts INTERFACE BOOST_ASIO_HAS_STD_STRING_VIEW)
endif ()
#[===================================================================[
NIH dep: openssl
#]===================================================================]
@@ -1419,8 +1410,8 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
FetchContent_Declare(
nudb_src
GIT_REPOSITORY https://github.com/vinniefalco/NuDB.git
GIT_TAG 1.0.0
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.1
)
FetchContent_GetProperties(nudb_src)
if(NOT nudb_src_POPULATED)
@@ -1430,8 +1421,8 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
else ()
ExternalProject_Add (nudb_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/vinniefalco/NuDB.git
GIT_TAG 1.0.0
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.1
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
TEST_COMMAND ""

Jenkinsfile

@@ -202,7 +202,7 @@ try {
"NIH_CACHE_ROOT=${cdir}/.nih_c"])
if (compiler == 'msvc') {
env_vars.addAll([
'BOOST_ROOT=c:\\lib\\boost_1_67',
'BOOST_ROOT=c:\\lib\\boost_1_70',
'PROJECT_NAME=rippled',
'MSBUILDDISABLENODEREUSE=1', // this ENV setting is probably redundant since we also pass /nr:false to msbuild
'OPENSSL_ROOT=c:\\OpenSSL-Win64'])
@@ -219,7 +219,7 @@ try {
'LCOV_ROOT=""',
'PATH+CMAKE_BIN=/opt/local/cmake',
'GDB_ROOT=/opt/local/gdb',
'BOOST_ROOT=/opt/local/boost_1_67_0',
'BOOST_ROOT=/opt/local/boost_1_70_0',
"USE_CCACHE=${ucc}"])
}


@@ -868,18 +868,10 @@
# ...
#
# Example:
# type=nudb
# path=db/shards/nudb
#
# The "type" field must be present and controls the choice of backend:
#
# type = NuDB
# NuDB is recommended for shards.
#
# type = RocksDB
#
# Required keys:
# path Location to store the database (all types)
# path Location to store the database
#
# max_size_gb Maximum disk space the database will utilize (in gigabytes)
#
@@ -1193,7 +1185,6 @@ advisory_delete=0
# NuDB requires SSD storage. Helpful information can be found here
# https://ripple.com/build/history-sharding
#[shard_db]
#type=NuDB
#path=/var/lib/rippled/db/shards/nudb
#max_size_gb=500
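
For orientation, here is a small C++ sketch (not part of the commit) of how the shard store consumes these keys elsewhere in this change set; `config` stands for the already-parsed `[shard_db]` section and the function name is made up:

```cpp
#include <ripple/basics/BasicConfig.h>

#include <cstdint>
#include <string>

void
readShardDbSection(ripple::Section const& config)
{
    using namespace ripple;
    // "type" now defaults to nudb, matching the trimmed example above
    auto const type {get<std::string>(config, "type", "nudb")};
    // "path" is required: where the shard databases live on disk
    auto const path {get<std::string>(config, "path")};
    // "max_size_gb" is given in gigabytes; shifting by 30 converts to bytes
    auto const maxBytes {get<std::uint64_t>(config, "max_size_gb") << 30};
    (void)type; (void)path; (void)maxBytes;
}
```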


@@ -109,11 +109,18 @@ public:
/** Remove contents on disk upon destruction. */
virtual void setDeletePath() = 0;
/** Perform consistency checks on database .*/
/** Perform consistency checks on database. */
virtual void verify() = 0;
/** Returns the number of file handles the backend expects to need */
/** Returns the number of file handles the backend expects to need. */
virtual int fdlimit() const = 0;
/** Returns true if the backend uses permanent storage. */
bool
backed() const
{
return fdlimit();
}
};
}
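
As a quick illustration (a sketch, not part of the commit) of what the new helper buys callers: sites that previously tested `fdlimit() != 0` can now ask the more descriptive `backed()`, which is exactly the substitution made in Shard.cpp later in this commit. The function below is hypothetical:

```cpp
#include <ripple/nodestore/Backend.h>

void
persistIfBacked(ripple::NodeStore::Backend& backend)
{
    if (backend.backed())
    {
        // Permanent storage: control files and disk space accounting apply.
    }
    else
    {
        // "memory"/"none" style backends report fdlimit() == 0, so skip that work.
    }
}
```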


@@ -23,6 +23,7 @@
#include <ripple/nodestore/Backend.h>
#include <ripple/nodestore/Scheduler.h>
#include <ripple/beast/utility/Journal.h>
#include <nudb/store.hpp>
namespace ripple {
namespace NodeStore {
@@ -42,14 +43,37 @@ public:
/** Create an instance of this factory's backend.
@param keyBytes The fixed number of bytes per key.
@param keyValues A set of key/value configuration pairs.
@param parameters A set of key/value configuration pairs.
@param scheduler The scheduler to use for running tasks.
@return A pointer to the Backend object.
*/
virtual
std::unique_ptr <Backend>
createInstance (size_t keyBytes, Section const& parameters,
Scheduler& scheduler, beast::Journal journal) = 0;
createInstance (
size_t keyBytes,
Section const& parameters,
Scheduler& scheduler,
beast::Journal journal) = 0;
/** Create an instance of this factory's backend.
@param keyBytes The fixed number of bytes per key.
@param parameters A set of key/value configuration pairs.
@param scheduler The scheduler to use for running tasks.
@param context The context used by database.
@return A pointer to the Backend object.
*/
virtual
std::unique_ptr <Backend>
createInstance (
size_t keyBytes,
Section const& parameters,
Scheduler& scheduler,
nudb::context& context,
beast::Journal journal)
{
return {};
}
};
}
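
A minimal sketch (the helper name, its parameter list, and the includes are assumptions) of how the new context-aware overload is meant to be called, with one `nudb::context` shared by every backend created through it:

```cpp
#include <ripple/basics/BasicConfig.h>
#include <ripple/beast/utility/Journal.h>
#include <ripple/nodestore/Backend.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/NodeObject.h>
#include <ripple/nodestore/Scheduler.h>

#include <nudb/nudb.hpp>

#include <memory>
#include <string>

std::unique_ptr<ripple::NodeStore::Backend>
makeBackendWithContext(
    ripple::Section const& section,
    ripple::NodeStore::Scheduler& scheduler,
    nudb::context& ctx,
    beast::Journal journal)
{
    using namespace ripple;
    using namespace ripple::NodeStore;

    // Look up the factory registered for the configured type ("nudb" default)
    auto factory {Manager::instance().find(
        get<std::string>(section, "type", "nudb"))};
    if (!factory)
        return nullptr;

    // Backends built against the same context share its resources
    return factory->createInstance(
        NodeObject::keyBytes, section, scheduler, ctx, journal);
}
```

Because the base-class overload simply returns an empty pointer, a caller like this should also be prepared for a null `Backend` when the selected factory does not implement the context-aware variant.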


@@ -40,9 +40,6 @@ class NuDBBackend
: public Backend
{
public:
// This needs to be tuned for the
// distribution of data sizes.
static constexpr std::size_t arena_alloc_size = megabytes(16);
static constexpr std::size_t currentType = 1;
beast::Journal j_;
@@ -52,8 +49,11 @@ public:
std::atomic <bool> deletePath_;
Scheduler& scheduler_;
NuDBBackend (int keyBytes, Section const& keyValues,
Scheduler& scheduler, beast::Journal journal)
NuDBBackend (
size_t keyBytes,
Section const& keyValues,
Scheduler& scheduler,
beast::Journal journal)
: j_(journal)
, keyBytes_ (keyBytes)
, name_ (get<std::string>(keyValues, "path"))
@@ -65,6 +65,24 @@ public:
"nodestore: Missing path in NuDB backend");
}
NuDBBackend (
size_t keyBytes,
Section const& keyValues,
Scheduler& scheduler,
nudb::context& context,
beast::Journal journal)
: j_(journal)
, keyBytes_ (keyBytes)
, name_ (get<std::string>(keyValues, "path"))
, db_ (context)
, deletePath_(false)
, scheduler_ (scheduler)
{
if (name_.empty())
Throw<std::runtime_error> (
"nodestore: Missing path in NuDB backend");
}
~NuDBBackend () override
{
close();
@@ -278,7 +296,6 @@ public:
Throw<nudb::system_error>(ec);
}
/** Returns the number of file handles the backend expects to need */
int
fdlimit() const override
{
@@ -317,6 +334,18 @@ public:
return std::make_unique <NuDBBackend> (
keyBytes, keyValues, scheduler, journal);
}
std::unique_ptr <Backend>
createInstance (
size_t keyBytes,
Section const& keyValues,
Scheduler& scheduler,
nudb::context& context,
beast::Journal journal) override
{
return std::make_unique <NuDBBackend> (
keyBytes, keyValues, scheduler, context, journal);
}
};
static NuDBFactory nuDBFactory;


@@ -34,65 +34,95 @@ namespace NodeStore {
constexpr std::uint32_t DatabaseShard::ledgersPerShardDefault;
DatabaseShardImp::DatabaseShardImp(Application& app,
std::string const& name, Stoppable& parent, Scheduler& scheduler,
int readThreads, Section const& config, beast::Journal j)
DatabaseShardImp::DatabaseShardImp(
Application& app,
std::string const& name,
Stoppable& parent,
Scheduler& scheduler,
int readThreads,
Section const& config,
beast::Journal j)
: DatabaseShard(name, parent, scheduler, readThreads, config, j)
, app_(app)
, ctx_(std::make_unique<nudb::context>())
, config_(config)
, dir_(get<std::string>(config, "path"))
, backendName_(Manager::instance().find(
get<std::string>(config_, "type"))->getName())
get<std::string>(config, "type", "nudb"))->getName())
, maxDiskSpace_(get<std::uint64_t>(config, "max_size_gb") << 30)
, ledgersPerShard_(get<std::uint32_t>(
config, "ledgers_per_shard", ledgersPerShardDefault))
, earliestShardIndex_(seqToShardIndex(earliestSeq()))
, avgShardSz_(ledgersPerShard_ * (192 * 1024))
{
if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
Throw<std::runtime_error>(
"ledgers_per_shard must be a multiple of 256");
ctx_->start();
}
DatabaseShardImp::~DatabaseShardImp()
{
// Stop threads before data members are destroyed
stopThreads();
// Close backend databases before destroying the context
std::lock_guard<std::mutex> lock(m_);
complete_.clear();
if (incomplete_)
incomplete_.reset();
preShards_.clear();
ctx_.reset();
}
bool
DatabaseShardImp::init()
{
using namespace boost::filesystem;
using namespace boost::beast::detail;
std::lock_guard<std::mutex> lock(m_);
if (init_)
{
assert(false);
JLOG(j_.error()) <<
"Already initialized";
return false;
}
// Find backend type and file handle requirement
try
{
fdLimit_ = Manager::instance().make_Backend(
config_, scheduler_, j_)->fdlimit();
}
catch (std::exception const&)
if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
{
JLOG(j_.error()) <<
"Invalid or missing shard store "
"type specified in [shard_db]";
"ledgers_per_shard must be a multiple of 256";
return false;
}
backed_ = static_cast<bool>(fdLimit_);
// NuDB is the default and only supported permanent storage backend
// "Memory" and "none" types are supported for tests
if (!iequals(backendName_, "NuDB") &&
!iequals(backendName_, "Memory") &&
!iequals(backendName_, "none"))
{
JLOG(j_.error()) <<
"Unsupported shard store type: " << backendName_;
return false;
}
{
// Find backend file handle requirement
auto factory {Manager::instance().find(backendName_)};
if (!factory)
{
JLOG(j_.error()) <<
"Failed to create shard store type " << backendName_;
return false;
}
auto backend {factory->createInstance(NodeObject::keyBytes,
config_, scheduler_, *ctx_, j_)};
backed_ = backend->backed();
if (!backed_)
{
init_ = true;
return true;
}
fdLimit_ = backend->fdlimit();
}
try
{
@@ -136,7 +166,7 @@ DatabaseShardImp::init()
auto shard {std::make_unique<Shard>(
*this, shardIndex, cacheSz_, cacheAge_, j_)};
if (!shard->open(config_, scheduler_))
if (!shard->open(config_, scheduler_, *ctx_))
return false;
usedDiskSpace_ += shard->fileSize();
@@ -220,7 +250,7 @@ DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
1, static_cast<int>(complete_.size() + 1)))};
incomplete_ = std::make_unique<Shard>(
*this, *shardIndex, sz, cacheAge_, j_);
if (!incomplete_->open(config_, scheduler_))
if (!incomplete_->open(config_, scheduler_, *ctx_))
{
incomplete_.reset();
return boost::none;
@@ -380,7 +410,7 @@ DatabaseShardImp::importShard(std::uint32_t shardIndex,
if(it == preShards_.end())
{
JLOG(j_.error()) <<
"Invalid shard index " << std::to_string(shardIndex);
"Invalid shard index " << shardIndex;
return false;
}
@@ -394,16 +424,20 @@ DatabaseShardImp::importShard(std::uint32_t shardIndex,
*this, shardIndex, cacheSz_, cacheAge_, j_)};
auto fail = [&](std::string msg)
{
JLOG(j_.error()) << msg;
shard.release();
if (!msg.empty())
{
JLOG(j_.error()) <<
"Import shard " << shardIndex << ": " << msg;
}
shard.reset();
move(dstDir, srcDir);
return false;
};
if (!shard->open(config_, scheduler_))
return fail("Failure");
if (!shard->open(config_, scheduler_, *ctx_))
return fail({});
if (!shard->complete())
return fail("Incomplete shard");
return fail("incomplete shard");
try
{
@@ -412,7 +446,7 @@ DatabaseShardImp::importShard(std::uint32_t shardIndex,
}
catch (std::exception const& e)
{
return fail(std::string("exception: ") + e.what());
return fail(e.what());
}
// Validate shard ledgers
@@ -716,7 +750,7 @@ DatabaseShardImp::import(Database& source)
auto const shardDir {dir_ / std::to_string(shardIndex)};
auto shard = std::make_unique<Shard>(
*this, shardIndex, shardCacheSz, cacheAge_, j_);
if (!shard->open(config_, scheduler_))
if (!shard->open(config_, scheduler_, *ctx_))
{
shard.reset();
continue;
@@ -1087,6 +1121,7 @@ DatabaseShardImp::updateStats(std::lock_guard<std::mutex>&)
}
else if(incomplete_)
filesPerShard = incomplete_->fdlimit();
if (!backed_)
return;
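
To make the ordering above concrete, here is a compressed sketch (the class is a hypothetical stand-in for `DatabaseShardImp`, with a single store in place of the shard containers) of the lifetime rule the new destructor enforces: stores that borrow the context must be closed before the context itself is destroyed.

```cpp
#include <nudb/nudb.hpp>

#include <memory>

class ShardSetSketch
{
    std::unique_ptr<nudb::context> ctx_;
    std::unique_ptr<nudb::store> db_;  // stands in for the per-shard backends

public:
    ShardSetSketch()
        : ctx_(std::make_unique<nudb::context>())
    {
        ctx_->start();                               // as in the constructor above
        db_ = std::make_unique<nudb::store>(*ctx_);  // mirrors NuDBBackend's db_(context)
    }

    ~ShardSetSketch()
    {
        db_.reset();   // close the backends first...
        ctx_.reset();  // ...then tear down the shared context
    }
};
```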


@@ -31,11 +31,18 @@ class DatabaseShardImp : public DatabaseShard
public:
DatabaseShardImp() = delete;
DatabaseShardImp(DatabaseShardImp const&) = delete;
DatabaseShardImp(DatabaseShardImp&&) = delete;
DatabaseShardImp& operator=(DatabaseShardImp const&) = delete;
DatabaseShardImp& operator=(DatabaseShardImp&&) = delete;
DatabaseShardImp(Application& app, std::string const& name,
Stoppable& parent, Scheduler& scheduler, int readThreads,
Section const& config, beast::Journal j);
DatabaseShardImp(
Application& app,
std::string const& name,
Stoppable& parent,
Scheduler& scheduler,
int readThreads,
Section const& config,
beast::Journal j);
~DatabaseShardImp() override;
@@ -161,6 +168,9 @@ private:
mutable std::mutex m_;
bool init_ {false};
// The context shared with all shard backend databases
std::unique_ptr<nudb::context> ctx_;
// Complete shards
std::map<std::uint32_t, std::unique_ptr<Shard>> complete_;


@@ -49,79 +49,66 @@ Shard::Shard(DatabaseShard const& db, std::uint32_t index,
}
bool
Shard::open(Section config, Scheduler& scheduler)
Shard::open(Section config, Scheduler& scheduler, nudb::context& ctx)
{
assert(!backend_);
using namespace boost::filesystem;
using namespace boost::beast::detail;
bool dirPreexist;
bool dirEmpty;
try
{
if (!exists(dir_))
{
dirPreexist = false;
dirEmpty = true;
}
else if (is_directory(dir_))
{
dirPreexist = true;
dirEmpty = is_empty(dir_);
}
else
std::string const type (get<std::string>(config, "type", "nudb"));
auto factory {Manager::instance().find(type)};
if (!factory)
{
JLOG(j_.error()) <<
"path exists as file: " << dir_.string();
"shard " << index_ <<
": failed to create shard store type " << type;
return false;
}
}
catch (std::exception const& e)
boost::system::error_code ec;
auto const preexist {exists(dir_, ec)};
if (ec)
{
JLOG(j_.error()) <<
"shard " + std::to_string(index_) + " exception: " + e.what();
"shard " << index_ << ": " << ec.message();
return false;
}
config.set("path", dir_.string());
backend_ = factory->createInstance(
NodeObject::keyBytes, config, scheduler, ctx, j_);
auto fail = [&](std::string msg)
{
JLOG(j_.error()) <<
"shard " << std::to_string(index_) << " error: " << msg;
if (!dirPreexist)
removeAll(dir_, j_);
else if (dirEmpty)
if (!msg.empty())
{
for (auto const& p : recursive_directory_iterator(dir_))
removeAll(p.path(), j_);
JLOG(j_.error()) <<
"shard " << index_ << ": " << msg;
}
if (!preexist)
removeAll(dir_, j_);
return false;
};
config.set("path", dir_.string());
try
{
backend_ = Manager::instance().make_Backend(
config, scheduler, j_);
backend_->open(!dirPreexist || dirEmpty);
backend_->open(!preexist);
if (backend_->fdlimit() == 0)
if (!backend_->backed())
return true;
if (!dirPreexist || dirEmpty)
if (!preexist)
{
// New shard, create a control file
if (!saveControl())
return fail("failure");
return fail({});
}
else if (is_regular_file(control_))
{
// Incomplete shard, inspect control file
std::ifstream ifs(control_.string());
if (!ifs.is_open())
{
return fail("shard " + std::to_string(index_) +
", unable to open control file");
}
return fail("failed to open control file");
boost::archive::text_iarchive ar(ifs);
ar & storedSeqs_;
@@ -130,15 +117,14 @@ Shard::open(Section config, Scheduler& scheduler)
if (boost::icl::first(storedSeqs_) < firstSeq_ ||
boost::icl::last(storedSeqs_) > lastSeq_)
{
return fail("shard " + std::to_string(index_) +
": Invalid control file");
return fail("invalid control file");
}
if (boost::icl::length(storedSeqs_) >= maxLedgers_)
{
JLOG(j_.error()) <<
"shard " << index_ <<
" found control file for complete shard";
": found control file for complete shard";
storedSeqs_.clear();
complete_ = true;
remove_all(control_);
@@ -155,9 +141,7 @@ Shard::open(Section config, Scheduler& scheduler)
}
catch (std::exception const& e)
{
JLOG(j_.error()) <<
"shard " << std::to_string(index_) << " error: " << e.what();
return false;
return fail(e.what());
}
return true;
@@ -177,7 +161,7 @@ Shard::setStored(std::shared_ptr<Ledger const> const& l)
}
if (boost::icl::length(storedSeqs_) >= maxLedgers_ - 1)
{
if (backend_->fdlimit() != 0)
if (backend_->backed())
{
if (!removeAll(control_, j_))
return false;
@@ -212,7 +196,7 @@ Shard::setStored(std::shared_ptr<Ledger const> const& l)
{
storedSeqs_.insert(l->info().seq);
lastStored_ = l;
if (backend_->fdlimit() != 0 && !saveControl())
if (backend_->backed() && !saveControl())
return false;
JLOG(j_.debug()) <<


@@ -26,6 +26,7 @@
#include <ripple/nodestore/NodeObject.h>
#include <ripple/nodestore/Scheduler.h>
#include <nudb/nudb.hpp>
#include <boost/filesystem.hpp>
#include <boost/serialization/map.hpp>
#include <boost/archive/text_oarchive.hpp>
@@ -69,7 +70,7 @@ public:
std::chrono::seconds cacheAge, beast::Journal& j);
bool
open(Section config, Scheduler& scheduler);
open(Section config, Scheduler& scheduler, nudb::context& ctx);
bool
setStored(std::shared_ptr<Ledger const> const& l);