Merge remote-tracking branch 'upstream/develop' into ximinez/lending-XLS-66

* upstream/develop:
  refactor: Retire ImmediateOfferKilled amendment (5973)
  ci: Update CI image hashes to use netstat (5987)
  chore: Remove version number in find_dependency for OpenSSL (5985)
  refactor: Modularize shamap and nodestore (5668)
  refactor: Retire fixMasterKeyAsRegularKey amendment (5959)
  refactor: Retire fixReducedOffersV1 amendment (5972)
  refactor: Retire fixAmendmentMajorityCalc amendment (5961)
  refactor: Clean up `TxMeta` (5845)
  fix: Address permission delegation vulnerability (5825)
This commit is contained in:
Ed Hennis
2025-11-03 13:09:03 -05:00
140 changed files with 753 additions and 1088 deletions

View File

@@ -0,0 +1,51 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_UNITY_ROCKSDB_H_INCLUDED
#define RIPPLE_UNITY_ROCKSDB_H_INCLUDED
#if RIPPLE_ROCKSDB_AVAILABLE
// #include <rocksdb2/port/port_posix.h>
#include <rocksdb/cache.h>
#include <rocksdb/compaction_filter.h>
#include <rocksdb/comparator.h>
#include <rocksdb/convenience.h>
#include <rocksdb/db.h>
#include <rocksdb/env.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/flush_block_policy.h>
#include <rocksdb/iterator.h>
#include <rocksdb/memtablerep.h>
#include <rocksdb/merge_operator.h>
#include <rocksdb/options.h>
#include <rocksdb/perf_context.h>
#include <rocksdb/slice.h>
#include <rocksdb/slice_transform.h>
#include <rocksdb/statistics.h>
#include <rocksdb/status.h>
#include <rocksdb/table.h>
#include <rocksdb/table_properties.h>
#include <rocksdb/transaction_log.h>
#include <rocksdb/types.h>
#include <rocksdb/universal_compaction.h>
#include <rocksdb/write_batch.h>
#endif
#endif

View File

@@ -0,0 +1,167 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_BACKEND_H_INCLUDED
#define RIPPLE_NODESTORE_BACKEND_H_INCLUDED
#include <xrpl/nodestore/Types.h>
#include <cstdint>
namespace ripple {
namespace NodeStore {
/** A backend used for the NodeStore.

    The NodeStore uses a swappable backend so that other database systems
    can be tried. Different databases may offer various features such
    as improved performance, fault tolerant or distributed storage, or
    all in-memory operation.

    A given instance of a backend is fixed to a particular key size.
*/
class Backend
{
public:
    /** Destroy the backend.

        All open files are closed and flushed. If there are batched writes
        or other tasks scheduled, they will be completed before this call
        returns.
    */
    virtual ~Backend() = default;

    /** Get the human-readable name of this backend.
        This is used for diagnostic output.
    */
    virtual std::string
    getName() = 0;

    /** Get the block size for backends that support it.

        @return The block size in bytes, or std::nullopt for backends that
                do not expose one (the default implementation).
    */
    virtual std::optional<std::size_t>
    getBlockSize() const
    {
        return std::nullopt;
    }

    /** Open the backend.
        This allows the caller to catch exceptions.

        @param createIfMissing Create the database files if necessary.
    */
    virtual void
    open(bool createIfMissing = true) = 0;

    /** Returns true if the database is open. */
    virtual bool
    isOpen() = 0;

    /** Open the backend deterministically.

        @param createIfMissing Create the database files if necessary.
        @param appType Deterministic appType used to create a backend.
        @param uid Deterministic uid used to create a backend.
        @param salt Deterministic salt used to create a backend.
        @throws std::runtime_error if this function is called on a backend
                that does not support deterministic creation (the default
                implementation below; only the NuDB backend overrides it).
    */
    virtual void
    open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
    {
        Throw<std::runtime_error>(
            "Deterministic appType/uid/salt not supported by backend " +
            getName());
    }

    /** Close the backend.
        This allows the caller to catch exceptions.
    */
    virtual void
    close() = 0;

    /** Fetch a single object.

        If the object is not found or an error is encountered, the
        result will indicate the condition.

        @note This will be called concurrently.
        @param key A pointer to the key data.
        @param pObject [out] The created object if successful.
        @return The result of the operation.
    */
    virtual Status
    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0;

    /** Fetch a batch synchronously.

        @param hashes The keys of the objects to fetch.
        @return The fetched objects and the status of the operation.
    */
    virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
    fetchBatch(std::vector<uint256 const*> const& hashes) = 0;

    /** Store a single object.

        Depending on the implementation this may happen immediately
        or deferred using a scheduled task.

        @note This will be called concurrently.
        @param object The object to store.
    */
    virtual void
    store(std::shared_ptr<NodeObject> const& object) = 0;

    /** Store a group of objects.

        @note This function will not be called concurrently with
              itself or @ref store.
        @param batch The objects to store.
    */
    virtual void
    storeBatch(Batch const& batch) = 0;

    // Flush pending writes to stable storage.
    // NOTE(review): exact durability guarantees are defined by each
    // concrete backend — confirm against the implementations.
    virtual void
    sync() = 0;

    /** Visit every object in the database.

        This is usually called during import.

        @note This routine will not be called concurrently with itself
              or other methods.
        @param f Callback invoked once per stored object.
        @see import
    */
    virtual void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) = 0;

    /** Estimate the number of write operations pending. */
    virtual int
    getWriteLoad() = 0;

    /** Remove contents on disk upon destruction. */
    virtual void
    setDeletePath() = 0;

    /** Perform consistency checks on database.

        This method is implemented only by NuDBBackend; the default
        implementation is a no-op. It is not yet called anywhere, but it
        might be a good idea to one day call it at startup to avert a
        crash.
    */
    virtual void
    verify()
    {
    }

    /** Returns the number of file descriptors the backend expects to need. */
    virtual int
    fdRequired() const = 0;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,315 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2017 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_DATABASE_H_INCLUDED
#define RIPPLE_NODESTORE_DATABASE_H_INCLUDED
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/TaggedCache.ipp>
#include <xrpl/nodestore/Backend.h>
#include <xrpl/nodestore/NodeObject.h>
#include <xrpl/nodestore/Scheduler.h>
#include <xrpl/protocol/SystemParameters.h>
#include <condition_variable>
namespace ripple {
namespace NodeStore {
/** Persistency layer for NodeObject

    A Node is a ledger object which is uniquely identified by a key, which is
    the 256-bit hash of the body of the node. The payload is a variable length
    block of serialized data.

    All ledger data is stored as node objects and as such, needs to be persisted
    between launches. Furthermore, since the set of node objects will in
    general be larger than the amount of available memory, purged node objects
    which are later accessed must be retrieved from the node store.

    @see NodeObject
*/
class Database
{
public:
    Database() = delete;

    /** Construct the node store.

        @param scheduler The scheduler to use for performing asynchronous tasks.
        @param readThreads The number of asynchronous read threads to create.
        @param config The configuration settings.
        @param j Destination for logging output.
    */
    Database(
        Scheduler& scheduler,
        int readThreads,
        Section const& config,
        beast::Journal j);

    /** Destroy the node store.

        All pending operations are completed, pending writes flushed,
        and files closed before this returns.
    */
    virtual ~Database();

    /** Retrieve the name associated with this backend.

        This is used for diagnostics and may not reflect the actual path
        or paths used by the underlying backend.
    */
    virtual std::string
    getName() const = 0;

    /** Import objects from another database. */
    virtual void
    importDatabase(Database& source) = 0;

    /** Retrieve the estimated number of pending write operations.
        This is used for diagnostics.
    */
    virtual std::int32_t
    getWriteLoad() const = 0;

    /** Store the object.

        The caller's Blob parameter is overwritten.

        @param type The type of object.
        @param data The payload of the object. The caller's
                    variable is overwritten.
        @param hash The 256-bit hash of the payload data.
        @param ledgerSeq The sequence of the ledger the object belongs to.
    */
    virtual void
    store(
        NodeObjectType type,
        Blob&& data,
        uint256 const& hash,
        std::uint32_t ledgerSeq) = 0;

    /** Check if two ledgers are in the same database.

        If these two sequence numbers map to the same database,
        the result of a fetch with either sequence number would
        be identical.

        @param s1 The first sequence number
        @param s2 The second sequence number
        @return 'true' if both ledgers would be in the same DB
    */
    virtual bool
    isSameDB(std::uint32_t s1, std::uint32_t s2) = 0;

    // Flush pending writes to the backend.
    // NOTE(review): durability semantics are defined by the concrete
    // Database/Backend implementations — confirm before relying on them.
    virtual void
    sync() = 0;

    /** Fetch a node object.

        If the object is known to be not in the database, isn't found in the
        database during the fetch, or failed to load correctly during the fetch,
        `nullptr` is returned.

        @note This can be called concurrently.
        @param hash The key of the object to retrieve.
        @param ledgerSeq The sequence of the ledger where the object is stored.
        @param fetchType the type of fetch, synchronous or asynchronous.
        @param duplicate NOTE(review): undocumented flag forwarded to the
               private overload — confirm its semantics against the overrides.
        @return The object, or nullptr if it couldn't be retrieved.
    */
    std::shared_ptr<NodeObject>
    fetchNodeObject(
        uint256 const& hash,
        std::uint32_t ledgerSeq = 0,
        FetchType fetchType = FetchType::synchronous,
        bool duplicate = false);

    /** Fetch an object without waiting.

        If I/O is required to determine whether or not the object is present,
        the I/O is scheduled and the callback is invoked when the read
        completes.

        @note This can be called concurrently.
        @param hash The key of the object to retrieve
        @param ledgerSeq The sequence of the ledger where the
                         object is stored.
        @param callback Callback function when read completes
    */
    virtual void
    asyncFetch(
        uint256 const& hash,
        std::uint32_t ledgerSeq,
        std::function<void(std::shared_ptr<NodeObject> const&)>&& callback);

    /** Remove expired entries from the positive and negative caches. */
    virtual void
    sweep() = 0;

    /** Number of objects stored since construction. */
    std::uint64_t
    getStoreCount() const
    {
        return storeCount_;
    }

    /** Total number of fetches attempted.
        NOTE(review): narrows the 64-bit fetchTotalCount_ counter to
        32 bits — may wrap on long-running nodes.
    */
    std::uint32_t
    getFetchTotalCount() const
    {
        return fetchTotalCount_;
    }

    /** Number of fetches satisfied. */
    std::uint32_t
    getFetchHitCount() const
    {
        return fetchHitCount_;
    }

    /** Total bytes stored since construction. */
    std::uint64_t
    getStoreSize() const
    {
        return storeSz_;
    }

    /** Total bytes fetched since construction. */
    std::uint32_t
    getFetchSize() const
    {
        return fetchSz_;
    }

    /** Gather statistics pertaining to read and write activities.

        @param obj Json object reference into which to place counters.
    */
    void
    getCountsJson(Json::Value& obj);

    /** Returns the number of file descriptors the database expects to need */
    int
    fdRequired() const
    {
        return fdRequired_;
    }

    /** Stop the read threads and reject further asynchronous work. */
    virtual void
    stop();

    /** Returns true once stop() has been requested. */
    bool
    isStopping() const;

    /** @return The earliest ledger sequence allowed
    */
    [[nodiscard]] std::uint32_t
    earliestLedgerSeq() const noexcept
    {
        return earliestLedgerSeq_;
    }

protected:
    beast::Journal const j_;
    Scheduler& scheduler_;
    int fdRequired_{0};

    std::atomic<std::uint32_t> fetchHitCount_{0};
    std::atomic<std::uint32_t> fetchSz_{0};

    // The default is XRP_LEDGER_EARLIEST_SEQ (32570) to match the XRP ledger
    // network's earliest allowed ledger sequence. Can be set through the
    // configuration file using the 'earliest_seq' field under the 'node_db'
    // stanza. If specified, the value must be greater than zero.
    // Only unit tests or alternate
    // networks should change this value.
    std::uint32_t const earliestLedgerSeq_;

    // The maximum number of requests a thread extracts from the queue in an
    // attempt to minimize the overhead of mutex acquisition. This is an
    // advanced tunable, via the config file. The default value is 4.
    int const requestBundle_;

    // Accumulate store counters; count is the number of objects, sz the
    // total bytes, so count can never exceed sz.
    void
    storeStats(std::uint64_t count, std::uint64_t sz)
    {
        XRPL_ASSERT(
            count <= sz,
            "ripple::NodeStore::Database::storeStats : valid inputs");
        storeCount_ += count;
        storeSz_ += sz;
    }

    // Called by the public import function
    void
    importInternal(Backend& dstBackend, Database& srcDB);

    // Accumulate fetch counters; duration is in microseconds
    // (accumulated into fetchDurationUs_).
    void
    updateFetchMetrics(uint64_t fetches, uint64_t hits, uint64_t duration)
    {
        fetchTotalCount_ += fetches;
        fetchHitCount_ += hits;
        fetchDurationUs_ += duration;
    }

private:
    std::atomic<std::uint64_t> storeCount_{0};
    std::atomic<std::uint64_t> storeSz_{0};
    std::atomic<std::uint64_t> fetchTotalCount_{0};
    std::atomic<std::uint64_t> fetchDurationUs_{0};
    // NOTE(review): storeDurationUs_ is never touched in this header;
    // presumably updated by derived classes — confirm it is still used.
    std::atomic<std::uint64_t> storeDurationUs_{0};

    // Guards read_ and readCondVar_.
    mutable std::mutex readLock_;
    std::condition_variable readCondVar_;

    // Pending asynchronous reads: hash -> (ledger sequence, callback) pairs.
    std::map<
        uint256,
        std::vector<std::pair<
            std::uint32_t,
            std::function<void(std::shared_ptr<NodeObject> const&)>>>>
        read_;

    std::atomic<bool> readStopping_ = false;
    std::atomic<int> readThreads_ = 0;
    std::atomic<int> runningThreads_ = 0;

    // Backend-specific fetch; implemented by derived classes.
    virtual std::shared_ptr<NodeObject>
    fetchNodeObject(
        uint256 const& hash,
        std::uint32_t ledgerSeq,
        FetchReport& fetchReport,
        bool duplicate) = 0;

    /** Visit every object in the database.

        This is usually called during import.

        @note This routine will not be called concurrently with itself
              or other methods.
        @see import
    */
    virtual void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) = 0;

    // Entry point executed by each asynchronous read thread.
    void
    threadEntry();
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,63 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_DATABASEROTATING_H_INCLUDED
#define RIPPLE_NODESTORE_DATABASEROTATING_H_INCLUDED
#include <xrpl/nodestore/Database.h>
namespace ripple {
namespace NodeStore {
/** This class has two key-value store Backend objects for persisting SHAMap
    records. This facilitates online deletion of data. New backends are
    rotated in. Old ones are rotated out and deleted.
*/
class DatabaseRotating : public Database
{
public:
    /** Construct a rotating database.

        @param scheduler The scheduler to use for performing asynchronous tasks.
        @param readThreads The number of asynchronous read threads to create.
        @param config The configuration settings.
        @param journal Destination for logging output.
    */
    DatabaseRotating(
        Scheduler& scheduler,
        int readThreads,
        Section const& config,
        beast::Journal journal)
        : Database(scheduler, readThreads, config, journal)
    {
    }

    /** Rotates the backends.

        @param newBackend New writable backend
        @param f A function executed after the rotation outside of lock. The
            values passed to f will be the new backend database names _after_
            rotation.
    */
    virtual void
    rotate(
        std::unique_ptr<NodeStore::Backend>&& newBackend,
        std::function<void(
            std::string const& writableName,
            std::string const& archiveName)> const& f) = 0;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,45 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_DUMMYSCHEDULER_H_INCLUDED
#define RIPPLE_NODESTORE_DUMMYSCHEDULER_H_INCLUDED
#include <xrpl/nodestore/Scheduler.h>
namespace ripple {
namespace NodeStore {
/** Simple NodeStore Scheduler that just peforms the tasks synchronously. */
class DummyScheduler : public Scheduler
{
public:
DummyScheduler() = default;
~DummyScheduler() = default;
void
scheduleTask(Task& task) override;
void
onFetch(FetchReport const& report) override;
void
onBatchWrite(BatchWriteReport const& report) override;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,85 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_FACTORY_H_INCLUDED
#define RIPPLE_NODESTORE_FACTORY_H_INCLUDED
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/nodestore/Backend.h>
#include <xrpl/nodestore/Scheduler.h>
#include <nudb/store.hpp>
namespace ripple {
namespace NodeStore {
/** Base class for backend factories. */
class Factory
{
public:
    virtual ~Factory() = default;

    /** Retrieve the name of this factory. */
    virtual std::string
    getName() const = 0;

    /** Create an instance of this factory's backend.

        @param keyBytes The fixed number of bytes per key.
        @param parameters A set of key/value configuration pairs.
        @param burstSize Backend burst size in bytes.
        @param scheduler The scheduler to use for running tasks.
        @param journal Destination for logging output.
        @return A pointer to the Backend object.
    */
    virtual std::unique_ptr<Backend>
    createInstance(
        size_t keyBytes,
        Section const& parameters,
        std::size_t burstSize,
        Scheduler& scheduler,
        beast::Journal journal) = 0;

    /** Create an instance of this factory's backend.

        @param keyBytes The fixed number of bytes per key.
        @param parameters A set of key/value configuration pairs.
        @param burstSize Backend burst size in bytes.
        @param scheduler The scheduler to use for running tasks.
        @param context The context used by database.
        @param journal Destination for logging output.
        @return A pointer to the Backend object, or nullptr — the default
                implementation returns an empty pointer for factories whose
                backends do not use a nudb::context.
    */
    virtual std::unique_ptr<Backend>
    createInstance(
        size_t keyBytes,
        Section const& parameters,
        std::size_t burstSize,
        Scheduler& scheduler,
        nudb::context& context,
        beast::Journal journal)
    {
        return {};
    }
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,107 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_MANAGER_H_INCLUDED
#define RIPPLE_NODESTORE_MANAGER_H_INCLUDED
#include <xrpl/nodestore/DatabaseRotating.h>
#include <xrpl/nodestore/Factory.h>
namespace ripple {
namespace NodeStore {
/** Singleton for managing NodeStore factories and back ends. */
class Manager
{
public:
    virtual ~Manager() = default;
    Manager() = default;
    Manager(Manager const&) = delete;
    Manager&
    operator=(Manager const&) = delete;

    /** Returns the instance of the manager singleton. */
    static Manager&
    instance();

    /** Add a factory. */
    virtual void
    insert(Factory& factory) = 0;

    /** Remove a factory. */
    virtual void
    erase(Factory& factory) = 0;

    /** Return a pointer to the matching factory if it exists.

        @param name The name to match, performed case-insensitive.
        @return `nullptr` if a match was not found.
    */
    virtual Factory*
    find(std::string const& name) = 0;

    /** Create a backend. */
    virtual std::unique_ptr<Backend>
    make_Backend(
        Section const& parameters,
        std::size_t burstSize,
        Scheduler& scheduler,
        beast::Journal journal) = 0;

    /** Construct a NodeStore database.

        The parameters are key value pairs passed to the backend. The
        'type' key must exist, it defines the choice of backend. Most
        backends also require a 'path' field.

        NOTE(review): the historical list of 'type' choices (HyperLevelDB,
        LevelDBFactory, SQLite, MDB) appears stale — confirm against the
        registered factories before relying on it.

        @note If the database cannot be opened or created, an exception is
              thrown.
        @param burstSize Backend burst size in bytes.
        @param scheduler The scheduler to use for performing asynchronous tasks.
        @param readThreads The number of async read threads to create
        @param backendParameters The parameter string for the persistent
               backend.
        @param journal Destination for logging output.
        @return The opened database.
    */
    virtual std::unique_ptr<Database>
    make_Database(
        std::size_t burstSize,
        Scheduler& scheduler,
        int readThreads,
        Section const& backendParameters,
        beast::Journal journal) = 0;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,105 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_NODEOBJECT_H_INCLUDED
#define RIPPLE_NODESTORE_NODEOBJECT_H_INCLUDED
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/CountedObject.h>
#include <xrpl/basics/base_uint.h>
// VFALCO NOTE Intentionally not in the NodeStore namespace
namespace ripple {
/** The types of node objects. */
enum NodeObjectType : std::uint32_t {
    hotUNKNOWN = 0,
    hotLEDGER = 1,
    // NOTE(review): value 2 is intentionally skipped — presumably a retired
    // object type; confirm before reusing the value.
    hotACCOUNT_NODE = 3,
    hotTRANSACTION_NODE = 4,
    hotDUMMY = 512  // an invalid or missing object
};
/** A simple object that the Ledger uses to store entries.

    NodeObjects are comprised of a type, a hash, and a blob.
    They can be uniquely identified by the hash, which is a half-SHA512 of
    the blob. The blob is a variable length block of serialized data. The
    type identifies what the blob contains.

    @note No checking is performed to make sure the hash matches the data.
    @see SHAMap
*/
class NodeObject : public CountedObject<NodeObject>
{
public:
    // Fixed key size in bytes (a 256-bit hash).
    static constexpr std::size_t keyBytes = 32;

private:
    // This hack is used to make the constructor effectively private
    // except for when we use it in the call to make_shared.
    // There's no portable way to make make_shared<> a friend work.
    struct PrivateAccess
    {
        explicit PrivateAccess() = default;
    };

public:
    // This constructor is effectively private, use createObject instead.
    NodeObject(
        NodeObjectType type,
        Blob&& data,
        uint256 const& hash,
        PrivateAccess);

    /** Create an object from fields.

        The caller's variable is modified during this call. The
        underlying storage for the Blob is taken over by the NodeObject.

        @param type The type of object.
        @param data A buffer containing the payload. The caller's variable
                    is overwritten.
        @param hash The 256-bit hash of the payload data.
    */
    static std::shared_ptr<NodeObject>
    createObject(NodeObjectType type, Blob&& data, uint256 const& hash);

    /** Returns the type of this object. */
    NodeObjectType
    getType() const;

    /** Returns the hash of the data. */
    uint256 const&
    getHash() const;

    /** Returns the underlying data. */
    Blob const&
    getData() const;

private:
    NodeObjectType const mType;
    uint256 const mHash;
    Blob const mData;
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,180 @@
# Database Documentation
- [NodeStore](#nodestore)
- [Benchmarks](#benchmarks)
# NodeStore
## Introduction
A `NodeObject` is a simple object that the Ledger uses to store entries. It is
comprised of a type, a hash and a blob. It can be uniquely
identified by the hash, which is a 256 bit hash of the blob. The blob is a
variable length block of serialized data. The type identifies what the blob
contains. The fields are as follows:
- `mType`
An enumeration that determines what the blob holds. There are four
different types of objects stored.
- **ledger**
A ledger header.
- **transaction**
A signed transaction.
- **account node**
A node in a ledger's account state tree.
- **transaction node**
A node in a ledger's transaction tree.
- `mHash`
A 256-bit hash of the blob.
- `mData`
A blob containing the payload. Stored in the following format.
| Byte | | |
| :------ | :----- | :------------------------- |
| 0...7 | unused | |
| 8 | type | NodeObjectType enumeration |
| 9...end | data | body of the object data |
---
The `NodeStore` provides an interface that stores, in a persistent database, a
collection of NodeObjects that rippled uses as its primary representation of
ledger entries. All ledger entries are stored as NodeObjects and as such, need
to be persisted between launches. If a NodeObject is accessed and is not in
memory, it will be retrieved from the database.
## Backend
The `NodeStore` implementation provides the `Backend` abstract interface,
which lets different key/value databases be chosen at run-time. This allows
experimentation with different engines. Improvements in the performance of the
NodeStore are a constant area of research. The database can be specified in
the configuration file [node_db] section as follows.
One or more lines of key / value pairs
Example:
```
type=RocksDB
path=rocksdb
compression=1
```
Choices for 'type' (not case-sensitive)
- **HyperLevelDB**
An improved version of LevelDB (preferred).
- **LevelDB**
Google's LevelDB database (deprecated).
- **none**
Use no backend.
- **RocksDB**
Facebook's RocksDB database, builds on LevelDB.
- **SQLite**
Use SQLite.
'path' specifies where the backend will store its data files.
Choices for 'compression'
- **0** off
- **1** on (default)
# Benchmarks
The `NodeStore.Timing` test is used to execute a set of read/write workloads to
compare current available nodestore backends. It can be executed with:
```
$rippled --unittest=NodeStoreTiming
```
It is also possible to use alternate DB config params by passing config strings
as `--unittest-arg`.
## Addendum
The discussion below refers to a `RocksDBQuick` backend that has since been
removed from the code as it was not working and not maintained. That backend
primarily used one of the several rocks `Optimize*` methods to setup the
majority of the DB options/params, whereas the primary RocksDB backend exposes
many of the available config options directly. The code for RocksDBQuick can be
found in versions of this repo 1.2 and earlier if you need to refer back to it.
The conclusions below date from about 2014 and may need revisiting based on
newer versions of RocksDB (TBD).
## Discussion
RocksDBQuickFactory is intended to provide a testbed for comparing potential
rocksdb performance with the existing recommended configuration in rippled.cfg.
Through various executions and profiling some conclusions are presented below.
- If the write ahead log is enabled, insert speed soon clogs up under load. The
BatchWriter class intends to stop this from blocking the main threads by queuing
up writes and running them in a separate thread. However, rocksdb already has
separate threads dedicated to flushing the memtable to disk and the memtable is
itself an in-memory queue. The result is two queues with a guarantee of
durability in between. However if the memtable was used as the sole queue and
the rocksdb::Flush() call was manually triggered at opportune moments, possibly
just after ledger close, then that would provide similar, but more predictable
guarantees. It would also remove an unneeded thread and unnecessary memory
usage. An alternative point of view is that because there will always be many
other rippled instances running there is no need for such guarantees. The nodes
will always be available from another peer.
- Lookup in a block was previously using binary search. With rippled's use case
it is highly unlikely that two adjacent key/values will ever be requested one
after the other. Therefore hash indexing of blocks makes much more sense.
Rocksdb has a number of options for hash indexing both memtables and blocks and
these need more testing to find the best choice.
- The current Database implementation has two forms of caching, so the LRU cache
of blocks at Factory level does not make any sense. However, if the hash
indexing and potentially the new [bloom
filter](http://rocksdb.org/blog/1427/new-bloom-filter-format/) can provide
faster lookup for non-existent keys, then potentially the caching could exist at
Factory level.
- Multiple runs of the benchmarks can yield surprisingly different results. This
can perhaps be attributed to the asynchronous nature of rocksdb's compaction
process. The benchmarks are artificial and create a highly unlikely write load to
create the dataset to measure different read access patterns. Therefore multiple
runs of the benchmarks are required to get a feel for the effectiveness of the
changes. This contrasts sharply with the keyvadb benchmarking, where highly
repeatable timings were discovered. Also realistically sized datasets are
required to get a correct insight. The number of 2,000,000 key/values (actually
4,000,000 after the two insert benchmarks complete) is too low to get a full
picture.
- An interesting side effect of running the benchmarks in a profiler was that a
clear pattern of what RocksDB does under the hood was observable. This led to
the decision to trial hash indexing and also the discovery of the native CRC32
instruction not being used.
- An important point to note is that if this factory is tested with an existing set
of sst files none of the old sst files will benefit from indexing changes until
they are compacted at a future point in time.

View File

@@ -0,0 +1,90 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_SCHEDULER_H_INCLUDED
#define RIPPLE_NODESTORE_SCHEDULER_H_INCLUDED
#include <xrpl/nodestore/Task.h>
#include <chrono>
namespace ripple {
namespace NodeStore {
/** Whether a fetch was performed on the caller's thread or a read pool. */
enum class FetchType { synchronous, async };
/** Contains information about a fetch operation. */
struct FetchReport
{
    explicit FetchReport(FetchType fetchType_) : fetchType(fetchType_)
    {
    }
    // Time spent performing the fetch. Value-initialized so a report
    // that is never timed does not carry an indeterminate duration
    // (std::chrono::duration's default constructor leaves the
    // representation uninitialized).
    std::chrono::milliseconds elapsed{};
    // How the fetch was issued; fixed at construction.
    FetchType const fetchType;
    // True if the requested object was found in the backend.
    bool wasFound = false;
};
/** Contains information about a batch write operation. */
struct BatchWriteReport
{
    explicit BatchWriteReport() = default;
    // Time spent performing the batch write. Value-initialized so a
    // default-constructed report is not indeterminate.
    std::chrono::milliseconds elapsed{};
    // Number of objects written in the batch; zero until set.
    int writeCount = 0;
};
/** Scheduling for asynchronous backend activity
    For improved performance, a backend has the option of performing writes
    in batches. These writes can be scheduled using the provided scheduler
    object.
    @see BatchWriter
*/
class Scheduler
{
public:
    virtual ~Scheduler() = default;
    /** Schedules a task.
        Depending on the implementation, the task may be invoked either on
        the current thread of execution, or an unspecified
        implementation-defined foreign thread.
        @param task The task to perform. Passed by reference, so it must
                    outlive the (possibly asynchronous) invocation.
    */
    virtual void
    scheduleTask(Task& task) = 0;
    /** Reports completion of a fetch
        Allows the scheduler to monitor the node store's performance
        @param report Timing and result information for the fetch.
    */
    virtual void
    onFetch(FetchReport const& report) = 0;
    /** Reports the completion of a batch write
        Allows the scheduler to monitor the node store's performance
        @param report Timing and size information for the batch write.
    */
    virtual void
    onBatchWrite(BatchWriteReport const& report) = 0;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,41 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_TASK_H_INCLUDED
#define RIPPLE_NODESTORE_TASK_H_INCLUDED
namespace ripple {
namespace NodeStore {
/** Derived classes perform scheduled tasks. */
struct Task
{
    virtual ~Task() = default;
    /** Performs the task.
        The call may take place on a foreign thread.
    */
    virtual void
    performScheduledTask() = 0;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,61 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_TYPES_H_INCLUDED
#define RIPPLE_NODESTORE_TYPES_H_INCLUDED
#include <xrpl/nodestore/NodeObject.h>
#include <vector>
namespace ripple {
namespace NodeStore {
enum {
    // This is only used to pre-allocate the array for
    // batch objects and does not affect the amount written.
    //
    batchWritePreallocationSize = 256,
    // This sets a limit on the maximum number of writes
    // in a batch. Actual usage can be twice this since
    // we have a new batch growing as we write the old.
    //
    batchWriteLimitSize = 65536
};
/** Return codes from Backend operations. */
enum Status {
    ok,            // Operation completed successfully
    notFound,      // The key is not present in the backend
    dataCorrupt,   // The stored value appears corrupted
    unknown,       // Unspecified failure
    backendError,  // The backend reported an error
    // Values from here up are reserved for backend-specific codes.
    customCode = 100
};
/** A batch of NodeObjects to write at once. */
using Batch = std::vector<std::shared_ptr<NodeObject>>;
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,102 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_BATCHWRITER_H_INCLUDED
#define RIPPLE_NODESTORE_BATCHWRITER_H_INCLUDED
#include <xrpl/nodestore/Scheduler.h>
#include <xrpl/nodestore/Task.h>
#include <xrpl/nodestore/Types.h>
#include <condition_variable>
#include <mutex>
namespace ripple {
namespace NodeStore {
/** Batch-writing assist logic.
    The batch writes are performed with a scheduled task. Use of the
    class is not required. A backend can implement its own write batching,
    or skip write batching if doing so yields a performance benefit.
    @see Scheduler
*/
class BatchWriter : private Task
{
public:
    /** This callback does the actual writing. */
    struct Callback
    {
        virtual ~Callback() = default;
        Callback() = default;
        Callback(Callback const&) = delete;
        Callback&
        operator=(Callback const&) = delete;
        /** Write every object in the batch to the backend. */
        virtual void
        writeBatch(Batch const& batch) = 0;
    };
    /** Create a batch writer. */
    BatchWriter(Callback& callback, Scheduler& scheduler);
    /** Destroy a batch writer.
        Anything pending in the batch is written out before this returns.
    */
    ~BatchWriter();
    /** Store the object.
        This will add to the batch and initiate a scheduled task to
        write the batch out.
    */
    void
    store(std::shared_ptr<NodeObject> const& object);
    /** Get an estimate of the amount of writing I/O pending. */
    int
    getWriteLoad();
private:
    // Task entry point; drains the pending batch.
    void
    performScheduledTask() override;
    // Hands the accumulated batch to the callback.
    void
    writeBatch();
    // Blocks until any in-progress write completes.
    void
    waitForWriting();
private:
    using LockType = std::recursive_mutex;
    using CondvarType = std::condition_variable_any;
    Callback& m_callback;       // Performs the actual backend writes
    Scheduler& m_scheduler;     // Used to schedule the write task
    LockType mWriteMutex;       // Guards the mutable state below
    CondvarType mWriteCondition;
    int mWriteLoad;             // Estimate of pending write work
    bool mWritePending;         // True while a write task is scheduled
    Batch mWriteSet;            // Objects accumulated for the next write
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,162 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_DATABASENODEIMP_H_INCLUDED
#define RIPPLE_NODESTORE_DATABASENODEIMP_H_INCLUDED
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/nodestore/Database.h>
namespace ripple {
namespace NodeStore {
/** A Database implementation backed by a single Backend instance,
    with an optional in-memory TaggedCache in front of it.
*/
class DatabaseNodeImp : public Database
{
public:
    DatabaseNodeImp() = delete;
    DatabaseNodeImp(DatabaseNodeImp const&) = delete;
    DatabaseNodeImp&
    operator=(DatabaseNodeImp const&) = delete;
    /** Construct the database.
        Reads the optional "cache_size" and "cache_age" settings from
        @p config; negative values for either are rejected.
        @throws std::runtime_error if a cache setting is negative.
    */
    DatabaseNodeImp(
        Scheduler& scheduler,
        int readThreads,
        std::shared_ptr<Backend> backend,
        Section const& config,
        beast::Journal j)
        : Database(scheduler, readThreads, config, j)
        , backend_(std::move(backend))
    {
        std::optional<int> cacheSize, cacheAge;
        if (config.exists("cache_size"))
        {
            cacheSize = get<int>(config, "cache_size");
            if (cacheSize.value() < 0)
            {
                Throw<std::runtime_error>(
                    "Specified negative value for cache_size");
            }
        }
        if (config.exists("cache_age"))
        {
            cacheAge = get<int>(config, "cache_age");
            if (cacheAge.value() < 0)
            {
                Throw<std::runtime_error>(
                    "Specified negative value for cache_age");
            }
        }
        // Create the cache unless both settings are explicitly zero.
        // Note: an empty optional compares unequal to 0, so a missing
        // setting also results in a cache (with size/age 0 defaults).
        if (cacheSize != 0 || cacheAge != 0)
        {
            cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
                "DatabaseNodeImp",
                cacheSize.value_or(0),
                std::chrono::minutes(cacheAge.value_or(0)),
                stopwatch(),
                j);
        }
        XRPL_ASSERT(
            backend_,
            "ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
            "backend");
    }
    ~DatabaseNodeImp()
    {
        stop();
    }
    std::string
    getName() const override
    {
        return backend_->getName();
    }
    std::int32_t
    getWriteLoad() const override
    {
        return backend_->getWriteLoad();
    }
    void
    importDatabase(Database& source) override
    {
        importInternal(*backend_.get(), source);
    }
    void
    store(NodeObjectType type, Blob&& data, uint256 const& hash, std::uint32_t)
        override;
    bool
    isSameDB(std::uint32_t, std::uint32_t) override
    {
        // only one database
        return true;
    }
    void
    sync() override
    {
        backend_->sync();
    }
    /** Fetch several objects at once; order of results presumably matches
        the order of @p hashes — confirm against the implementation. */
    std::vector<std::shared_ptr<NodeObject>>
    fetchBatch(std::vector<uint256> const& hashes);
    void
    asyncFetch(
        uint256 const& hash,
        std::uint32_t ledgerSeq,
        std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
        override;
    void
    sweep() override;
private:
    // Cache for database objects. This cache is not always initialized. Check
    // for null before using.
    std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
    // Persistent key/value storage
    std::shared_ptr<Backend> backend_;
    std::shared_ptr<NodeObject>
    fetchNodeObject(
        uint256 const& hash,
        std::uint32_t,
        FetchReport& fetchReport,
        bool duplicate) override;
    void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
    {
        backend_->for_each(f);
    }
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,103 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_DATABASEROTATINGIMP_H_INCLUDED
#define RIPPLE_NODESTORE_DATABASEROTATINGIMP_H_INCLUDED
#include <xrpl/nodestore/DatabaseRotating.h>
#include <mutex>
namespace ripple {
namespace NodeStore {
/** A Database that writes to a "writable" backend while still serving
    reads from an older "archive" backend, and can rotate the pair.
*/
class DatabaseRotatingImp : public DatabaseRotating
{
public:
    DatabaseRotatingImp() = delete;
    DatabaseRotatingImp(DatabaseRotatingImp const&) = delete;
    DatabaseRotatingImp&
    operator=(DatabaseRotatingImp const&) = delete;
    DatabaseRotatingImp(
        Scheduler& scheduler,
        int readThreads,
        std::shared_ptr<Backend> writableBackend,
        std::shared_ptr<Backend> archiveBackend,
        Section const& config,
        beast::Journal j);
    ~DatabaseRotatingImp()
    {
        stop();
    }
    /** Replace the archive backend with the current writable backend and
        install @p newBackend as the new writable backend.
        @param f Invoked with the names of the new writable and archive
                 backends during the rotation.
    */
    void
    rotate(
        std::unique_ptr<NodeStore::Backend>&& newBackend,
        std::function<void(
            std::string const& writableName,
            std::string const& archiveName)> const& f) override;
    std::string
    getName() const override;
    std::int32_t
    getWriteLoad() const override;
    void
    importDatabase(Database& source) override;
    bool
    isSameDB(std::uint32_t, std::uint32_t) override
    {
        // rotating store acts as one logical database
        return true;
    }
    void
    store(NodeObjectType type, Blob&& data, uint256 const& hash, std::uint32_t)
        override;
    void
    sync() override;
    void
    sweep() override;
private:
    std::shared_ptr<Backend> writableBackend_;  // Receives new writes
    std::shared_ptr<Backend> archiveBackend_;   // Read-only older data
    mutable std::mutex mutex_;  // Guards the backend pointers across rotate()
    std::shared_ptr<NodeObject>
    fetchNodeObject(
        uint256 const& hash,
        std::uint32_t,
        FetchReport& fetchReport,
        bool duplicate) override;
    void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,67 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_DECODEDBLOB_H_INCLUDED
#define RIPPLE_NODESTORE_DECODEDBLOB_H_INCLUDED
#include <xrpl/nodestore/NodeObject.h>
namespace ripple {
namespace NodeStore {
/** Parsed key/value blob into NodeObject components.
    This will extract the information required to construct a NodeObject. It
    also does consistency checking and returns the result, so it is possible
    to determine if the data is corrupted without throwing an exception. Not
    all forms of corruption are detected so further analysis will be needed
    to eliminate false negatives.
    @note This defines the database format of a NodeObject!
*/
class DecodedBlob
{
public:
    /** Construct the decoded blob from raw data.
        @param key Pointer to the object's key.
        @param value Pointer to the serialized value.
        @param valueBytes Size of the serialized value in bytes.
    */
    DecodedBlob(void const* key, void const* value, int valueBytes);
    /** Determine if the decoding was successful. */
    bool
    wasOk() const noexcept
    {
        return m_success;
    }
    /** Create a NodeObject from this data. */
    std::shared_ptr<NodeObject>
    createObject();
private:
    bool m_success;                    // True if the blob passed validation
    void const* m_key;                 // The object's key (not owned)
    NodeObjectType m_objectType;       // Decoded type byte
    unsigned char const* m_objectData; // Start of the payload (not owned)
    int m_dataBytes;                   // Payload size in bytes
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,138 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_ENCODEDBLOB_H_INCLUDED
#define RIPPLE_NODESTORE_ENCODEDBLOB_H_INCLUDED
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/nodestore/NodeObject.h>
#include <boost/align/align_up.hpp>
#include <algorithm>
#include <array>
#include <cstdint>
namespace ripple {
namespace NodeStore {
/** Convert a NodeObject from in-memory to database format.
    The (suboptimal) database format consists of:
    - 8 prefix bytes which will typically be 0, but don't assume that's the
      case; earlier versions of the code would use these bytes to store the
      ledger index either once or twice.
    - A single byte denoting the type of the object.
    - The payload.
    @note This class is typically instantiated on the stack, so the size of
          the object does not matter as much as it normally would since the
          allocation is, effectively, free.
          We leverage that fact to preallocate enough memory to handle most
          payloads as part of this object, eliminating the need for dynamic
          allocation. As of this writing ~94% of objects require fewer than
          1024 payload bytes.
*/
class EncodedBlob
{
    /** The 32-byte key of the serialized object. */
    std::array<std::uint8_t, 32> key_;
    /** A pre-allocated buffer for the serialized object.
        The buffer is large enough for the 9 byte prefix and at least
        1024 more bytes. The precise size is calculated automatically
        at compile time so as to avoid wasting space on padding bytes.
    */
    std::array<
        std::uint8_t,
        boost::alignment::align_up(9 + 1024, alignof(std::uint32_t))>
        payload_;
    /** The size of the serialized data. */
    std::uint32_t size_;
    /** A pointer to the serialized data.
        This may point to the pre-allocated buffer (if it is sufficiently
        large) or to a dynamically allocated buffer.
    */
    std::uint8_t* const ptr_;
public:
    /** Serialize @p obj into database format.
        @throws std::runtime_error if @p obj is null.
        @note size_ is initialized before ptr_ (declaration order), so the
              ptr_ initializer may safely select between the inline buffer
              and a heap allocation based on size_.
    */
    explicit EncodedBlob(std::shared_ptr<NodeObject> const& obj)
        : size_([&obj]() {
            XRPL_ASSERT(
                obj,
                "ripple::NodeStore::EncodedBlob::EncodedBlob : non-null input");
            if (!obj)
                throw std::runtime_error(
                    "EncodedBlob: unseated std::shared_ptr used.");
            return obj->getData().size() + 9;
        }())
        , ptr_(
              (size_ <= payload_.size()) ? payload_.data()
                                         : new std::uint8_t[size_])
    {
        // Layout: 8 zero prefix bytes, 1 type byte, then the payload.
        std::fill_n(ptr_, 8, std::uint8_t{0});
        ptr_[8] = static_cast<std::uint8_t>(obj->getType());
        std::copy_n(obj->getData().data(), obj->getData().size(), ptr_ + 9);
        std::copy_n(obj->getHash().data(), obj->getHash().size(), key_.data());
    }
    ~EncodedBlob()
    {
        // ptr_ points into payload_ exactly when the data fit inline;
        // otherwise it owns a heap allocation that must be released.
        XRPL_ASSERT(
            ((ptr_ == payload_.data()) && (size_ <= payload_.size())) ||
                ((ptr_ != payload_.data()) && (size_ > payload_.size())),
            "ripple::NodeStore::EncodedBlob::~EncodedBlob : valid payload "
            "pointer");
        if (ptr_ != payload_.data())
            delete[] ptr_;
    }
    [[nodiscard]] void const*
    getKey() const noexcept
    {
        return static_cast<void const*>(key_.data());
    }
    [[nodiscard]] std::size_t
    getSize() const noexcept
    {
        return size_;
    }
    [[nodiscard]] void const*
    getData() const noexcept
    {
        return static_cast<void const*>(ptr_);
    }
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,74 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_MANAGERIMP_H_INCLUDED
#define RIPPLE_NODESTORE_MANAGERIMP_H_INCLUDED
#include <xrpl/nodestore/Manager.h>
namespace ripple {
namespace NodeStore {
/** Singleton registry of backend Factory objects. */
class ManagerImp : public Manager
{
private:
    std::mutex mutex_;            // Guards list_
    std::vector<Factory*> list_;  // Registered factories (not owned)
public:
    /** Return the process-wide instance. */
    static ManagerImp&
    instance();
    /** Report a request for a backend type that is not registered. */
    static void
    missing_backend();
    ManagerImp();
    ~ManagerImp() = default;
    /** Find a registered factory by name, or nullptr if absent. */
    Factory*
    find(std::string const& name) override;
    void
    insert(Factory& factory) override;
    void
    erase(Factory& factory) override;
    std::unique_ptr<Backend>
    make_Backend(
        Section const& parameters,
        std::size_t burstSize,
        Scheduler& scheduler,
        beast::Journal journal) override;
    std::unique_ptr<Database>
    make_Database(
        std::size_t burstSize,
        Scheduler& scheduler,
        int readThreads,
        Section const& config,
        beast::Journal journal) override;
};
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,346 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_NODESTORE_CODEC_H_INCLUDED
#define RIPPLE_NODESTORE_CODEC_H_INCLUDED
// Disable lz4 deprecation warning due to incompatibility with clang attributes
#define LZ4_DISABLE_DEPRECATE_WARNINGS
#include <xrpl/basics/contract.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/nodestore/NodeObject.h>
#include <xrpl/nodestore/detail/varint.h>
#include <xrpl/protocol/HashPrefix.h>
#include <nudb/detail/field.hpp>
#include <lz4.h>
#include <cstddef>
#include <cstring>
#include <string>
namespace ripple {
namespace NodeStore {
/** Decompress an lz4-compressed blob.
    The blob begins with a varint giving the uncompressed size, followed
    by the lz4 payload.
    @param in Pointer to the compressed blob.
    @param in_size Size of the blob, in bytes.
    @param bf Callable invoked as bf(n) to obtain the n-byte output buffer.
    @return Pointer to and size of the decompressed data.
    @throws std::runtime_error on a malformed blob or a size that does not
            fit lz4's int-based API.
*/
template <class BufferFactory>
std::pair<void const*, std::size_t>
lz4_decompress(void const* in, std::size_t in_size, BufferFactory&& bf)
{
    // lz4 takes sizes as int; reject inputs that would wrap negative.
    if (static_cast<int>(in_size) < 0)
        Throw<std::runtime_error>("lz4_decompress: integer overflow (input)");
    std::size_t outSize = 0;
    auto const n = read_varint(
        reinterpret_cast<std::uint8_t const*>(in), in_size, outSize);
    // A valid blob has a non-empty varint header and a non-empty payload.
    if (n == 0 || n >= in_size)
        Throw<std::runtime_error>("lz4_decompress: invalid blob");
    if (static_cast<int>(outSize) <= 0)
        Throw<std::runtime_error>("lz4_decompress: integer overflow (output)");
    void* const out = bf(outSize);
    // The decompressed size must match the varint header exactly.
    if (LZ4_decompress_safe(
            reinterpret_cast<char const*>(in) + n,
            reinterpret_cast<char*>(out),
            static_cast<int>(in_size - n),
            static_cast<int>(outSize)) != static_cast<int>(outSize))
        Throw<std::runtime_error>("lz4_decompress: LZ4_decompress_safe");
    return {out, outSize};
}
/** Compress a blob with lz4.
    The output is a varint giving the original (uncompressed) size,
    followed by the lz4-compressed payload.
    @param in Pointer to the data to compress.
    @param in_size Size of the data, in bytes.
    @param bf Callable invoked as bf(n) to obtain the output buffer; the
              buffer is sized for the worst case (LZ4_compressBound).
    @return Pointer to and size of the compressed blob.
    @throws std::runtime_error if lz4 compression fails.
*/
template <class BufferFactory>
std::pair<void const*, std::size_t>
lz4_compress(void const* in, std::size_t in_size, BufferFactory&& bf)
{
    using std::runtime_error;
    using namespace nudb::detail;
    std::pair<void const*, std::size_t> result;
    std::array<std::uint8_t, varint_traits<std::size_t>::max> vi;
    auto const n = write_varint(vi.data(), in_size);
    auto const out_max = LZ4_compressBound(in_size);
    std::uint8_t* out = reinterpret_cast<std::uint8_t*>(bf(n + out_max));
    result.first = out;
    // Varint header first, then the compressed payload.
    std::memcpy(out, vi.data(), n);
    auto const out_size = LZ4_compress_default(
        reinterpret_cast<char const*>(in),
        reinterpret_cast<char*>(out + n),
        in_size,
        out_max);
    if (out_size == 0)
        Throw<std::runtime_error>("lz4 compress");
    result.second = n + out_size;
    return result;
}
//------------------------------------------------------------------------------
/*
object types:
0 = Uncompressed
1 = lz4 compressed
2 = inner node compressed
3 = full inner node
*/
/** Decode a stored node object blob into its 525-byte (or raw) form.
    The blob begins with a varint type code (see the table above), and the
    remainder is interpreted according to that type. Inner nodes are
    reconstituted to the canonical 525-byte layout: 8 zero prefix bytes,
    a hotUNKNOWN type byte, the innerNode hash prefix, then 16 hashes.
    @param in Pointer to the stored blob.
    @param in_size Size of the blob, in bytes.
    @param bf Callable invoked as bf(n) to obtain an output buffer when one
              is needed (types 1-3); for type 0 the input is returned as-is.
    @return Pointer to and size of the decoded data.
    @throws std::runtime_error on any malformed blob.
*/
template <class BufferFactory>
std::pair<void const*, std::size_t>
nodeobject_decompress(void const* in, std::size_t in_size, BufferFactory&& bf)
{
    using namespace nudb::detail;
    std::uint8_t const* p = reinterpret_cast<std::uint8_t const*>(in);
    std::size_t type;
    auto const vn = read_varint(p, in_size, type);
    if (vn == 0)
        Throw<std::runtime_error>("nodeobject decompress");
    // Skip past the type varint.
    p += vn;
    in_size -= vn;
    std::pair<void const*, std::size_t> result;
    switch (type)
    {
        case 0: // uncompressed
        {
            // The payload is returned directly; no copy is made.
            result.first = p;
            result.second = in_size;
            break;
        }
        case 1: // lz4
        {
            result = lz4_decompress(p, in_size, bf);
            break;
        }
        case 2: // compressed v1 inner node
        {
            auto const hs = field<std::uint16_t>::size; // Mask
            if (in_size < hs + 32)
                Throw<std::runtime_error>(
                    "nodeobject codec v1: short inner node size: " +
                    std::string("in_size = ") + std::to_string(in_size) +
                    " hs = " + std::to_string(hs));
            istream is(p, in_size);
            std::uint16_t mask;
            read<std::uint16_t>(is, mask); // Mask
            in_size -= hs;
            // Canonical inner node is always 525 bytes.
            result.second = 525;
            void* const out = bf(result.second);
            result.first = out;
            ostream os(out, result.second);
            // Rebuild the 9-byte prefix and the innerNode hash prefix.
            write<std::uint32_t>(os, 0);
            write<std::uint32_t>(os, 0);
            write<std::uint8_t>(os, hotUNKNOWN);
            write<std::uint32_t>(
                os, static_cast<std::uint32_t>(HashPrefix::innerNode));
            if (mask == 0)
                Throw<std::runtime_error>(
                    "nodeobject codec v1: empty inner node");
            // Each set mask bit (MSB first) marks a stored 32-byte hash;
            // clear bits become zero-filled slots.
            std::uint16_t bit = 0x8000;
            for (int i = 16; i--; bit >>= 1)
            {
                if (mask & bit)
                {
                    if (in_size < 32)
                        Throw<std::runtime_error>(
                            "nodeobject codec v1: short inner node subsize: " +
                            std::string("in_size = ") +
                            std::to_string(in_size) +
                            " i = " + std::to_string(i));
                    std::memcpy(os.data(32), is(32), 32);
                    in_size -= 32;
                }
                else
                {
                    std::memset(os.data(32), 0, 32);
                }
            }
            // Trailing bytes indicate a corrupt or mis-encoded blob.
            if (in_size > 0)
                Throw<std::runtime_error>(
                    "nodeobject codec v1: long inner node, in_size = " +
                    std::to_string(in_size));
            break;
        }
        case 3: // full v1 inner node
        {
            if (in_size != 16 * 32) // hashes
                Throw<std::runtime_error>(
                    "nodeobject codec v1: short full inner node, in_size = " +
                    std::to_string(in_size));
            istream is(p, in_size);
            result.second = 525;
            void* const out = bf(result.second);
            result.first = out;
            ostream os(out, result.second);
            write<std::uint32_t>(os, 0);
            write<std::uint32_t>(os, 0);
            write<std::uint8_t>(os, hotUNKNOWN);
            write<std::uint32_t>(
                os, static_cast<std::uint32_t>(HashPrefix::innerNode));
            // All 16 hashes are present; copy them verbatim.
            write(os, is(512), 512);
            break;
        }
        default:
            Throw<std::runtime_error>(
                "nodeobject codec: bad type=" + std::to_string(type));
    };
    return result;
}
/** Return a pointer to a shared block of 32 zero bytes.
    Used to recognize empty (all-zero) hash slots in inner nodes.
*/
template <class = void>
void const*
zero32()
{
    static std::array<char, 32> const zeros{};
    return zeros.data();
}
/** Encode a node object blob for storage.
    A 525-byte blob carrying the innerNode hash prefix is stored with the
    specialized inner-node encodings (type 2 when some hashes are zero,
    type 3 when all 16 are present); everything else is lz4-compressed
    (type 1). The output always begins with a varint type code.
    @param in Pointer to the data to encode.
    @param in_size Size of the data, in bytes.
    @param bf Callable invoked as bf(n) to obtain the output buffer.
    @return Pointer to and size of the encoded blob.
*/
template <class BufferFactory>
std::pair<void const*, std::size_t>
nodeobject_compress(void const* in, std::size_t in_size, BufferFactory&& bf)
{
    using std::runtime_error;
    using namespace nudb::detail;
    // Check for inner node v1
    if (in_size == 525)
    {
        istream is(in, in_size);
        std::uint32_t index;
        std::uint32_t unused;
        std::uint8_t kind;
        std::uint32_t prefix;
        read<std::uint32_t>(is, index);
        read<std::uint32_t>(is, unused);
        read<std::uint8_t>(is, kind);
        read<std::uint32_t>(is, prefix);
        if (safe_cast<HashPrefix>(prefix) == HashPrefix::innerNode)
        {
            // Gather the non-zero hashes; record their positions in a
            // 16-bit mask (MSB corresponds to the first hash).
            std::size_t n = 0;
            std::uint16_t mask = 0;
            std::array<std::uint8_t, 512> vh;
            for (unsigned bit = 0x8000; bit; bit >>= 1)
            {
                void const* const h = is(32);
                if (std::memcmp(h, zero32(), 32) == 0)
                    continue;
                std::memcpy(vh.data() + 32 * n, h, 32);
                mask |= bit;
                ++n;
            }
            std::pair<void const*, std::size_t> result;
            if (n < 16)
            {
                // 2 = v1 inner node compressed
                auto const type = 2U;
                auto const vs = size_varint(type);
                result.second = vs + field<std::uint16_t>::size + // mask
                    n * 32; // hashes
                std::uint8_t* out =
                    reinterpret_cast<std::uint8_t*>(bf(result.second));
                result.first = out;
                ostream os(out, result.second);
                write<varint>(os, type);
                write<std::uint16_t>(os, mask);
                write(os, vh.data(), n * 32);
                return result;
            }
            // 3 = full v1 inner node
            auto const type = 3U;
            auto const vs = size_varint(type);
            result.second = vs + n * 32; // hashes
            std::uint8_t* out =
                reinterpret_cast<std::uint8_t*>(bf(result.second));
            result.first = out;
            ostream os(out, result.second);
            write<varint>(os, type);
            write(os, vh.data(), n * 32);
            return result;
        }
    }
    // Not an inner node: prepend the type varint and lz4-compress.
    std::array<std::uint8_t, varint_traits<std::size_t>::max> vi;
    constexpr std::size_t codecType = 1;
    auto const vn = write_varint(vi.data(), codecType);
    std::pair<void const*, std::size_t> result;
    switch (codecType)
    {
        // case 0 was uncompressed data; we always compress now.
        case 1: // lz4
        {
            std::uint8_t* p;
            // Allocate one buffer for header + payload; hand lz4 the
            // region just past the header.
            auto const lzr = NodeStore::lz4_compress(
                in, in_size, [&p, &vn, &bf](std::size_t n) {
                    p = reinterpret_cast<std::uint8_t*>(bf(vn + n));
                    return p + vn;
                });
            std::memcpy(p, vi.data(), vn);
            result.first = p;
            result.second = vn + lzr.second;
            break;
        }
        default:
            Throw<std::logic_error>(
                "nodeobject codec: unknown=" + std::to_string(codecType));
    };
    return result;
}
// Modifies an inner node to erase the ledger
// sequence and type information so the codec
// verification can pass.
//
// Only blobs that are exactly 525 bytes and carry the innerNode hash
// prefix are touched; anything else is left unchanged.
template <class = void>
void
filter_inner(void* in, std::size_t in_size)
{
    using namespace nudb::detail;
    // Check for inner node
    if (in_size == 525)
    {
        istream is(in, in_size);
        std::uint32_t index;
        std::uint32_t unused;
        std::uint8_t kind;
        std::uint32_t prefix;
        read<std::uint32_t>(is, index);
        read<std::uint32_t>(is, unused);
        read<std::uint8_t>(is, kind);
        read<std::uint32_t>(is, prefix);
        if (safe_cast<HashPrefix>(prefix) == HashPrefix::innerNode)
        {
            // Overwrite just the 9-byte prefix: two zero words and
            // the hotUNKNOWN type byte.
            ostream os(in, 9);
            write<std::uint32_t>(os, 0);
            write<std::uint32_t>(os, 0);
            write<std::uint8_t>(os, hotUNKNOWN);
        }
    }
}
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -0,0 +1,142 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_VARINT_H_INCLUDED
#define BEAST_NUDB_VARINT_H_INCLUDED
#include <nudb/detail/stream.hpp>
#include <cstdint>
#include <type_traits>
namespace ripple {
namespace NodeStore {
// This is a variant of the base128 varint format from
// google protocol buffers:
// https://developers.google.com/protocol-buffers/docs/encoding#varints
// Note: a *variant* — the arithmetic in read_varint/write_varint
// below uses base 127, not base 128.

// Tag type used to select the varint overloads of the
// stream read/write functions via std::enable_if.
struct varint;

// Metafunction to return largest
// possible size of T represented as varint.
// T must be unsigned
template <class T, bool = std::is_unsigned<T>::value>
struct varint_traits;

template <class T>
struct varint_traits<T, true>
{
    explicit varint_traits() = default;

    // Upper bound on encoded bytes: ceil(bit-width / 7).
    static std::size_t constexpr max = (8 * sizeof(T) + 6) / 7;
};
// Returns: Number of bytes consumed or 0 on error,
// if the buffer was too small or t overflowed.
//
// Decodes an unsigned integer produced by write_varint. Bytes with
// the high (0x80) continuation bit set precede the terminating byte
// (high bit clear); the low 7 bits of each byte are digits in base
// 127, least-significant digit first.
// NOTE(review): base 127 (not the protobuf base 128) is a quirk that
// write_varint shares; persisted data presumably depends on it, so
// confirm before "fixing" either side.
//
template <class = void>
std::size_t
read_varint(void const* buf, std::size_t buflen, std::size_t& t)
{
    if (buflen == 0)
        return 0;
    t = 0;
    std::uint8_t const* p = reinterpret_cast<std::uint8_t const*>(buf);
    // Scan forward for the terminating byte (high bit clear);
    // fail if the buffer ends before one is found.
    std::size_t n = 0;
    while (p[n] & 0x80)
        if (++n >= buflen)
            return 0;
    if (++n > buflen)
        return 0;
    // Special case for 0
    // (the accumulation loop below would misreport a single zero
    // byte as overflow, since t would not grow past t0)
    if (n == 1 && *p == 0)
    {
        t = 0;
        return 1;
    }
    auto const used = n;
    // Accumulate digits starting from the most-significant byte
    // (the last one read), i.e. walk the buffer backwards.
    while (n--)
    {
        auto const d = p[n];
        auto const t0 = t;
        t *= 127;
        t += d & 0x7f;
        // NOTE(review): this detects overflow only when the wrapped
        // result does not exceed the previous accumulator value;
        // some wrapping inputs may slip through — confirm intent.
        if (t <= t0)
            return 0; // overflow
    }
    return used;
}
// Returns the number of bytes needed to encode `v` as a varint.
// Every value occupies at least one byte; each additional base-127
// digit adds one more.
template <class T, std::enable_if_t<std::is_unsigned<T>::value>* = nullptr>
std::size_t
size_varint(T v)
{
    std::size_t bytes = 1;
    for (v /= 127; v != 0; v /= 127)
        ++bytes;
    return bytes;
}
// Encodes `v` into the buffer at `p0` as a base-127 varint,
// least-significant digit first, setting the 0x80 continuation
// bit on every byte except the last.
// Returns the number of bytes written.
template <class = void>
std::size_t
write_varint(void* p0, std::size_t v)
{
    auto* out = reinterpret_cast<std::uint8_t*>(p0);
    auto* const begin = out;
    for (;;)
    {
        std::uint8_t const digit = static_cast<std::uint8_t>(v % 127);
        v /= 127;
        if (v == 0)
        {
            // Terminating byte: continuation bit clear.
            *out++ = digit;
            break;
        }
        *out++ = digit | 0x80;
    }
    return static_cast<std::size_t>(out - begin);
}
// input stream
//
// Reads one varint from the stream and stores the decoded value
// in `u`.
template <class T, std::enable_if_t<std::is_same<T, varint>::value>* = nullptr>
void
read(nudb::detail::istream& is, std::size_t& u)
{
    // Consume bytes one at a time until the terminator (a byte
    // with the 0x80 continuation bit clear) has been read.
    // NOTE(review): this assumes consecutive is(1) calls return
    // adjacent addresses so that [p0, p1) is a single contiguous
    // buffer for read_varint — confirm against the
    // nudb::detail::istream contract.
    auto p0 = is(1);
    auto p1 = p0;
    while (*p1++ & 0x80)
        is(1);
    read_varint(p0, p1 - p0, u);
}
// output stream
//
// Appends `t`, encoded as a varint, to the stream. The required
// space is computed up front with size_varint so the os.data()
// request exactly matches the number of bytes write_varint emits.
template <class T, std::enable_if_t<std::is_same<T, varint>::value>* = nullptr>
void
write(nudb::detail::ostream& os, std::size_t t)
{
    write_varint(os.data(size_varint(t)), t);
}
} // namespace NodeStore
} // namespace ripple
#endif

View File

@@ -73,14 +73,8 @@ static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ{32570u};
* used in asserts and tests. */
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_FEES{562177u};
/** The minimum amount of support an amendment should have.
@note This value is used by legacy code and will become obsolete
once the fixAmendmentMajorityCalc amendment activates.
*/
constexpr std::ratio<204, 256> preFixAmendmentMajorityCalcThreshold;
constexpr std::ratio<80, 100> postFixAmendmentMajorityCalcThreshold;
/** The minimum amount of support an amendment should have. */
constexpr std::ratio<80, 100> amendmentMajorityCalcThreshold;
/** The minimum amount of time an amendment must hold a majority */
constexpr std::chrono::seconds const defaultAmendmentMajorityTime = weeks{2};

View File

@@ -225,8 +225,9 @@ enum TERcodes : TERUnderlyingType {
terQUEUED, // Transaction is being held in TxQ until fee drops
terPRE_TICKET, // Ticket is not yet in ledger but might be on its way
terNO_AMM, // AMM doesn't exist for the asset pair
terADDRESS_COLLISION, // Failed to allocate AccountID when trying to
// create a pseudo-account
terADDRESS_COLLISION, // Failed to allocate AccountID when trying to
// create a pseudo-account
terNO_DELEGATE_PERMISSION, // Delegate does not have permission
};
//------------------------------------------------------------------------------
@@ -361,6 +362,9 @@ enum TECcodes : TERUnderlyingType {
tecLIMIT_EXCEEDED = 195,
tecPSEUDO_ACCOUNT = 196,
tecPRECISION_LOSS = 197,
// DEPRECATED: This error code tecNO_DELEGATE_PERMISSION is reserved for
// backward compatibility with historical data on non-prod networks, can be
// reclaimed after those networks reset.
tecNO_DELEGATE_PERMISSION = 198,
};

View File

@@ -33,51 +33,35 @@ namespace ripple {
class TxMeta
{
private:
struct CtorHelper
{
explicit CtorHelper() = default;
};
template <class T>
TxMeta(
uint256 const& txID,
std::uint32_t ledger,
T const& data,
CtorHelper);
public:
TxMeta(
uint256 const& transactionID,
std::uint32_t ledger,
std::optional<uint256> parentBatchId = std::nullopt);
TxMeta(uint256 const& transactionID, std::uint32_t ledger);
TxMeta(uint256 const& txID, std::uint32_t ledger, Blob const&);
TxMeta(uint256 const& txID, std::uint32_t ledger, std::string const&);
TxMeta(uint256 const& txID, std::uint32_t ledger, STObject const&);
uint256 const&
getTxID() const
{
return mTransactionID;
return transactionID_;
}
std::uint32_t
getLgrSeq() const
{
return mLedger;
return ledgerSeq_;
}
int
getResult() const
{
return mResult;
return result_;
}
TER
getResultTER() const
{
return TER::fromInt(mResult);
return TER::fromInt(result_);
}
std::uint32_t
getIndex() const
{
return mIndex;
return index_;
}
void
@@ -104,66 +88,52 @@ public:
STArray&
getNodes()
{
return (mNodes);
return nodes_;
}
STArray const&
getNodes() const
{
return (mNodes);
return nodes_;
}
void
setDeliveredAmount(STAmount const& delivered)
setAdditionalFields(STObject const& obj)
{
mDelivered = delivered;
if (obj.isFieldPresent(sfDeliveredAmount))
deliveredAmount_ = obj.getFieldAmount(sfDeliveredAmount);
if (obj.isFieldPresent(sfParentBatchID))
parentBatchID_ = obj.getFieldH256(sfParentBatchID);
}
STAmount
std::optional<STAmount> const&
getDeliveredAmount() const
{
XRPL_ASSERT(
hasDeliveredAmount(),
"ripple::TxMeta::getDeliveredAmount : non-null delivered amount");
return *mDelivered;
}
bool
hasDeliveredAmount() const
{
return static_cast<bool>(mDelivered);
return deliveredAmount_;
}
void
setParentBatchId(uint256 const& parentBatchId)
setDeliveredAmount(std::optional<STAmount> const& amount)
{
mParentBatchId = parentBatchId;
deliveredAmount_ = amount;
}
uint256
getParentBatchId() const
void
setParentBatchID(std::optional<uint256> const& id)
{
XRPL_ASSERT(
hasParentBatchId(),
"ripple::TxMeta::getParentBatchId : non-null batch id");
return *mParentBatchId;
}
bool
hasParentBatchId() const
{
return static_cast<bool>(mParentBatchId);
parentBatchID_ = id;
}
private:
uint256 mTransactionID;
std::uint32_t mLedger;
std::uint32_t mIndex;
int mResult;
uint256 transactionID_;
std::uint32_t ledgerSeq_;
std::uint32_t index_;
int result_;
std::optional<STAmount> mDelivered;
std::optional<uint256> mParentBatchId;
std::optional<STAmount> deliveredAmount_;
std::optional<uint256> parentBatchID_;
STArray mNodes;
STArray nodes_;
};
} // namespace ripple

View File

@@ -33,11 +33,11 @@
// Keep it sorted in reverse chronological order.
XRPL_FEATURE(LendingProtocol, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
@@ -47,7 +47,6 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
@@ -81,13 +80,11 @@ XRPL_FIX (DisallowIncomingV1, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(XChainBridge, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(AMM, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (ReducedOffersV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (NFTokenRemint, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (NonFungibleTokensV1_2, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (UniversalNumber, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DisallowIncoming, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(ImmediateOfferKilled, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (TrustLinesToSelf, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(NonFungibleTokensV1_1, Supported::yes, VoteBehavior::DefaultNo)
@@ -96,12 +93,10 @@ XRPL_FEATURE(CheckCashMakesTrustLine, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(TicketBatch, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (AmendmentMajorityCalc, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(HardenedValidations, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (PayChanRecipientOwnerDir, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (MasterKeyAsRegularKey, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(MultiSignReserve, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositPreauth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
@@ -143,8 +138,11 @@ XRPL_RETIRE(fix1571)
XRPL_RETIRE(fix1578)
XRPL_RETIRE(fix1623)
XRPL_RETIRE(fix1781)
XRPL_RETIRE(fixAmendmentMajorityCalc)
XRPL_RETIRE(fixCheckThreading)
XRPL_RETIRE(fixMasterKeyAsRegularKey)
XRPL_RETIRE(fixQualityUpperBound)
XRPL_RETIRE(fixReducedOffersV1)
XRPL_RETIRE(fixRmSmallIncreasedQOffers)
XRPL_RETIRE(fixSTAmountCanonicalize)
XRPL_RETIRE(fixTakerDryOfferRemoval)
@@ -153,6 +151,7 @@ XRPL_RETIRE(Escrow)
XRPL_RETIRE(EnforceInvariants)
XRPL_RETIRE(FeeEscalation)
XRPL_RETIRE(FlowCross)
XRPL_RETIRE(ImmediateOfferKilled)
XRPL_RETIRE(MultiSign)
XRPL_RETIRE(PayChan)
XRPL_RETIRE(SortedDirectories)

View File

@@ -316,7 +316,7 @@ TRANSACTION(ttTRUST_SET, 20, TrustSet,
#endif
TRANSACTION(ttACCOUNT_DELETE, 21, AccountDelete,
Delegation::notDelegatable,
uint256{},
featureDeletableAccounts,
mustDeleteAcct,
({
{sfDestination, soeREQUIRED},
@@ -837,7 +837,7 @@ TRANSACTION(ttPERMISSIONED_DOMAIN_DELETE, 63, PermissionedDomainDelete,
#endif
TRANSACTION(ttDELEGATE_SET, 64, DelegateSet,
Delegation::notDelegatable,
featurePermissionDelegation,
featurePermissionDelegationV1_1,
noPriv,
({
{sfAuthorize, soeREQUIRED},

View File

@@ -0,0 +1,89 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2015 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_FAMILY_H_INCLUDED
#define RIPPLE_SHAMAP_FAMILY_H_INCLUDED
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/nodestore/Database.h>
#include <xrpl/shamap/FullBelowCache.h>
#include <xrpl/shamap/TreeNodeCache.h>
#include <cstdint>
namespace ripple {
/** Abstract interface bundling the node database, caches, and
    missing-node acquisition hooks used by SHAMap code.

    Non-copyable and non-movable; implementations are accessed
    through references to this base.
*/
class Family
{
public:
    Family(Family const&) = delete;
    Family(Family&&) = delete;
    Family&
    operator=(Family const&) = delete;
    Family&
    operator=(Family&&) = delete;

    explicit Family() = default;
    virtual ~Family() = default;

    /** Return the NodeStore database backing this family. */
    virtual NodeStore::Database&
    db() = 0;

    virtual NodeStore::Database const&
    db() const = 0;

    /** Return the journal used for logging. */
    virtual beast::Journal const&
    journal() = 0;

    /** Return a pointer to the Family Full Below Cache */
    virtual std::shared_ptr<FullBelowCache>
    getFullBelowCache() = 0;

    /** Return a pointer to the Family Tree Node Cache */
    virtual std::shared_ptr<TreeNodeCache>
    getTreeNodeCache() = 0;

    // NOTE(review): presumably sweeps expired entries from the
    // family's caches — confirm in implementations.
    virtual void
    sweep() = 0;

    /** Acquire ledger that has a missing node by ledger sequence
     *
     * @param refNum Sequence of ledger to acquire.
     * @param nodeHash Hash of missing node to report in throw.
     */
    virtual void
    missingNodeAcquireBySeq(std::uint32_t refNum, uint256 const& nodeHash) = 0;

    /** Acquire ledger that has a missing node by ledger hash
     *
     * @param refHash Hash of ledger to acquire.
     * @param refNum Ledger sequence with missing node.
     */
    virtual void
    missingNodeAcquireByHash(uint256 const& refHash, std::uint32_t refNum) = 0;

    // NOTE(review): presumably restores the family's caches to
    // their initial state — confirm in implementations.
    virtual void
    reset() = 0;
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,153 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_FULLBELOWCACHE_H_INCLUDED
#define RIPPLE_SHAMAP_FULLBELOWCACHE_H_INCLUDED
#include <xrpl/basics/KeyCache.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/beast/insight/Collector.h>
#include <xrpl/beast/utility/Journal.h>
#include <atomic>
#include <string>
namespace ripple {
namespace detail {
/** Remembers which tree keys have all descendants resident.
This optimizes the process of acquiring a complete tree.
*/
class BasicFullBelowCache
{
private:
using CacheType = KeyCache;
public:
enum { defaultCacheTargetSize = 0 };
using key_type = uint256;
using clock_type = typename CacheType::clock_type;
/** Construct the cache.
@param name A label for diagnostics and stats reporting.
@param collector The collector to use for reporting stats.
@param targetSize The cache target size.
@param targetExpirationSeconds The expiration time for items.
*/
BasicFullBelowCache(
std::string const& name,
clock_type& clock,
beast::Journal j,
beast::insight::Collector::ptr const& collector =
beast::insight::NullCollector::New(),
std::size_t target_size = defaultCacheTargetSize,
std::chrono::seconds expiration = std::chrono::minutes{2})
: m_cache(name, target_size, expiration, clock, j, collector), m_gen(1)
{
}
/** Return the clock associated with the cache. */
clock_type&
clock()
{
return m_cache.clock();
}
/** Return the number of elements in the cache.
Thread safety:
Safe to call from any thread.
*/
std::size_t
size() const
{
return m_cache.size();
}
/** Remove expired cache items.
Thread safety:
Safe to call from any thread.
*/
void
sweep()
{
m_cache.sweep();
}
/** Refresh the last access time of an item, if it exists.
Thread safety:
Safe to call from any thread.
@param key The key to refresh.
@return `true` If the key exists.
*/
bool
touch_if_exists(key_type const& key)
{
return m_cache.touch_if_exists(key);
}
/** Insert a key into the cache.
If the key already exists, the last access time will still
be refreshed.
Thread safety:
Safe to call from any thread.
@param key The key to insert.
*/
void
insert(key_type const& key)
{
m_cache.insert(key);
}
/** generation determines whether cached entry is valid */
std::uint32_t
getGeneration(void) const
{
return m_gen;
}
void
clear()
{
m_cache.clear();
++m_gen;
}
void
reset()
{
m_cache.clear();
m_gen = 1;
}
private:
CacheType m_cache;
std::atomic<std::uint32_t> m_gen;
};
} // namespace detail
using FullBelowCache = detail::BasicFullBelowCache;
} // namespace ripple
#endif

View File

@@ -0,0 +1,336 @@
# SHAMap Introduction
March 2020
The `SHAMap` is a Merkle tree (http://en.wikipedia.org/wiki/Merkle_tree).
The `SHAMap` is also a radix trie of radix 16
(http://en.wikipedia.org/wiki/Radix_tree).
The Merkle trie data structure is important because subtrees and even the entire
tree can be compared with other trees in O(1) time by simply comparing the hashes.
This makes it very efficient to determine if two `SHAMap`s contain the same set of
transactions or account state modifications.
The radix trie property is helpful in that a key (hash) of a transaction
or account state can be used to navigate the trie.
A `SHAMap` is a trie with two node types:
1. SHAMapInnerNode
2. SHAMapLeafNode
Both of these nodes directly inherit from SHAMapTreeNode which holds data
common to both of the node types.
All non-leaf nodes have type SHAMapInnerNode.
All leaf nodes have type SHAMapLeafNode.
The root node is always a SHAMapInnerNode.
A given `SHAMap` always stores only one of three kinds of data:
- Transactions with metadata
- Transactions without metadata, or
- Account states.
So all of the leaf nodes of a particular `SHAMap` will always have a uniform type.
The inner nodes carry no data other than the hash of the nodes beneath them.
All nodes are owned by shared_ptrs resident in either other nodes, or in case of
the root node, a shared_ptr in the `SHAMap` itself. The use of shared_ptrs
permits more than one `SHAMap` at a time to share ownership of a node. This
occurs (for example), when a copy of a `SHAMap` is made.
Copies are made with the `snapShot` function as opposed to the `SHAMap` copy
constructor. See the section on `SHAMap` creation for more details about
`snapShot`.
Sequence numbers are used to further customize the node ownership strategy. See
the section on sequence numbers for details on sequence numbers.
![node diagram](https://user-images.githubusercontent.com/46455409/77350005-1ef12c80-6cf9-11ea-9c8d-56410f442859.png)
## Mutability
There are two different ways of building and using a `SHAMap`:
1. A mutable `SHAMap` and
2. An immutable `SHAMap`
The distinction here is not of the classic C++ immutable-means-unchanging sense.
An immutable `SHAMap` contains _nodes_ that are immutable. Also, once a node has
been located in an immutable `SHAMap`, that node is guaranteed to persist in that
`SHAMap` for the lifetime of the `SHAMap`.
So, somewhat counter-intuitively, an immutable `SHAMap` may grow as new nodes are
introduced. But an immutable `SHAMap` will never get smaller (until it entirely
evaporates when it is destroyed). Nodes, once introduced to the immutable
`SHAMap`, also never change their location in memory. So nodes in an immutable
`SHAMap` can be handled using raw pointers (if you're careful).
One consequence of this design is that an immutable `SHAMap` can never be
"trimmed". There is no way to identify unnecessary nodes in an immutable `SHAMap`
that could be removed. Once a node has been brought into the in-memory `SHAMap`,
that node stays in memory for the life of the `SHAMap`.
Most `SHAMap`s are immutable, in the sense that they don't modify or remove their
contained nodes.
An example where a mutable `SHAMap` is required is when we want to apply
transactions to the last closed ledger. To do so we'd make a mutable snapshot
of the state trie and then start applying transactions to it. Because the
snapshot is mutable, changes to nodes in the snapshot will not affect nodes in
other `SHAMap`s.
An example using a immutable ledger would be when there's an open ledger and
some piece of code wishes to query the state of the ledger. In this case we
don't wish to change the state of the `SHAMap`, so we'd use an immutable snapshot.
## Sequence numbers
Both `SHAMap`s and their nodes carry a sequence number. This is simply an
unsigned number that indicates ownership, membership, or non-membership.
`SHAMap`s sequence numbers normally start out as 1. However when a snap-shot of
a `SHAMap` is made, the copy's sequence number is 1 greater than the original.
The nodes of a `SHAMap` have their own copy of a sequence number. If the `SHAMap`
is mutable, meaning it can change, then all of its nodes must have the
same sequence number as the `SHAMap` itself. This enforces an invariant that none
of the nodes are shared with other `SHAMap`s.
When a `SHAMap` needs to have a private copy of a node, not shared by any other
`SHAMap`, it first clones it and then sets the new copy to have a sequence number
equal to the `SHAMap` sequence number. The `unshareNode` is a private utility
which automates the task of first checking if the node is already sharable, and
if so, cloning it and giving it the proper sequence number. An example case
where a private copy is needed is when an inner node needs to have a child
pointer altered. Any modification to a node will require a non-shared node.
When a `SHAMap` decides that it is safe to share a node of its own, it sets the
node's sequence number to 0 (a `SHAMap` never has a sequence number of 0). This
is done for every node in the trie when `SHAMap::walkSubTree` is executed.
Note that other objects in rippled also have sequence numbers (e.g. ledgers).
The `SHAMap` and node sequence numbers should not be confused with these other
sequence numbers (no relation).
## SHAMap Creation
A `SHAMap` is usually not created from vacuum. Once an initial `SHAMap` is
constructed, later `SHAMap`s are usually created by calling snapShot(bool
isMutable) on the original `SHAMap`. The returned `SHAMap` has the expected
characteristics (mutable or immutable) based on the passed in flag.
It is cheaper to make an immutable snapshot of a `SHAMap` than to make a mutable
snapshot. If the `SHAMap` snapshot is mutable then sharable nodes must be
copied before they are placed in the mutable map.
A new `SHAMap` is created with each new ledger round. Transactions not executed
in the previous ledger populate the `SHAMap` for the new ledger.
## Storing SHAMap data in the database
When consensus is reached, the ledger is closed. As part of this process, the
`SHAMap` is stored to the database by calling `SHAMap::flushDirty`.
Both `unshare()` and `flushDirty` walk the `SHAMap` by calling
`SHAMap::walkSubTree`. As `unshare()` walks the trie, nodes are not written to
the database, and as `flushDirty` walks the trie nodes are written to the
database. `walkSubTree` visits every node in the trie. This process must ensure
that each node is only owned by this trie, and so "unshares" as it walks each
node (from the root down). This is done in the `preFlushNode` function by
ensuring that the node has a sequence number equal to that of the `SHAMap`. If
the node doesn't, it is cloned.
For each inner node encountered (starting with the root node), each of the
children are inspected (from 1 to 16). For each child, if it has a non-zero
sequence number (unshareable), the child is first copied. Then if the child is
an inner node, we recurse down to that node's children. Otherwise we've found a
leaf node and that node is written to the database. A count of each leaf node
that is visited is kept. The hash of the data in the leaf node is computed at
this time, and the child is reassigned back into the parent inner node just in
case the COW operation created a new pointer to this leaf node.
After processing each node, the node is then marked as sharable again by setting
its sequence number to 0.
After all of an inner node's children are processed, then its hash is updated
and the inner node is written to the database. Then this inner node is assigned
back into its parent node, again in case the COW operation created a new
pointer to it.
## Walking a SHAMap
The private function `SHAMap::walkTowardsKey` is a good example of _how_ to walk
a `SHAMap`, and the various functions that call `walkTowardsKey` are good examples
of _why_ one would want to walk a `SHAMap` (e.g. `SHAMap::findKey`).
`walkTowardsKey` always starts at the root of the `SHAMap` and traverses down
through the inner nodes, looking for a leaf node along a path in the trie
designated by a `uint256`.
As one walks the trie, one can _optionally_ keep a stack of nodes that one has
passed through. This isn't necessary for walking the trie, but many clients
will use the stack after finding the desired node. For example if one is
deleting a node from the trie, the stack is handy for repairing invariants in
the trie after the deletion.
To assist in walking the trie, `SHAMap::walkTowardsKey` uses a `SHAMapNodeID`
that identifies a node by its path from the root and its depth in the trie. The
path is just a "list" of numbers, each in the range [0 .. 15], depicting which
child was chosen at each node starting from the root. Each choice is represented
by 4 bits, and then packed in sequence into a `uint256` (such that the longest
path possible has 256 / 4 = 64 steps). The high 4 bits of the first byte
identify which child of the root is chosen, the lower 4 bits of the first byte
identify the child of that node, and so on. The `SHAMapNodeID` identifying the
root node has an ID of 0 and a depth of 0. See `selectBranch` for details of
how we use a `SHAMapNodeID` to select a "branch" (child) by indexing into a
path at a given depth.
While the current node is an inner node, traversing down the trie from the root
continues, unless the path indicates a child that does not exist. And in this
case, `nullptr` is returned to indicate no leaf node along the given path
exists. Otherwise a leaf node is found and a (non-owning) pointer to it is
returned. At each step, if a stack is requested, a
`pair<shared_ptr<SHAMapTreeNode>, SHAMapNodeID>` is pushed onto the stack.
When a child node is found by `selectBranch`, the traversal to that node
consists of two steps:
1. Update the `shared_ptr` to the current node.
2. Update the `SHAMapNodeID`.
The first step consists of several attempts to find the node in various places:
1. In the trie itself.
2. In the node cache.
3. In the database.
If the node is not found in the trie, then it is installed into the trie as part
of the traversal process.
## Late-arriving Nodes
As we noted earlier, `SHAMap`s (even immutable ones) may grow. If a `SHAMap` is
searching for a node and runs into an empty spot in the trie, then the `SHAMap`
looks to see if the node exists but has not yet been made part of the map. This
operation is performed in the `SHAMap::fetchNodeNT()` method. The _NT_
in this case stands for 'No Throw'.
The `fetchNodeNT()` method goes through three phases:
1. By calling `cacheLookup()` we attempt to locate the missing node in the
TreeNodeCache. The TreeNodeCache is a cache of immutable SHAMapTreeNodes
that are shared across all `SHAMap`s.
Any SHAMapLeafNode that is immutable has a sequence number of zero
(sharable). When a mutable `SHAMap` is created then its SHAMapTreeNodes are
given non-zero sequence numbers (unsharable). But all nodes in the
TreeNodeCache are immutable, so if one is found here, its sequence number
will be 0.
2. If the node is not in the TreeNodeCache, we attempt to locate the node
in the historic data stored by the database. The call to
`fetchNodeFromDB(hash)` does that work for us.
3. Finally if a filter exists, we check if it can supply the node. This is
typically the LedgerMaster which tracks the current ledger and ledgers
in the process of closing.
## Canonicalize
`canonicalize()` is called every time a node is introduced into the `SHAMap`.
A call to `canonicalize()` stores the node in the `TreeNodeCache` if it does not
already exist in the `TreeNodeCache`.
The calls to `canonicalize()` make sure that if the resulting node is already in
the `SHAMap`, the `TreeNodeCache`, or the database, then we don't create duplicates
by favoring the copy already in the `TreeNodeCache`.
By using `canonicalize()` we manage a thread race condition where two different
threads might both recognize the lack of a SHAMapLeafNode at the same time
(during a fetch). If they both attempt to insert the node into the `SHAMap`, then
`canonicalize` makes sure that the first node in wins and the slower thread
receives back a pointer to the node inserted by the faster thread. Recall
that these two `SHAMap`s will share the same `TreeNodeCache`.
## `TreeNodeCache`
The `TreeNodeCache` is a `std::unordered_map` keyed on the hash of the
`SHAMap` node. The stored type consists of `shared_ptr<SHAMapTreeNode>`,
`weak_ptr<SHAMapTreeNode>`, and a time point indicating the most recent
access of this node in the cache. The time point is based on
`std::chrono::steady_clock`.
The container uses a cryptographically secure hash that is randomly seeded.
The `TreeNodeCache` also carries with it various data used for statistics
and logging, and a target age for the contained nodes. When the target age
for a node is exceeded, and there are no more references to the node, the
node is removed from the `TreeNodeCache`.
## `FullBelowCache`
This cache remembers which trie keys have all of their children resident in a
`SHAMap`. This optimizes the process of acquiring a complete trie. This is used
when creating the missing nodes list. Missing nodes are those nodes that a
`SHAMap` refers to but that are not stored in the local database.
As a depth-first walk of a `SHAMap` is performed, if an inner node answers true to
`isFullBelow()` then it is known that none of this node's children are missing
nodes, and thus that subtree does not need to be walked. These nodes are stored
in the FullBelowCache. Subsequent walks check the FullBelowCache first when
encountering a node, and ignore that subtree if found.
## `SHAMapTreeNode`
This is an abstract base class for the concrete node types. It holds the
following common data:
1. A hash
2. An identifier used to perform copy-on-write operations
### `SHAMapInnerNode`
`SHAMapInnerNode` publicly inherits directly from `SHAMapTreeNode`. It holds
the following data:
1. Up to 16 child nodes, each held with a shared_ptr.
2. A hash for each child.
3. A bitset to indicate which of the 16 children exist.
4. An identifier used to determine whether the map below this node is
fully populated
### `SHAMapLeafNode`
`SHAMapLeafNode` is an abstract class which publicly inherits directly from
`SHAMapTreeNode`. It holds the
following data:
1. A shared_ptr to a const SHAMapItem.
#### `SHAMapAccountStateLeafNode`
`SHAMapAccountStateLeafNode` is a class which publicly inherits directly from
`SHAMapLeafNode`. It is used to represent entries (i.e. account objects, escrow
objects, trust lines, etc.) in a state map.
#### `SHAMapTxLeafNode`
`SHAMapTxLeafNode` is a class which publicly inherits directly from
`SHAMapLeafNode`. It is used to represent transactions in a transaction map.
#### `SHAMapTxPlusMetaLeafNode`
`SHAMapTxPlusMetaLeafNode` is a class which publicly inherits directly from
`SHAMapLeafNode`. It is used to represent transactions along with metadata
associated with those transactions in a transaction map.
## SHAMapItem
This holds the following data:
1. uint256. The hash of the data.
2. vector<unsigned char>. The data (transactions, account info).

View File

@@ -0,0 +1,768 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAP_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAP_H_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/UnorderedContainers.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/nodestore/Database.h>
#include <xrpl/nodestore/NodeObject.h>
#include <xrpl/shamap/Family.h>
#include <xrpl/shamap/SHAMapAddNode.h>
#include <xrpl/shamap/SHAMapInnerNode.h>
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapLeafNode.h>
#include <xrpl/shamap/SHAMapMissingNode.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <set>
#include <stack>
#include <vector>
namespace ripple {
class SHAMapNodeID;
class SHAMapSyncFilter;
/** Describes the current state of a given SHAMap.

    Transitions between states are performed via setImmutable(),
    setSynching() and clearSynching() below.
*/
enum class SHAMapState {
    /** The map is in flux and objects can be added and removed.
        Example: map underlying the open ledger.
    */
    Modifying = 0,

    /** The map is set in stone and cannot be changed.
        Example: a map underlying a given closed ledger.
    */
    Immutable = 1,

    /** The map's hash is fixed but valid nodes may be missing and can be added.
        Example: a map that's syncing a given peer's closing ledger.
    */
    Synching = 2,

    /** The map is known to not be valid.
        Example: usually synching a corrupt ledger.
    */
    Invalid = 3,
};
/** A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
A radix tree is a tree with two properties:
1. The key for a node is represented by the node's position in the tree
(the "prefix property").
2. A node with only one child is merged with that child
(the "merge property")
These properties result in a significantly smaller memory footprint for
a radix tree.
A fan-out of 16 means that each node in the tree has at most 16
children. See https://en.wikipedia.org/wiki/Radix_tree
A Merkle tree is a tree where each non-leaf node is labelled with the hash
of the combined labels of its children nodes.
A key property of a Merkle tree is that testing for node inclusion is
O(log(N)) where N is the number of nodes in the tree.
See https://en.wikipedia.org/wiki/Merkle_tree
*/
class SHAMap
{
private:
    Family& f_;
    beast::Journal journal_;

    /** ID to distinguish this map from all other maps we're sharing nodes
        with (used for copy-on-write). */
    std::uint32_t cowid_ = 1;

    /** The sequence of the ledger that this map references, if any. */
    std::uint32_t ledgerSeq_ = 0;

    intr_ptr::SharedPtr<SHAMapTreeNode> root_;
    mutable SHAMapState state_;
    SHAMapType const type_;
    bool backed_ = true;         // Map is backed by the database
    mutable bool full_ = false;  // Map is believed complete in database

public:
    /** Number of children each non-leaf node has (the 'radix tree' part of the
     * map) */
    static inline constexpr unsigned int branchFactor =
        SHAMapInnerNode::branchFactor;

    /** The depth of the hash map: data is only present in the leaves */
    static inline constexpr unsigned int leafDepth = 64;

    // Pair of (first map's item, second map's item); either side may be null.
    using DeltaItem = std::pair<
        boost::intrusive_ptr<SHAMapItem const>,
        boost::intrusive_ptr<SHAMapItem const>>;
    using Delta = std::map<uint256, DeltaItem>;

    SHAMap() = delete;
    SHAMap(SHAMap const&) = delete;
    SHAMap&
    operator=(SHAMap const&) = delete;

    // Take a snapshot of the given map:
    SHAMap(SHAMap const& other, bool isMutable);

    // build new map
    SHAMap(SHAMapType t, Family& f);
    SHAMap(SHAMapType t, uint256 const& hash, Family& f);

    ~SHAMap() = default;

    Family const&
    family() const
    {
        return f_;
    }

    Family&
    family()
    {
        return f_;
    }

    //--------------------------------------------------------------------------

    /** Iterator to a SHAMap's leaves
        This is always a const iterator.
        Meets the requirements of ForwardRange.
    */
    class const_iterator;

    const_iterator
    begin() const;
    const_iterator
    end() const;

    //--------------------------------------------------------------------------

    // Returns a new map that's a snapshot of this one.
    // Handles copy on write for mutable snapshots.
    std::shared_ptr<SHAMap>
    snapShot(bool isMutable) const;

    /** Mark this SHAMap as "should be full", indicating
        that the local server wants all the corresponding nodes
        in durable storage.
    */
    void
    setFull();

    void
    setLedgerSeq(std::uint32_t lseq);

    bool
    fetchRoot(SHAMapHash const& hash, SHAMapSyncFilter* filter);

    // normal hash access functions

    /** Does the tree have an item with the given ID? */
    bool
    hasItem(uint256 const& id) const;
    bool
    delItem(uint256 const& id);
    bool
    addItem(SHAMapNodeType type, boost::intrusive_ptr<SHAMapItem const> item);
    SHAMapHash
    getHash() const;

    // save a copy if you have a temporary anyway
    bool
    updateGiveItem(
        SHAMapNodeType type,
        boost::intrusive_ptr<SHAMapItem const> item);
    bool
    addGiveItem(
        SHAMapNodeType type,
        boost::intrusive_ptr<SHAMapItem const> item);

    // Save a copy if you need to extend the life
    // of the SHAMapItem beyond this SHAMap
    boost::intrusive_ptr<SHAMapItem const> const&
    peekItem(uint256 const& id) const;
    boost::intrusive_ptr<SHAMapItem const> const&
    peekItem(uint256 const& id, SHAMapHash& hash) const;

    // traverse functions

    /** Find the first item after the given item.
        @param id the identifier of the item.
        @note The item does not need to exist.
    */
    const_iterator
    upper_bound(uint256 const& id) const;

    /** Find the object with the greatest object id smaller than the input id.
        @param id the identifier of the item.
        @note The item does not need to exist.
    */
    const_iterator
    lower_bound(uint256 const& id) const;

    /** Visit every node in this SHAMap
        @param function called with every node visited.
        If function returns false, visitNodes exits.
    */
    void
    visitNodes(std::function<bool(SHAMapTreeNode&)> const& function) const;

    /** Visit every node in this SHAMap that
        is not present in the specified SHAMap
        @param function called with every node visited.
        If function returns false, visitDifferences exits.
    */
    void
    visitDifferences(
        SHAMap const* have,
        std::function<bool(SHAMapTreeNode const&)> const&) const;

    /** Visit every leaf node in this SHAMap
        @param function called with every non inner node visited.
    */
    void
    visitLeaves(
        std::function<
            void(boost::intrusive_ptr<SHAMapItem const> const&)> const&) const;

    // comparison/sync functions

    /** Check for nodes in the SHAMap not available
        Traverse the SHAMap efficiently, maximizing I/O
        concurrency, to discover nodes referenced in the
        SHAMap but not available locally.
        @param maxNodes The maximum number of found nodes to return
        @param filter The filter to use when retrieving nodes
        @return The nodes known to be missing
    */
    std::vector<std::pair<SHAMapNodeID, uint256>>
    getMissingNodes(int maxNodes, SHAMapSyncFilter* filter);

    bool
    getNodeFat(
        SHAMapNodeID const& wanted,
        std::vector<std::pair<SHAMapNodeID, Blob>>& data,
        bool fatLeaves,
        std::uint32_t depth) const;

    /**
     * Get the proof path of the key. The proof path is every node on the path
     * from leaf to root. Sibling hashes are stored in the parent nodes.
     * @param key key of the leaf
     * @return the proof path if found
     */
    std::optional<std::vector<Blob>>
    getProofPath(uint256 const& key) const;

    /**
     * Verify the proof path
     * @param rootHash root hash of the map
     * @param key key of the leaf
     * @param path the proof path
     * @return true if verified successfully
     */
    static bool
    verifyProofPath(
        uint256 const& rootHash,
        uint256 const& key,
        std::vector<Blob> const& path);

    /** Serializes the root in a format appropriate for sending over the wire */
    void
    serializeRoot(Serializer& s) const;

    SHAMapAddNode
    addRootNode(
        SHAMapHash const& hash,
        Slice const& rootNode,
        SHAMapSyncFilter* filter);
    SHAMapAddNode
    addKnownNode(
        SHAMapNodeID const& nodeID,
        Slice const& rawNode,
        SHAMapSyncFilter* filter);

    // status functions
    void
    setImmutable();
    bool
    isSynching() const;
    void
    setSynching();
    void
    clearSynching();
    bool
    isValid() const;

    // caution: otherMap must be accessed only by this function
    // return value: true=successfully completed, false=too different
    bool
    compare(SHAMap const& otherMap, Delta& differences, int maxCount) const;

    /** Convert any modified nodes to shared. */
    int
    unshare();

    /** Flush modified nodes to the nodestore and convert them to shared. */
    int
    flushDirty(NodeObjectType t);

    void
    walkMap(std::vector<SHAMapMissingNode>& missingNodes, int maxMissing) const;

    bool
    walkMapParallel(
        std::vector<SHAMapMissingNode>& missingNodes,
        int maxMissing) const;

    bool
    deepCompare(SHAMap& other) const;  // Intended for debug/test only

    void
    setUnbacked();

    void
    dump(bool withHashes = false) const;
    void
    invariants() const;

private:
    using SharedPtrNodeStack = std::stack<
        std::pair<intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapNodeID>>;
    using DeltaRef = std::pair<
        boost::intrusive_ptr<SHAMapItem const>,
        boost::intrusive_ptr<SHAMapItem const>>;

    // tree node cache operations
    intr_ptr::SharedPtr<SHAMapTreeNode>
    cacheLookup(SHAMapHash const& hash) const;
    void
    canonicalize(SHAMapHash const& hash, intr_ptr::SharedPtr<SHAMapTreeNode>&)
        const;

    // database operations
    intr_ptr::SharedPtr<SHAMapTreeNode>
    fetchNodeFromDB(SHAMapHash const& hash) const;
    intr_ptr::SharedPtr<SHAMapTreeNode>
    fetchNodeNT(SHAMapHash const& hash) const;
    intr_ptr::SharedPtr<SHAMapTreeNode>
    fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;
    intr_ptr::SharedPtr<SHAMapTreeNode>
    fetchNode(SHAMapHash const& hash) const;
    intr_ptr::SharedPtr<SHAMapTreeNode>
    checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const;

    /** Update hashes up to the root */
    void
    dirtyUp(
        SharedPtrNodeStack& stack,
        uint256 const& target,
        intr_ptr::SharedPtr<SHAMapTreeNode> terminal);

    /** Walk towards the specified id, returning the node. Caller must check
        if the return is nullptr, and if not, if the node->peekItem()->key() ==
        id */
    SHAMapLeafNode*
    walkTowardsKey(uint256 const& id, SharedPtrNodeStack* stack = nullptr)
        const;
    /** Return nullptr if key not found */
    SHAMapLeafNode*
    findKey(uint256 const& id) const;

    /** Unshare the node, allowing it to be modified */
    template <class Node>
    intr_ptr::SharedPtr<Node>
    unshareNode(intr_ptr::SharedPtr<Node>, SHAMapNodeID const& nodeID);

    /** prepare a node to be modified before flushing */
    template <class Node>
    intr_ptr::SharedPtr<Node>
    preFlushNode(intr_ptr::SharedPtr<Node> node) const;

    /** write and canonicalize modified node */
    intr_ptr::SharedPtr<SHAMapTreeNode>
    writeNode(NodeObjectType t, intr_ptr::SharedPtr<SHAMapTreeNode> node) const;

    // returns the first item at or below this node
    SHAMapLeafNode*
    firstBelow(
        intr_ptr::SharedPtr<SHAMapTreeNode>,
        SharedPtrNodeStack& stack,
        int branch = 0) const;

    // returns the last item at or below this node
    SHAMapLeafNode*
    lastBelow(
        intr_ptr::SharedPtr<SHAMapTreeNode> node,
        SharedPtrNodeStack& stack,
        int branch = branchFactor) const;

    // helper function for firstBelow and lastBelow
    SHAMapLeafNode*
    belowHelper(
        intr_ptr::SharedPtr<SHAMapTreeNode> node,
        SharedPtrNodeStack& stack,
        int branch,
        std::tuple<
            int,
            std::function<bool(int)>,
            std::function<void(int&)>> const& loopParams) const;

    // Simple descent
    // Get a child of the specified node
    SHAMapTreeNode*
    descend(SHAMapInnerNode*, int branch) const;
    SHAMapTreeNode*
    descendThrow(SHAMapInnerNode*, int branch) const;
    intr_ptr::SharedPtr<SHAMapTreeNode>
    descend(SHAMapInnerNode&, int branch) const;
    intr_ptr::SharedPtr<SHAMapTreeNode>
    descendThrow(SHAMapInnerNode&, int branch) const;

    // Descend with filter
    // If pending, callback is called as if it called fetchNodeNT
    using descendCallback = std::function<
        void(intr_ptr::SharedPtr<SHAMapTreeNode>, SHAMapHash const&)>;
    SHAMapTreeNode*
    descendAsync(
        SHAMapInnerNode* parent,
        int branch,
        SHAMapSyncFilter* filter,
        bool& pending,
        descendCallback&&) const;

    std::pair<SHAMapTreeNode*, SHAMapNodeID>
    descend(
        SHAMapInnerNode* parent,
        SHAMapNodeID const& parentID,
        int branch,
        SHAMapSyncFilter* filter) const;

    // Non-storing
    // Does not hook the returned node to its parent
    intr_ptr::SharedPtr<SHAMapTreeNode>
    descendNoStore(SHAMapInnerNode&, int branch) const;

    /** If there is only one leaf below this node, get its contents */
    boost::intrusive_ptr<SHAMapItem const> const&
    onlyBelow(SHAMapTreeNode*) const;

    bool
    hasInnerNode(SHAMapNodeID const& nodeID, SHAMapHash const& hash) const;
    bool
    hasLeafNode(uint256 const& tag, SHAMapHash const& hash) const;

    SHAMapLeafNode const*
    peekFirstItem(SharedPtrNodeStack& stack) const;
    SHAMapLeafNode const*
    peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const;

    bool
    walkBranch(
        SHAMapTreeNode* node,
        boost::intrusive_ptr<SHAMapItem const> const& otherMapItem,
        bool isFirstMap,
        Delta& differences,
        int& maxCount) const;

    int
    walkSubTree(bool doWrite, NodeObjectType t);

    // Structure to track information about call to
    // getMissingNodes while it's in progress
    struct MissingNodes
    {
        MissingNodes() = delete;
        MissingNodes(MissingNodes const&) = delete;
        MissingNodes&
        operator=(MissingNodes const&) = delete;

        // basic parameters
        int max_;
        SHAMapSyncFilter* filter_;
        int const maxDefer_;
        std::uint32_t generation_;

        // nodes we have discovered to be missing
        std::vector<std::pair<SHAMapNodeID, uint256>> missingNodes_;
        std::set<SHAMapHash> missingHashes_;

        // nodes we are in the process of traversing
        using StackEntry = std::tuple<
            SHAMapInnerNode*,  // pointer to the node
            SHAMapNodeID,      // the node's ID
            int,               // which child we check first
            int,               // which child we check next
            bool>;             // whether we've found any missing children yet

        // We explicitly choose to specify the use of std::deque here, because
        // we need to ensure that pointers and/or references to existing
        // elements will not be invalidated during the course of element
        // insertion and removal. Containers that do not offer this guarantee,
        // such as std::vector, can't be used here.
        std::stack<StackEntry, std::deque<StackEntry>> stack_;

        // nodes we may have acquired from deferred reads
        using DeferredNode = std::tuple<
            SHAMapInnerNode*,                      // parent node
            SHAMapNodeID,                          // parent node ID
            int,                                   // branch
            intr_ptr::SharedPtr<SHAMapTreeNode>>;  // node

        int deferred_;
        std::mutex deferLock_;
        std::condition_variable deferCondVar_;
        std::vector<DeferredNode> finishedReads_;

        // nodes we need to resume after we get their children from deferred
        // reads
        std::map<SHAMapInnerNode*, SHAMapNodeID> resumes_;

        MissingNodes(
            int max,
            SHAMapSyncFilter* filter,
            int maxDefer,
            std::uint32_t generation)
            : max_(max)
            , filter_(filter)
            , maxDefer_(maxDefer)
            , generation_(generation)
            , deferred_(0)
        {
            missingNodes_.reserve(max);
            finishedReads_.reserve(maxDefer);
        }
    };

    // getMissingNodes helper functions
    void
    gmn_ProcessNodes(MissingNodes&, MissingNodes::StackEntry& node);
    void
    gmn_ProcessDeferredReads(MissingNodes&);

    // fetch from DB helper function
    intr_ptr::SharedPtr<SHAMapTreeNode>
    finishFetch(
        SHAMapHash const& hash,
        std::shared_ptr<NodeObject> const& object) const;
};
// Mark this map as one whose nodes should all be present in durable storage.
inline void
SHAMap::setFull()
{
    full_ = true;
}
// Record the sequence of the ledger this map references.
inline void
SHAMap::setLedgerSeq(std::uint32_t lseq)
{
    ledgerSeq_ = lseq;
}
// Freeze the map: no further modifications allowed. Must not be called on an
// Invalid map.
inline void
SHAMap::setImmutable()
{
    XRPL_ASSERT(
        state_ != SHAMapState::Invalid,
        "ripple::SHAMap::setImmutable : state is valid");
    state_ = SHAMapState::Immutable;
}
// True while the map's hash is fixed but nodes are still being acquired.
inline bool
SHAMap::isSynching() const
{
    return state_ == SHAMapState::Synching;
}
// Enter the Synching state (hash fixed, nodes may be added).
inline void
SHAMap::setSynching()
{
    state_ = SHAMapState::Synching;
}
// Leave the Synching state, returning to Modifying.
inline void
SHAMap::clearSynching()
{
    state_ = SHAMapState::Modifying;
}
// A map is valid in any state except Invalid.
inline bool
SHAMap::isValid() const
{
    return state_ != SHAMapState::Invalid;
}
// Detach this map from the node database (nodes kept in memory only).
inline void
SHAMap::setUnbacked()
{
    backed_ = false;
}
//------------------------------------------------------------------------------
/** Forward const iterator over a SHAMap's leaf items.

    Dereferencing yields the SHAMapItem at the current leaf. The iterator
    carries the stack of nodes from the root down to the current leaf so
    the walk can be resumed by operator++. item_ == nullptr denotes end().
*/
class SHAMap::const_iterator
{
public:
    using iterator_category = std::forward_iterator_tag;
    using difference_type = std::ptrdiff_t;
    using value_type = SHAMapItem;
    using reference = value_type const&;
    using pointer = value_type const*;

private:
    SharedPtrNodeStack stack_;     // path from the root to the current leaf
    SHAMap const* map_ = nullptr;  // the map being iterated (non-owning)
    pointer item_ = nullptr;       // current item; nullptr means end()

public:
    const_iterator() = delete;
    const_iterator(const_iterator const& other) = default;
    const_iterator&
    operator=(const_iterator const& other) = default;

    ~const_iterator() = default;

    reference
    operator*() const;
    pointer
    operator->() const;

    const_iterator&
    operator++();
    const_iterator
    operator++(int);

private:
    explicit const_iterator(SHAMap const* map);
    const_iterator(SHAMap const* map, std::nullptr_t);
    const_iterator(SHAMap const* map, pointer item, SharedPtrNodeStack&& stack);

    friend bool
    operator==(const_iterator const& x, const_iterator const& y);
    friend class SHAMap;
};
// Begin-iterator: position on the map's first leaf (or end() if empty).
inline SHAMap::const_iterator::const_iterator(SHAMap const* map) : map_(map)
{
    XRPL_ASSERT(
        map_,
        "ripple::SHAMap::const_iterator::const_iterator : non-null input");
    if (auto temp = map_->peekFirstItem(stack_))
        item_ = temp->peekItem().get();
}
// End-iterator: item_ stays nullptr.
inline SHAMap::const_iterator::const_iterator(SHAMap const* map, std::nullptr_t)
    : map_(map)
{
}
// Position directly on a known item with its root-to-leaf stack.
inline SHAMap::const_iterator::const_iterator(
    SHAMap const* map,
    pointer item,
    SharedPtrNodeStack&& stack)
    : stack_(std::move(stack)), map_(map), item_(item)
{
}
// Precondition: not end() (item_ != nullptr).
inline SHAMap::const_iterator::reference
SHAMap::const_iterator::operator*() const
{
    return *item_;
}
// Precondition: not end() (item_ != nullptr).
inline SHAMap::const_iterator::pointer
SHAMap::const_iterator::operator->() const
{
    return item_;
}
// Advance to the next leaf in key order; becomes end() (item_ == nullptr)
// once the last leaf has been passed.
inline SHAMap::const_iterator&
SHAMap::const_iterator::operator++()
{
    auto const next = map_->peekNextItem(item_->key(), stack_);
    item_ = next ? next->peekItem().get() : nullptr;
    return *this;
}
// Post-increment: return the pre-advance position.
inline SHAMap::const_iterator
SHAMap::const_iterator::operator++(int)
{
    auto tmp = *this;
    ++(*this);
    return tmp;
}
// Iterators compare equal when they point at the same item; comparing
// iterators from different maps is a programming error.
inline bool
operator==(SHAMap::const_iterator const& x, SHAMap::const_iterator const& y)
{
    XRPL_ASSERT(
        x.map_ == y.map_,
        "ripple::operator==(SHAMap::const_iterator, SHAMap::const_iterator) : "
        "inputs map do match");
    return x.item_ == y.item_;
}
// Defined in terms of operator== above.
inline bool
operator!=(SHAMap::const_iterator const& x, SHAMap::const_iterator const& y)
{
    return !(x == y);
}
// Iterator to the first leaf (equals end() when the map is empty).
inline SHAMap::const_iterator
SHAMap::begin() const
{
    return const_iterator(this);
}
// Past-the-end iterator (null item).
inline SHAMap::const_iterator
SHAMap::end() const
{
    return const_iterator(this, nullptr);
}
} // namespace ripple
#endif

View File

@@ -0,0 +1,92 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPACCOUNTSTATELEAFNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPACCOUNTSTATELEAFNODE_H_INCLUDED
#include <xrpl/basics/CountedObject.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapLeafNode.h>
namespace ripple {
/** A leaf node for a state object. */
class SHAMapAccountStateLeafNode final
    : public SHAMapLeafNode,
      public CountedObject<SHAMapAccountStateLeafNode>
{
public:
    /** Construct from an item, computing the node's hash immediately. */
    SHAMapAccountStateLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid)
        : SHAMapLeafNode(std::move(item), cowid)
    {
        updateHash();
    }

    /** Construct from an item with a caller-supplied hash (not recomputed). */
    SHAMapAccountStateLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid,
        SHAMapHash const& hash)
        : SHAMapLeafNode(std::move(item), cowid, hash)
    {
    }

    /** Clone this node (sharing the same item), tagged with the new cowid. */
    intr_ptr::SharedPtr<SHAMapTreeNode>
    clone(std::uint32_t cowid) const final override
    {
        return intr_ptr::make_shared<SHAMapAccountStateLeafNode>(
            item_, cowid, hash_);
    }

    SHAMapNodeType
    getType() const final override
    {
        return SHAMapNodeType::tnACCOUNT_STATE;
    }

    /** Recompute hash_ = sha512Half(leafNode prefix, item data, item key). */
    void
    updateHash() final override
    {
        hash_ = SHAMapHash{
            sha512Half(HashPrefix::leafNode, item_->slice(), item_->key())};
    }

    /** Wire format: item data, item key, then the account-state type byte. */
    void
    serializeForWire(Serializer& s) const final override
    {
        s.addRaw(item_->slice());
        s.addBitString(item_->key());
        s.add8(wireTypeAccountState);
    }

    /** Hashing format: leafNode prefix, item data, item key. */
    void
    serializeWithPrefix(Serializer& s) const final override
    {
        s.add32(HashPrefix::leafNode);
        s.addRaw(item_->slice());
        s.addBitString(item_->key());
    }
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,185 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPADDNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPADDNODE_H_INCLUDED
#include <string>
namespace ripple {
// results of adding nodes
// Tallies the outcomes of processing received map nodes: useful (good),
// invalid (bad), and duplicates of nodes already present.
class SHAMapAddNode
{
private:
    int mGood;
    int mBad;
    int mDuplicate;

public:
    SHAMapAddNode();

    void
    incInvalid();
    void
    incUseful();
    void
    incDuplicate();
    void
    reset();

    int
    getGood() const;
    bool
    isGood() const;
    bool
    isInvalid() const;
    bool
    isUseful() const;

    // Human-readable summary of the tallies.
    std::string
    get() const;

    SHAMapAddNode&
    operator+=(SHAMapAddNode const& n);

    // Factories representing a single node of each outcome.
    static SHAMapAddNode
    duplicate();
    static SHAMapAddNode
    useful();
    static SHAMapAddNode
    invalid();

private:
    SHAMapAddNode(int good, int bad, int duplicate);
};
// Start with all tallies at zero.
inline SHAMapAddNode::SHAMapAddNode() : mGood(0), mBad(0), mDuplicate(0)
{
}
// Private constructor used by the duplicate()/useful()/invalid() factories.
inline SHAMapAddNode::SHAMapAddNode(int good, int bad, int duplicate)
    : mGood(good), mBad(bad), mDuplicate(duplicate)
{
}
// Record one invalid (bad) node.
inline void
SHAMapAddNode::incInvalid()
{
    ++mBad;
}
// Record one useful (good) node.
inline void
SHAMapAddNode::incUseful()
{
    ++mGood;
}
// Record one duplicate node.
inline void
SHAMapAddNode::incDuplicate()
{
    ++mDuplicate;
}
// Clear all tallies back to zero.
inline void
SHAMapAddNode::reset()
{
    mGood = mBad = mDuplicate = 0;
}
// Number of useful nodes recorded.
inline int
SHAMapAddNode::getGood() const
{
    return mGood;
}
// True if any invalid node was recorded.
inline bool
SHAMapAddNode::isInvalid() const
{
    return mBad > 0;
}
// True if any useful node was recorded.
inline bool
SHAMapAddNode::isUseful() const
{
    return mGood > 0;
}
// Merge another result's tallies into this one.
inline SHAMapAddNode&
SHAMapAddNode::operator+=(SHAMapAddNode const& n)
{
    mGood += n.mGood;
    mBad += n.mBad;
    mDuplicate += n.mDuplicate;
    return *this;
}
// Overall assessment: good and duplicate nodes outnumber bad ones.
inline bool
SHAMapAddNode::isGood() const
{
    return (mGood + mDuplicate) > mBad;
}
// Result representing exactly one duplicate node.
inline SHAMapAddNode
SHAMapAddNode::duplicate()
{
    return SHAMapAddNode(0, 0, 1);
}
// Result representing exactly one useful node.
inline SHAMapAddNode
SHAMapAddNode::useful()
{
    return SHAMapAddNode(1, 0, 0);
}
// Result representing exactly one invalid node.
inline SHAMapAddNode
SHAMapAddNode::invalid()
{
    return SHAMapAddNode(0, 1, 0);
}
// Render the tallies as a space-separated "label:count" summary, e.g.
// "good:3 dupe:1". Returns "no nodes processed" when all tallies are zero.
inline std::string
SHAMapAddNode::get() const
{
    std::string result;

    // Append "<label><count>" for any non-zero tally, space-separating
    // successive entries.
    auto const append = [&result](char const* label, int count) {
        if (count <= 0)
            return;
        if (!result.empty())
            result += ' ';
        result += label;
        result += std::to_string(count);
    };

    append("good:", mGood);
    append("bad:", mBad);
    append("dupe:", mDuplicate);

    if (result.empty())
        result = "no nodes processed";

    return result;
}
} // namespace ripple
#endif

View File

@@ -0,0 +1,225 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPINNERNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPINNERNODE_H_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <xrpl/shamap/detail/TaggedPointer.h>
#include <atomic>
#include <cstdint>
#include <optional>
#include <string>
namespace ripple {
class SHAMapInnerNode final : public SHAMapTreeNode,
                              public CountedObject<SHAMapInnerNode>
{
public:
    /** Each inner node has 16 children (the 'radix tree' part of the map) */
    static inline constexpr unsigned int branchFactor = 16;

private:
    /** Opaque type that contains the `hashes` array (array of type
        `SHAMapHash`) and the `children` array (array of type
        `intr_ptr::SharedPtr<SHAMapInnerNode>`).
    */
    TaggedPointer hashesAndChildren_;

    // Generation at which everything below this node was known to be present
    // locally; compared against the current generation in isFullBelow().
    std::uint32_t fullBelowGen_ = 0;

    // Bitset: bit m set means branch m is occupied.
    std::uint16_t isBranch_ = 0;

    /** A bitlock for the children of this node, with one bit per child */
    mutable std::atomic<std::uint16_t> lock_ = 0;

    /** Convert arrays stored in `hashesAndChildren_` so they can store the
        requested number of children.
        @param toAllocate allocate space for at least this number of children
        (must be <= branchFactor)
        @note the arrays may allocate more than the requested value in
        `toAllocate`. This is due to the implementation of TaggedPointer, which
        only supports allocating arrays of 4 different sizes.
    */
    void
    resizeChildArrays(std::uint8_t toAllocate);

    /** Get the child's index inside the `hashes` or `children` array (stored
        in `hashesAndChildren_`). These arrays may or may not be sparse.
        The optional will be empty if an empty branch is requested and the
        arrays are sparse.
        @param i index of the requested child
    */
    std::optional<int>
    getChildIndex(int i) const;

    /** Call the `f` callback for all 16 (branchFactor) branches - even if
        the branch is empty.
        @param f a one parameter callback function. The parameter is the
        child's hash.
    */
    template <class F>
    void
    iterChildren(F&& f) const;

    /** Call the `f` callback for all non-empty branches.
        @param f a two parameter callback function. The first parameter is
        the branch number, the second parameter is the index into the array.
        For dense formats these are the same, for sparse they may be
        different.
    */
    template <class F>
    void
    iterNonEmptyChildIndexes(F&& f) const;

public:
    explicit SHAMapInnerNode(
        std::uint32_t cowid,
        std::uint8_t numAllocatedChildren = 2);

    SHAMapInnerNode(SHAMapInnerNode const&) = delete;
    SHAMapInnerNode&
    operator=(SHAMapInnerNode const&) = delete;
    ~SHAMapInnerNode();

    // Needed to support intrusive weak pointers
    void
    partialDestructor() override;

    intr_ptr::SharedPtr<SHAMapTreeNode>
    clone(std::uint32_t cowid) const override;

    SHAMapNodeType
    getType() const override
    {
        return SHAMapNodeType::tnINNER;
    }

    bool
    isLeaf() const override
    {
        return false;
    }

    bool
    isInner() const override
    {
        return true;
    }

    bool
    isEmpty() const;
    bool
    isEmptyBranch(int m) const;
    int
    getBranchCount() const;
    SHAMapHash const&
    getChildHash(int m) const;

    void
    setChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> child);
    void
    shareChild(int m, intr_ptr::SharedPtr<SHAMapTreeNode> const& child);
    SHAMapTreeNode*
    getChildPointer(int branch);
    intr_ptr::SharedPtr<SHAMapTreeNode>
    getChild(int branch);
    intr_ptr::SharedPtr<SHAMapTreeNode>
    canonicalizeChild(int branch, intr_ptr::SharedPtr<SHAMapTreeNode> node);

    // sync functions
    bool
    isFullBelow(std::uint32_t generation) const;
    void
    setFullBelowGen(std::uint32_t gen);

    void
    updateHash() override;

    /** Recalculate the hash of all children and this node. */
    void
    updateHashDeep();

    void
    serializeForWire(Serializer&) const override;
    void
    serializeWithPrefix(Serializer&) const override;

    std::string
    getString(SHAMapNodeID const&) const override;
    void
    invariants(bool is_root = false) const override;

    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeFullInner(Slice data, SHAMapHash const& hash, bool hashValid);
    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeCompressedInner(Slice data);
};
// True when no branch is occupied.
inline bool
SHAMapInnerNode::isEmpty() const
{
    return isBranch_ == 0;
}
// True when branch m carries no child (bit m of isBranch_ is clear).
inline bool
SHAMapInnerNode::isEmptyBranch(int m) const
{
    return (isBranch_ & (1 << m)) == 0;
}
// Number of occupied branches (population count of the branch bitset).
inline int
SHAMapInnerNode::getBranchCount() const
{
    return popcnt16(isBranch_);
}
// True if this subtree was marked fully present during the given
// full-below-cache generation.
inline bool
SHAMapInnerNode::isFullBelow(std::uint32_t generation) const
{
    return fullBelowGen_ == generation;
}
// Record the generation at which this subtree was known to be fully present.
inline void
SHAMapInnerNode::setFullBelowGen(std::uint32_t gen)
{
    fullBelowGen_ = gen;
}
} // namespace ripple
#endif

View File

@@ -0,0 +1,192 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPITEM_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPITEM_H_INCLUDED
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/CountedObject.h>
#include <xrpl/basics/SlabAllocator.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/smart_ptr/intrusive_ptr.hpp>
namespace ripple {
// an item stored in a SHAMap
//
// The payload bytes are stored inline, immediately after the object itself
// (see the constructor's memcpy and data()); instances must therefore be
// created only through make_shamapitem, which over-allocates accordingly.
class SHAMapItem : public CountedObject<SHAMapItem>
{
    // These are used to support boost::intrusive_ptr reference counting
    // These functions are used internally by boost::intrusive_ptr to handle
    // lifetime management.
    friend void
    intrusive_ptr_add_ref(SHAMapItem const* x);

    friend void
    intrusive_ptr_release(SHAMapItem const* x);

    // This is the interface for creating new instances of this class.
    friend boost::intrusive_ptr<SHAMapItem>
    make_shamapitem(uint256 const& tag, Slice data);

private:
    uint256 const tag_;

    // We use std::uint32_t to minimize the size; there's no SHAMapItem whose
    // size exceeds 4GB and there won't ever be (famous last words?), so this
    // is safe.
    std::uint32_t const size_;

    // This is the reference count used to support boost::intrusive_ptr
    mutable std::atomic<std::uint32_t> refcount_ = 1;

    // Because of the unusual way in which SHAMapItem objects are constructed
    // the only way to properly create one is to first allocate enough memory
    // so we limit this constructor to codepaths that do this right and limit
    // arbitrary construction.
    SHAMapItem(uint256 const& tag, Slice data)
        : tag_(tag), size_(static_cast<std::uint32_t>(data.size()))
    {
        // Copy the payload into the trailing storage past this object.
        std::memcpy(
            reinterpret_cast<std::uint8_t*>(this) + sizeof(*this),
            data.data(),
            data.size());
    }

public:
    SHAMapItem() = delete;
    SHAMapItem(SHAMapItem const& other) = delete;
    SHAMapItem&
    operator=(SHAMapItem const& other) = delete;
    SHAMapItem(SHAMapItem&& other) = delete;
    SHAMapItem&
    operator=(SHAMapItem&&) = delete;

    // The item's key (its tag).
    uint256 const&
    key() const
    {
        return tag_;
    }

    // Payload size in bytes.
    std::size_t
    size() const
    {
        return size_;
    }

    // Pointer to the inline payload stored just past this object.
    void const*
    data() const
    {
        return reinterpret_cast<std::uint8_t const*>(this) + sizeof(*this);
    }

    // Non-owning view over the payload.
    Slice
    slice() const
    {
        return {data(), size()};
    }
};
namespace detail {

// clang-format off
// The slab cutoffs and the number of megabytes per allocation are customized
// based on the number of objects of each size we expect to need at any point
// in time and with an eye to minimize the number of slack bytes in a block.
// Requests that no slab can serve are handled by make_shamapitem, which
// falls back to a precisely-sized heap allocation.
inline SlabAllocatorSet<SHAMapItem> slabber({
    { 128, megabytes(std::size_t(60)) },
    { 192, megabytes(std::size_t(46)) },
    { 272, megabytes(std::size_t(60)) },
    { 384, megabytes(std::size_t(56)) },
    { 564, megabytes(std::size_t(40)) },
    { 772, megabytes(std::size_t(46)) },
    { 1052, megabytes(std::size_t(60)) },
});
// clang-format on

}  // namespace detail
/** Increment the reference count of an item (boost::intrusive_ptr hook). */
inline void
intrusive_ptr_add_ref(SHAMapItem const* x)
{
    // fetch_add returns the value the counter held *before* the increment.
    // A prior value of zero means someone released the last reference while
    // we were trying to grab a new one; that is an unrecoverable logic
    // error, so stop hard.
    if (x->refcount_.fetch_add(1) == 0)
        LogicError("SHAMapItem: the reference count is 0!");
}
/** Decrement the reference count of an item (boost::intrusive_ptr hook);
    destroys and frees the item when the count reaches zero. */
inline void
intrusive_ptr_release(SHAMapItem const* x)
{
    if (--x->refcount_ == 0)
    {
        auto p = reinterpret_cast<std::uint8_t const*>(x);

        // The SHAMapItem destructor isn't trivial (because the destructor
        // for CountedObject isn't) so we can't avoid calling it here, but
        // plan for a future where we might not need to.
        if constexpr (!std::is_trivially_destructible_v<SHAMapItem>)
            std::destroy_at(x);

        // If the slabber doesn't claim this pointer, it was allocated
        // manually (see make_shamapitem), so we free it manually.
        if (!detail::slabber.deallocate(const_cast<std::uint8_t*>(p)))
            delete[] p;
    }
}
/** Create a new SHAMapItem holding the given key and payload.

    The item header and its payload share a single allocation: the slab
    allocators are tried first, with a precisely-sized heap allocation as
    the fallback.
*/
inline boost::intrusive_ptr<SHAMapItem>
make_shamapitem(uint256 const& tag, Slice data)
{
    XRPL_ASSERT(
        data.size() <= megabytes<std::size_t>(16),
        "ripple::make_shamapitem : maximum input size");

    auto* mem = detail::slabber.allocate(data.size());

    // None of the slab allocators could serve this request; fall back to
    // the standard library and grab a precisely-sized memory block:
    if (mem == nullptr)
        mem = new std::uint8_t[sizeof(SHAMapItem) + data.size()];

    // Deliberately no reference count increment here: the SHAMapItem
    // constructor explicitly sets the count to 1, and "the refcount is
    // never zero before incrementing" is used as an invariant elsewhere.
    return {new (mem) SHAMapItem{tag, data}, false};
}
// Note: alignment is always a power of two, so the previous assertion
// `alignof(SHAMapItem) != 40` could never fail and checked nothing; the
// meaningful constraint is the one below.
static_assert(alignof(SHAMapItem) == 8 || alignof(SHAMapItem) == 4);

/** Create a copy of an existing item (the copy has its own allocation). */
inline boost::intrusive_ptr<SHAMapItem>
make_shamapitem(SHAMapItem const& other)
{
    return make_shamapitem(other.key(), other.slice());
}
} // namespace ripple
#endif

View File

@@ -0,0 +1,83 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPLEAFNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPLEAFNODE_H_INCLUDED
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <cstdint>
namespace ripple {
/** Intermediate base class for SHAMap leaf nodes; each leaf holds one item. */
class SHAMapLeafNode : public SHAMapTreeNode
{
protected:
    // The item this leaf holds: shared, immutable, reference-counted.
    boost::intrusive_ptr<SHAMapItem const> item_;

    /** Construct a leaf; derived-class constructors compute the hash
        (they call updateHash()). */
    SHAMapLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid);

    /** Construct a leaf with an already-known hash. */
    SHAMapLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid,
        SHAMapHash const& hash);

public:
    SHAMapLeafNode(SHAMapLeafNode const&) = delete;
    SHAMapLeafNode&
    operator=(SHAMapLeafNode const&) = delete;

    bool
    isLeaf() const final override
    {
        return true;
    }

    bool
    isInner() const final override
    {
        return false;
    }

    void
    invariants(bool is_root = false) const final override;

public:
    /** Returns the item this leaf holds. */
    boost::intrusive_ptr<SHAMapItem const> const&
    peekItem() const;

    /** Set the item that this node points to and update the node's hash.

        @param i the new item
        @return false if the change was, effectively, a noop (that is, if the
                hash was unchanged); true otherwise.
    */
    bool
    setItem(boost::intrusive_ptr<SHAMapItem const> i);

    std::string
    getString(SHAMapNodeID const&) const final override;
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,74 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPMISSINGNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPMISSINGNODE_H_INCLUDED
#include <xrpl/basics/base_uint.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <iosfwd>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace ripple {
/** The kinds of trees a SHAMap can represent. */
enum class SHAMapType {
    TRANSACTION = 1,  // A tree of transactions
    STATE = 2,        // A tree of state nodes
    FREE = 3,         // A tree not part of a ledger
};

/** Render a SHAMapType as a human-readable label.

    Unrecognized values are rendered as their underlying numeric value.
*/
inline std::string
to_string(SHAMapType t)
{
    if (t == SHAMapType::TRANSACTION)
        return "Transaction Tree";

    if (t == SHAMapType::STATE)
        return "State Tree";

    if (t == SHAMapType::FREE)
        return "Free Tree";

    return std::to_string(safe_cast<std::underlying_type_t<SHAMapType>>(t));
}
class SHAMapMissingNode : public std::runtime_error
{
public:
SHAMapMissingNode(SHAMapType t, SHAMapHash const& hash)
: std::runtime_error(
"Missing Node: " + to_string(t) + ": hash " + to_string(hash))
{
}
SHAMapMissingNode(SHAMapType t, uint256 const& id)
: std::runtime_error(
"Missing Node: " + to_string(t) + ": id " + to_string(id))
{
}
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,163 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPNODEID_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPNODEID_H_INCLUDED
#include <xrpl/basics/CountedObject.h>
#include <xrpl/basics/base_uint.h>
#include <optional>
#include <string>
#include <tuple>
namespace ripple {
/** Identifies a node inside a SHAMap by its depth and path bits. */
class SHAMapNodeID : public CountedObject<SHAMapNodeID>
{
private:
    uint256 id_;
    unsigned int depth_ = 0;

public:
    SHAMapNodeID() = default;
    SHAMapNodeID(SHAMapNodeID const& other) = default;
    SHAMapNodeID(unsigned int depth, uint256 const& hash);

    SHAMapNodeID&
    operator=(SHAMapNodeID const& other) = default;

    /** True for the root node, which lives at depth zero. */
    bool
    isRoot() const
    {
        return depth_ == 0;
    }

    // Get the wire format (256-bit nodeID, 1-byte depth)
    std::string
    getRawString() const;

    /** The depth of this node in the tree. */
    unsigned int
    getDepth() const
    {
        return depth_;
    }

    /** The (masked) key bits identifying this node. */
    uint256 const&
    getNodeID() const
    {
        return id_;
    }

    /** The ID of this node's m-th child. */
    SHAMapNodeID
    getChildNodeID(unsigned int m) const;

    /**
     * Create a SHAMapNodeID of a node with the depth of the node and
     * the key of a leaf
     *
     * @param depth the depth of the node
     * @param key the key of a leaf
     * @return SHAMapNodeID of the node
     */
    static SHAMapNodeID
    createID(int depth, uint256 const& key);

    // FIXME-C++20: use spaceship and operator synthesis
    /** Comparison operators: lexicographic on (depth, id). */
    bool
    operator<(SHAMapNodeID const& n) const
    {
        return std::tie(depth_, id_) < std::tie(n.depth_, n.id_);
    }

    bool
    operator>(SHAMapNodeID const& n) const
    {
        return std::tie(depth_, id_) > std::tie(n.depth_, n.id_);
    }

    bool
    operator<=(SHAMapNodeID const& n) const
    {
        return std::tie(depth_, id_) <= std::tie(n.depth_, n.id_);
    }

    bool
    operator>=(SHAMapNodeID const& n) const
    {
        return std::tie(depth_, id_) >= std::tie(n.depth_, n.id_);
    }

    bool
    operator==(SHAMapNodeID const& n) const
    {
        return std::tie(depth_, id_) == std::tie(n.depth_, n.id_);
    }

    bool
    operator!=(SHAMapNodeID const& n) const
    {
        return std::tie(depth_, id_) != std::tie(n.depth_, n.id_);
    }
};
/** Render a SHAMapNodeID for logging and debugging. */
inline std::string
to_string(SHAMapNodeID const& node)
{
    if (!node.isRoot())
        return "NodeID(" + std::to_string(node.getDepth()) + "," +
            to_string(node.getNodeID()) + ")";

    return "NodeID(root)";
}
/** Stream a SHAMapNodeID using its to_string representation. */
inline std::ostream&
operator<<(std::ostream& out, SHAMapNodeID const& node)
{
    return out << to_string(node);
}
/** Return an object representing a serialized SHAMap Node ID
 *
 * @param s A string of bytes containing the serialized node ID
 * @param data a non-null pointer to a buffer of @param size bytes.
 * @param size the size, in bytes, of the buffer pointed to by @param data.
 * @return A seated optional if the buffer contained a serialized SHAMap
 *         node ID and an unseated optional otherwise.
 */
/** @{ */
[[nodiscard]] std::optional<SHAMapNodeID>
deserializeSHAMapNodeID(void const* data, std::size_t size);

// Convenience overload: forwards the string's bytes to the pointer/size form.
[[nodiscard]] inline std::optional<SHAMapNodeID>
deserializeSHAMapNodeID(std::string const& s)
{
    return deserializeSHAMapNodeID(s.data(), s.size());
}
/** @} */

/** Returns the branch that would contain the given hash */
[[nodiscard]] unsigned int
selectBranch(SHAMapNodeID const& id, uint256 const& hash);
} // namespace ripple
#endif

View File

@@ -0,0 +1,54 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPSYNCFILTER_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPSYNCFILTER_H_INCLUDED
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <optional>
/** Callback for filtering SHAMap during sync. */
namespace ripple {
/** Callback interface used while synchronizing a SHAMap: implementations
    may supply nodes from an alternate source (getNode) and are notified of
    nodes as they are acquired (gotNode). */
class SHAMapSyncFilter
{
public:
    virtual ~SHAMapSyncFilter() = default;
    SHAMapSyncFilter() = default;
    SHAMapSyncFilter(SHAMapSyncFilter const&) = delete;
    SHAMapSyncFilter&
    operator=(SHAMapSyncFilter const&) = delete;

    /** Called when a node has been acquired.

        @param fromFilter presumably true when the node came from this
               filter's own getNode() — TODO confirm against implementations.
        @param nodeHash the hash identifying the node
        @param ledgerSeq presumably the sequence of the associated ledger —
               confirm with callers.
        @param nodeData the node's serialized data. Note that the nodeData
               is overwritten (moved from) by this call.
        @param type the kind of node (inner, transaction, account state, ...)
    */
    virtual void
    gotNode(
        bool fromFilter,
        SHAMapHash const& nodeHash,
        std::uint32_t ledgerSeq,
        Blob&& nodeData,
        SHAMapNodeType type) const = 0;

    /** Look up a node by hash; an unseated optional means "not available". */
    virtual std::optional<Blob>
    getNode(SHAMapHash const& nodeHash) const = 0;
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,194 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPTREENODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPTREENODE_H_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/protocol/Serializer.h>
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapNodeID.h>
#include <cstdint>
#include <string>
namespace ripple {
// These are wire-protocol identifiers used during serialization to encode the
// type of a node. They must not be changed arbitrarily.
static constexpr unsigned char const wireTypeTransaction = 0;
static constexpr unsigned char const wireTypeAccountState = 1;
static constexpr unsigned char const wireTypeInner = 2;
static constexpr unsigned char const wireTypeCompressedInner = 3;
static constexpr unsigned char const wireTypeTransactionWithMeta = 4;

/** The in-memory classification of a SHAMap node. */
enum class SHAMapNodeType {
    tnINNER = 1,
    tnTRANSACTION_NM = 2,  // transaction, no metadata
    tnTRANSACTION_MD = 3,  // transaction, with metadata
    tnACCOUNT_STATE = 4
};
/** Abstract base class for all nodes in a SHAMap (inner nodes and leaves). */
class SHAMapTreeNode : public IntrusiveRefCounts
{
protected:
    // The hash of this node; see updateHash() and getHash().
    SHAMapHash hash_;

    /** Determines the owning SHAMap, if any. Used for copy-on-write semantics.
        If this value is 0, the node is not dirty and does not need to be
        flushed. It is eligible for sharing and may be included in multiple
        SHAMap instances.
    */
    std::uint32_t cowid_;

protected:
    SHAMapTreeNode(SHAMapTreeNode const&) = delete;
    SHAMapTreeNode&
    operator=(SHAMapTreeNode const&) = delete;

    /** Construct a node
        @param cowid The identifier of a SHAMap. For more, see #cowid_
        @param hash The hash associated with this node, if any.
    */
    /** @{ */
    explicit SHAMapTreeNode(std::uint32_t cowid) noexcept : cowid_(cowid)
    {
    }

    explicit SHAMapTreeNode(
        std::uint32_t cowid,
        SHAMapHash const& hash) noexcept
        : hash_(hash), cowid_(cowid)
    {
    }
    /** @} */

public:
    virtual ~SHAMapTreeNode() noexcept = default;

    // Needed to support weak intrusive pointers
    virtual void
    partialDestructor() {};

    /** \defgroup SHAMap Copy-on-Write Support

        By nature, a node may appear in multiple SHAMap instances. Rather
        than actually duplicating these nodes, SHAMap opts to be memory
        efficient and uses copy-on-write semantics for nodes.

        Only nodes that are not modified and don't need to be flushed back
        can be shared. Once a node needs to be changed, it must first be
        copied and the copy must be marked as not shareable.

        Note that just because a node may not be *owned* by a given SHAMap
        instance does not mean that the node is NOT a part of any SHAMap. It
        only means that the node is not owned exclusively by any one SHAMap.

        For more on copy-on-write, check out:
            https://en.wikipedia.org/wiki/Copy-on-write
    */
    /** @{ */
    /** Returns the SHAMap that owns this node.

        @return the ID of the SHAMap that owns this node, or 0 if the
                node is not owned by any SHAMap and is a candidate for sharing.
    */
    std::uint32_t
    cowid() const
    {
        return cowid_;
    }

    /** If this node is shared with another map, mark it as no longer shared.

        Only nodes that are not modified and do not need to be flushed back
        should be marked as unshared.
    */
    void
    unshare()
    {
        cowid_ = 0;
    }

    /** Make a copy of this node, setting the owner. */
    virtual intr_ptr::SharedPtr<SHAMapTreeNode>
    clone(std::uint32_t cowid) const = 0;
    /** @} */

    /** Recalculate the hash of this node. */
    virtual void
    updateHash() = 0;

    /** Return the hash of this node. */
    SHAMapHash const&
    getHash() const
    {
        return hash_;
    }

    /** Determines the type of node. */
    virtual SHAMapNodeType
    getType() const = 0;

    /** Determines if this is a leaf node. */
    virtual bool
    isLeaf() const = 0;

    /** Determines if this is an inner node. */
    virtual bool
    isInner() const = 0;

    /** Serialize the node in a format appropriate for sending over the wire */
    virtual void
    serializeForWire(Serializer&) const = 0;

    /** Serialize the node in a format appropriate for hashing */
    virtual void
    serializeWithPrefix(Serializer&) const = 0;

    virtual std::string
    getString(SHAMapNodeID const&) const;

    virtual void
    invariants(bool is_root = false) const = 0;

    /** Reconstruct a node from its hashing (prefixed) serialization. */
    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeFromPrefix(Slice rawNode, SHAMapHash const& hash);

    /** Reconstruct a node from its wire serialization. */
    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeFromWire(Slice rawNode);

private:
    // Helpers for the factories above, one per serialized node kind.
    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeTransaction(Slice data, SHAMapHash const& hash, bool hashValid);

    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeAccountState(Slice data, SHAMapHash const& hash, bool hashValid);

    static intr_ptr::SharedPtr<SHAMapTreeNode>
    makeTransactionWithMeta(Slice data, SHAMapHash const& hash, bool hashValid);
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,88 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPTXLEAFNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPTXLEAFNODE_H_INCLUDED
#include <xrpl/basics/CountedObject.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapLeafNode.h>
namespace ripple {
/** A leaf node for a transaction. No metadata is included. */
class SHAMapTxLeafNode final : public SHAMapLeafNode,
                               public CountedObject<SHAMapTxLeafNode>
{
public:
    /** Construct the leaf and compute its hash from the item. */
    SHAMapTxLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid)
        : SHAMapLeafNode(std::move(item), cowid)
    {
        updateHash();
    }

    /** Construct the leaf with an already-known hash. */
    SHAMapTxLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid,
        SHAMapHash const& hash)
        : SHAMapLeafNode(std::move(item), cowid, hash)
    {
    }

    intr_ptr::SharedPtr<SHAMapTreeNode>
    clone(std::uint32_t cowid) const final override
    {
        // The clone shares the (immutable) item and hash, but carries the
        // requested owner id.
        return intr_ptr::make_shared<SHAMapTxLeafNode>(item_, cowid, hash_);
    }

    SHAMapNodeType
    getType() const final override
    {
        return SHAMapNodeType::tnTRANSACTION_NM;
    }

    void
    updateHash() final override
    {
        // A bare transaction is hashed over its serialized bytes using the
        // transactionID prefix.
        auto const digest =
            sha512Half(HashPrefix::transactionID, item_->slice());
        hash_ = SHAMapHash{digest};
    }

    void
    serializeForWire(Serializer& s) const final override
    {
        // Wire format: the raw transaction followed by the type marker.
        s.addRaw(item_->slice());
        s.add8(wireTypeTransaction);
    }

    void
    serializeWithPrefix(Serializer& s) const final override
    {
        // Hashing format: the prefix followed by the raw transaction.
        s.add32(HashPrefix::transactionID);
        s.addRaw(item_->slice());
    }
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,92 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_SHAMAPLEAFTXPLUSMETANODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPLEAFTXPLUSMETANODE_H_INCLUDED
#include <xrpl/basics/CountedObject.h>
#include <xrpl/protocol/HashPrefix.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/shamap/SHAMapItem.h>
#include <xrpl/shamap/SHAMapLeafNode.h>
namespace ripple {
/** A leaf node for a transaction and its associated metadata. */
class SHAMapTxPlusMetaLeafNode final
    : public SHAMapLeafNode,
      public CountedObject<SHAMapTxPlusMetaLeafNode>
{
public:
    /** Construct the leaf and compute its hash from the item. */
    SHAMapTxPlusMetaLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid)
        : SHAMapLeafNode(std::move(item), cowid)
    {
        updateHash();
    }

    /** Construct the leaf with an already-known hash. */
    SHAMapTxPlusMetaLeafNode(
        boost::intrusive_ptr<SHAMapItem const> item,
        std::uint32_t cowid,
        SHAMapHash const& hash)
        : SHAMapLeafNode(std::move(item), cowid, hash)
    {
    }

    // `final` added to clone() and getType() for consistency with
    // SHAMapTxLeafNode, whose overrides are all marked `final override`.
    intr_ptr::SharedPtr<SHAMapTreeNode>
    clone(std::uint32_t cowid) const final override
    {
        return intr_ptr::make_shared<SHAMapTxPlusMetaLeafNode>(
            item_, cowid, hash_);
    }

    SHAMapNodeType
    getType() const final override
    {
        return SHAMapNodeType::tnTRANSACTION_MD;
    }

    void
    updateHash() final override
    {
        // Unlike bare transactions, the hash covers the key as well and
        // uses the txNode prefix.
        hash_ = SHAMapHash{
            sha512Half(HashPrefix::txNode, item_->slice(), item_->key())};
    }

    void
    serializeForWire(Serializer& s) const final override
    {
        // Wire format: payload, then key, then the type marker byte.
        s.addRaw(item_->slice());
        s.addBitString(item_->key());
        s.add8(wireTypeTransactionWithMeta);
    }

    void
    serializeWithPrefix(Serializer& s) const final override
    {
        // Hashing format: prefix, payload, key.
        s.add32(HashPrefix::txNode);
        s.addRaw(item_->slice());
        s.addBitString(item_->key());
    }
};
} // namespace ripple
#endif

View File

@@ -0,0 +1,37 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_TREENODECACHE_H_INCLUDED
#define RIPPLE_SHAMAP_TREENODECACHE_H_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
namespace ripple {
/** A TaggedCache of SHAMapTreeNode objects keyed by uint256 (presumably the
    node's hash — confirm with callers), holding strong intrusive pointers
    and handing out shared/weak union pointers. */
using TreeNodeCache = TaggedCache<
    uint256,
    SHAMapTreeNode,
    /*IsKeyCache*/ false,
    intr_ptr::SharedWeakUnionPtr<SHAMapTreeNode>,
    intr_ptr::SharedPtr<SHAMapTreeNode>>;
} // namespace ripple
#endif

View File

@@ -0,0 +1,250 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_SHAMAP_TAGGEDPOINTER_H_INCLUDED
#define RIPPLE_SHAMAP_TAGGEDPOINTER_H_INCLUDED
#include <xrpl/basics/IntrusivePointer.h>
#include <xrpl/shamap/SHAMapTreeNode.h>
#include <array>
#include <cstdint>
#include <optional>
namespace ripple {
/** TaggedPointer is a combination of a pointer and a mask stored in the
    lowest two bits.

    Since pointers do not have arbitrary alignment, the lowest bits in the
    pointer are guaranteed to be zero. TaggedPointer stores information in these
    low bits. When dereferencing the pointer, these low "tag" bits are set to
    zero. When accessing the tag bits, the high "pointer" bits are set to zero.

    The "pointer" part points to the equivalent to an array of
    `SHAMapHash` followed immediately by an array of
    `shared_ptr<SHAMapTreeNode>`. The sizes of these arrays are
    determined by the tag. The tag is an index into an array (`boundaries`,
    defined in the cpp file) that specifies the size. Both arrays are the
    same size. Note that the sizes may be smaller than the full 16 elements
    needed to explicitly store all the children. In this case, the arrays
    only store the non-empty children. The non-empty children are stored in
    index order. For example, if only children `2` and `14` are non-empty, a
    two-element array would store child `2` in array index 0 and child `14`
    in array index 1. There are functions to convert between a child's tree
    index and the child's index in a sparse array.

    The motivation for this class is saving RAM. A large percentage of inner
    nodes only store a small number of children. Memory can be saved by
    storing the inner node's children in sparse arrays. Measurements show
    that on average a typical SHAMap's inner nodes can be stored using only
    25% of the original space.
*/
class TaggedPointer
{
private:
    static_assert(
        alignof(SHAMapHash) >= 4,
        "Bad alignment: Tag pointer requires low two bits to be zero.");

    /** Upper bits are the pointer, lowest two bits are the tag

        A moved-from object will have a tp_ of zero.
    */
    std::uintptr_t tp_ = 0;

    /** bit-and with this mask to get the tag bits (lowest two bits) */
    static constexpr std::uintptr_t tagMask = 3;

    /** bit-and with this mask to get the pointer bits (mask out the tag) */
    static constexpr std::uintptr_t ptrMask = ~tagMask;

    /** Deallocate memory and run destructors */
    void
    destroyHashesAndChildren();

    // Tag type used only to select the raw-allocating constructor overload.
    struct RawAllocateTag
    {
    };

    /** This constructor allocates space for the hashes and children, but
        does not run constructors.

        @param RawAllocateTag used to select overload only
        @param numChildren allocate space for at least this number of children
               (must be <= branchFactor)

        @note Since the hashes/children destructors are always run in the
        TaggedPointer destructor, this means those constructors _must_ be run
        after this constructor is run. This constructor is private and only used
        in places where the hashes/children constructor are subsequently run.
    */
    explicit TaggedPointer(RawAllocateTag, std::uint8_t numChildren);

public:
    TaggedPointer() = delete;

    explicit TaggedPointer(std::uint8_t numChildren);

    /** This constructor is used to change the number of allocated children.

        Existing children from `other` are copied (toAllocate must be >= the
        number of children). The motivation for making this a constructor is it
        saves unneeded copying and zeroing out of hashes if this were
        implemented directly in the SHAMapInnerNode class.

        @param other children and hashes are moved from this param
        @param isBranch bitset of non-empty children in `other`
        @param toAllocate allocate space for at least this number of children
               (must be <= branchFactor)
    */
    explicit TaggedPointer(
        TaggedPointer&& other,
        std::uint16_t isBranch,
        std::uint8_t toAllocate);

    /** Given `other` with the specified children in `srcBranches`, create a
        new TaggedPointer with the allocated number of children and the
        children specified in `dstBranches`.

        @param other children and hashes are moved from this param
        @param srcBranches bitset of non-empty children in `other`
        @param dstBranches bitset of children to copy from `other` (or space to
               leave in a sparse array - see note below)
        @param toAllocate allocate space for at least this number of children
               (must be <= branchFactor)

        @note a child may be absent in srcBranches but present in dstBranches
        (if dst has a sparse representation, space for the new child will be
        left in the sparse array). Typically, srcBranches and dstBranches will
        differ by at most one bit. The function works correctly if they differ
        by more, but there are likely more efficient algorithms to consider if
        this becomes a common use-case.
    */
    explicit TaggedPointer(
        TaggedPointer&& other,
        std::uint16_t srcBranches,
        std::uint16_t dstBranches,
        std::uint8_t toAllocate);

    TaggedPointer(TaggedPointer const&) = delete;

    TaggedPointer(TaggedPointer&&);

    TaggedPointer&
    operator=(TaggedPointer&&);

    ~TaggedPointer();

    /** Decode the tagged pointer into its tag and pointer */
    [[nodiscard]] std::pair<std::uint8_t, void*>
    decode() const;

    /** Get the number of elements allocated for each array */
    [[nodiscard]] std::uint8_t
    capacity() const;

    /** Check if the arrays have a dense format.

        @note The dense format is when there is an array element for all 16
        (branchFactor) possible children.
    */
    [[nodiscard]] bool
    isDense() const;

    /** Get the number of elements in each array and a pointer to the start
        of each array.
    */
    [[nodiscard]] std::
        tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
        getHashesAndChildren() const;

    /** Get the `hashes` array */
    [[nodiscard]] SHAMapHash*
    getHashes() const;

    /** Get the `children` array */
    [[nodiscard]] intr_ptr::SharedPtr<SHAMapTreeNode>*
    getChildren() const;

    /** Call the `f` callback for all 16 (branchFactor) branches - even if
        the branch is empty.

        @param isBranch bitset of non-empty children
        @param f a one parameter callback function. The parameter is the
               child's hash.
    */
    template <class F>
    void
    iterChildren(std::uint16_t isBranch, F&& f) const;

    /** Call the `f` callback for all non-empty branches.

        @param isBranch bitset of non-empty children
        @param f a two parameter callback function. The first parameter is
               the branch number, the second parameter is the index into the
               array. For dense formats these are the same, for sparse they
               may be different.
    */
    template <class F>
    void
    iterNonEmptyChildIndexes(std::uint16_t isBranch, F&& f) const;

    /** Get the child's index inside the `hashes` or `children` array (which
        may or may not be sparse). The optional will be empty if an empty
        branch is requested and the children are sparse.

        @param isBranch bitset of non-empty children
        @param i index of the requested child
    */
    std::optional<int>
    getChildIndex(std::uint16_t isBranch, int i) const;
};
/** Count the set bits in a 16-bit value.

    Uses the standard library or compiler intrinsic when available;
    otherwise falls back to a compile-time per-byte lookup table.
*/
[[nodiscard]] inline int
popcnt16(std::uint16_t a)
{
#if __cpp_lib_bitops
    return std::popcount(a);
#elif defined(__clang__) || defined(__GNUC__)
    return __builtin_popcount(a);
#else
    // Table of popcounts for every possible byte value, built at
    // compile time.
    static constexpr auto lut = []() {
        std::array<std::uint8_t, 256> counts{};
        for (int byte = 0; byte != 256; ++byte)
            for (int bit = 0; bit != 8; ++bit)
                counts[byte] += (byte >> bit) & 1;
        return counts;
    }();
    // Sum the counts of the low and high bytes.
    return lut[a & 0xff] + lut[a >> 8];
#endif
}
} // namespace ripple
#endif

View File

@@ -0,0 +1,597 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/shamap/SHAMapInnerNode.h>
#include <xrpl/shamap/detail/TaggedPointer.h>

#include <boost/pool/pool_alloc.hpp>

#include <array>
#include <utility>
namespace ripple {
namespace {

// Sparse array size boundaries.
// Given n children, an array of size `*std::lower_bound(boundaries.begin(),
// boundaries.end(), n);` is used to store the children. Note that the last
// element must be the number of children in a dense array.
constexpr std::array<std::uint8_t, 4> boundaries{
    2,
    4,
    6,
    SHAMapInnerNode::branchFactor};

static_assert(
    boundaries.size() <= 4,
    "The hashesAndChildren member uses a tagged array format with two bits "
    "reserved for the tag. This supports at most 4 values.");
static_assert(
    boundaries.back() == SHAMapInnerNode::branchFactor,
    "Last element of boundaries must be number of children in a dense array");

// Terminology: A chunk is the memory being allocated from a block. A block
// contains multiple chunks. This is the terminology the boost documentation
// uses. Pools use "Simple Segregated Storage" as their storage format.

// Bytes needed per child slot: one hash plus one child pointer. A single
// chunk holds both arrays (hashes followed by children) back to back.
constexpr size_t elementSizeBytes =
    (sizeof(SHAMapHash) + sizeof(intr_ptr::SharedPtr<SHAMapTreeNode>));

// Size of each block the pools carve their chunks out of.
constexpr size_t blockSizeBytes = kilobytes(512);

// Chunk size in bytes for each boundary (size class).
template <std::size_t... I>
constexpr std::array<size_t, boundaries.size()>
initArrayChunkSizeBytes(std::index_sequence<I...>)
{
    return std::array<size_t, boundaries.size()>{
        boundaries[I] * elementSizeBytes...,
    };
}
constexpr auto arrayChunkSizeBytes =
    initArrayChunkSizeBytes(std::make_index_sequence<boundaries.size()>{});

// Number of chunks per pool block, for each size class.
template <std::size_t... I>
constexpr std::array<size_t, boundaries.size()>
initArrayChunksPerBlock(std::index_sequence<I...>)
{
    return std::array<size_t, boundaries.size()>{
        blockSizeBytes / arrayChunkSizeBytes[I]...,
    };
}
constexpr auto chunksPerBlock =
    initArrayChunksPerBlock(std::make_index_sequence<boundaries.size()>{});

// Round `n` up to the smallest boundary capacity that can hold `n` children.
[[nodiscard]] inline std::uint8_t
numAllocatedChildren(std::uint8_t n)
{
    XRPL_ASSERT(
        n <= SHAMapInnerNode::branchFactor,
        "ripple::numAllocatedChildren : valid input");
    return *std::lower_bound(boundaries.begin(), boundaries.end(), n);
}

// Index into `boundaries` (and into the per-size-class function arrays
// below) of the smallest size class that can hold `numChildren` children.
[[nodiscard]] inline std::size_t
boundariesIndex(std::uint8_t numChildren)
{
    XRPL_ASSERT(
        numChildren <= SHAMapInnerNode::branchFactor,
        "ripple::boundariesIndex : valid input");
    return std::distance(
        boundaries.begin(),
        std::lower_bound(boundaries.begin(), boundaries.end(), numChildren));
}

// One boost singleton pool per size class. The chunk size is a compile-time
// template parameter of the pool, so the pool is selected at run time by
// indexing these function arrays with the boundaries index.
template <std::size_t... I>
std::array<std::function<void*()>, boundaries.size()>
initAllocateArrayFuns(std::index_sequence<I...>)
{
    return std::array<std::function<void*()>, boundaries.size()>{
        boost::singleton_pool<
            boost::fast_pool_allocator_tag,
            arrayChunkSizeBytes[I],
            boost::default_user_allocator_new_delete,
            std::mutex,
            chunksPerBlock[I],
            chunksPerBlock[I]>::malloc...,
    };
}
std::array<std::function<void*()>, boundaries.size()> const allocateArrayFuns =
    initAllocateArrayFuns(std::make_index_sequence<boundaries.size()>{});

// Per-size-class deallocation functions, paired with `allocateArrayFuns`.
template <std::size_t... I>
std::array<std::function<void(void*)>, boundaries.size()>
initFreeArrayFuns(std::index_sequence<I...>)
{
    return std::array<std::function<void(void*)>, boundaries.size()>{
        static_cast<void (*)(void*)>(boost::singleton_pool<
                                     boost::fast_pool_allocator_tag,
                                     arrayChunkSizeBytes[I],
                                     boost::default_user_allocator_new_delete,
                                     std::mutex,
                                     chunksPerBlock[I],
                                     chunksPerBlock[I]>::free)...,
    };
}
std::array<std::function<void(void*)>, boundaries.size()> const freeArrayFuns =
    initFreeArrayFuns(std::make_index_sequence<boundaries.size()>{});

// Per-size-class membership checks; used to assert that a pointer is
// returned to the same pool it was allocated from.
template <std::size_t... I>
std::array<std::function<bool(void*)>, boundaries.size()>
initIsFromArrayFuns(std::index_sequence<I...>)
{
    return std::array<std::function<bool(void*)>, boundaries.size()>{
        boost::singleton_pool<
            boost::fast_pool_allocator_tag,
            arrayChunkSizeBytes[I],
            boost::default_user_allocator_new_delete,
            std::mutex,
            chunksPerBlock[I],
            chunksPerBlock[I]>::is_from...,
    };
}
std::array<std::function<bool(void*)>, boundaries.size()> const
    isFromArrayFuns =
        initIsFromArrayFuns(std::make_index_sequence<boundaries.size()>{});

// This function returns an untagged pointer
[[nodiscard]] inline std::pair<std::uint8_t, void*>
allocateArrays(std::uint8_t numChildren)
{
    auto const i = boundariesIndex(numChildren);
    return {i, allocateArrayFuns[i]()};
}

// This function takes an untagged pointer
inline void
deallocateArrays(std::uint8_t boundaryIndex, void* p)
{
    XRPL_ASSERT(
        isFromArrayFuns[boundaryIndex](p),
        "ripple::deallocateArrays : valid inputs");
    freeArrayFuns[boundaryIndex](p);
}

// Used in `iterChildren` and elsewhere as the hash value for sparse arrays
// when the hash isn't actually stored in the array.
static SHAMapHash const zeroSHAMapHash;

}  // namespace
/** Invoke `f` once per branch (all 16), in branch order.

    Branches without a stored hash (possible only in the sparse layout)
    are reported as the shared zero hash.
*/
template <class F>
void
TaggedPointer::iterChildren(std::uint16_t isBranch, F&& f) const
{
    auto const [allocated, hashes, children] = getHashesAndChildren();
    (void)children;
    if (allocated != SHAMapInnerNode::branchFactor)
    {
        // Sparse layout: stored hashes are packed contiguously, so track
        // the next stored slot separately from the branch number.
        int nextStored = 0;
        for (int branch = 0; branch < SHAMapInnerNode::branchFactor; ++branch)
        {
            if (isBranch & (1 << branch))
                f(hashes[nextStored++]);
            else
                f(zeroSHAMapHash);
        }
    }
    else
    {
        // Dense layout: one stored hash per possible branch.
        for (int branch = 0; branch < SHAMapInnerNode::branchFactor; ++branch)
            f(hashes[branch]);
    }
}
/** Invoke `f(branch, arrayIndex)` for every non-empty branch.

    In the dense layout a branch is stored at its own index; in the
    sparse layout non-empty branches are packed contiguously, so the
    array index is the running count of non-empty branches seen so far.
*/
template <class F>
void
TaggedPointer::iterNonEmptyChildIndexes(std::uint16_t isBranch, F&& f) const
{
    bool const dense = (capacity() == SHAMapInnerNode::branchFactor);
    int packedIndex = 0;
    for (int branch = 0; branch < SHAMapInnerNode::branchFactor; ++branch)
    {
        if (!(isBranch & (1 << branch)))
            continue;
        f(branch, dense ? branch : packedIndex);
        ++packedIndex;
    }
}
/** Destroy every allocated hash/child slot and return the raw storage
    to its pool. A no-op on an empty (moved-from) object.
*/
inline void
TaggedPointer::destroyHashesAndChildren()
{
    // An empty (moved-from) TaggedPointer owns nothing.
    if (!tp_)
        return;

    auto const [numAllocated, hashes, children] = getHashesAndChildren();
    for (std::size_t slot = 0; slot < numAllocated; ++slot)
    {
        std::destroy_at(&hashes[slot]);
        std::destroy_at(&children[slot]);
    }
    auto const [tag, rawPtr] = decode();
    deallocateArrays(tag, rawPtr);
}
/** Map a branch number to its slot in the hashes/children arrays.

    Returns an empty optional when the branch is empty and the layout is
    sparse (sparse arrays do not store empty branches).
*/
inline std::optional<int>
TaggedPointer::getChildIndex(std::uint16_t isBranch, int i) const
{
    // Dense layout: branch number and array index coincide.
    if (isDense())
        return i;

    // Sparse layout stores only non-empty branches; an empty branch has
    // no slot at all.
    if (!(isBranch & (1 << i)))
        return std::nullopt;

    // Non-empty branches are stored in branch order, so a child's slot
    // is the number of non-empty branches below it. Mask away bit `i`
    // and everything above it, then count what remains.
    std::uint16_t const below = isBranch & ((1u << i) - 1);
    return popcnt16(below);
}
/** Allocate arrays for `numChildren` children WITHOUT running any
    constructors; the caller is responsible for placement-constructing
    every slot.
*/
inline TaggedPointer::TaggedPointer(RawAllocateTag, std::uint8_t numChildren)
{
    auto const [boundaryTag, raw] = allocateArrays(numChildren);
    XRPL_ASSERT(
        boundaryTag < boundaries.size(),
        "ripple::TaggedPointer::TaggedPointer(RawAllocateTag, std::uint8_t) : "
        "maximum tag");
    // The assert below checks that the pool returned storage whose low
    // bits are clear, leaving them free to hold the boundary index.
    XRPL_ASSERT(
        (reinterpret_cast<std::uintptr_t>(raw) & ptrMask) ==
            reinterpret_cast<std::uintptr_t>(raw),
        "ripple::TaggedPointer::TaggedPointer(RawAllocateTag, std::uint8_t) : "
        "valid pointer");
    tp_ = reinterpret_cast<std::uintptr_t>(raw) + boundaryTag;
}
// Construct from `other`, keeping the children selected by `dstBranches`.
// If `other`'s capacity already equals the rounded-up `toAllocate`, the
// arrays are edited in place (shifting sparse elements as needed);
// otherwise fresh arrays are raw-allocated and the kept children are
// placement-constructed/moved across.
inline TaggedPointer::TaggedPointer(
    TaggedPointer&& other,
    std::uint16_t srcBranches,
    std::uint16_t dstBranches,
    std::uint8_t toAllocate)
{
    XRPL_ASSERT(
        toAllocate >= popcnt16(dstBranches),
        "ripple::TaggedPointer::TaggedPointer(TaggedPointer&& ...) : minimum "
        "toAllocate input");
    if (other.capacity() == numAllocatedChildren(toAllocate))
    {
        // in place
        *this = std::move(other);
        auto [srcDstNumAllocated, srcDstHashes, srcDstChildren] =
            getHashesAndChildren();
        bool const srcDstIsDense = isDense();
        int srcDstIndex = 0;
        // Walk all 16 branches, classifying each by membership in the
        // source and destination bitsets.
        for (int i = 0; i < SHAMapInnerNode::branchFactor; ++i)
        {
            auto const mask = (1 << i);
            bool const inSrc = (srcBranches & mask);
            bool const inDst = (dstBranches & mask);
            if (inSrc && inDst)
            {
                // keep
                ++srcDstIndex;
            }
            else if (inSrc && !inDst)
            {
                // remove
                if (srcDstIsDense)
                {
                    srcDstHashes[srcDstIndex].zero();
                    srcDstChildren[srcDstIndex].reset();
                    ++srcDstIndex;
                }
                else
                {
                    // sparse
                    // need to shift all the elements to the left by
                    // one
                    for (int c = srcDstIndex; c < srcDstNumAllocated - 1; ++c)
                    {
                        srcDstHashes[c] = srcDstHashes[c + 1];
                        srcDstChildren[c] = std::move(srcDstChildren[c + 1]);
                    }
                    srcDstHashes[srcDstNumAllocated - 1].zero();
                    srcDstChildren[srcDstNumAllocated - 1].reset();
                    // do not increment the index
                }
            }
            else if (!inSrc && inDst)
            {
                // add
                if (srcDstIsDense)
                {
                    // nothing to do, child is already present in the dense rep
                    ++srcDstIndex;
                }
                else
                {
                    // sparse
                    // need to create a hole by shifting all the elements to the
                    // right by one
                    for (int c = srcDstNumAllocated - 1; c > srcDstIndex; --c)
                    {
                        srcDstHashes[c] = srcDstHashes[c - 1];
                        srcDstChildren[c] = std::move(srcDstChildren[c - 1]);
                    }
                    srcDstHashes[srcDstIndex].zero();
                    srcDstChildren[srcDstIndex].reset();
                    ++srcDstIndex;
                }
            }
            else if (!inSrc && !inDst)
            {
                // in neither
                if (srcDstIsDense)
                {
                    ++srcDstIndex;
                }
            }
        }
    }
    else
    {
        // not in place
        // Raw-allocate the destination; every slot is placement-constructed
        // exactly once below.
        TaggedPointer dst{RawAllocateTag{}, toAllocate};
        auto [dstNumAllocated, dstHashes, dstChildren] =
            dst.getHashesAndChildren();
        // Move `other` into a local var so it's not in a partially moved from
        // state after this function runs
        TaggedPointer src(std::move(other));
        auto [srcNumAllocated, srcHashes, srcChildren] =
            src.getHashesAndChildren();
        bool const srcIsDense = src.isDense();
        bool const dstIsDense = dst.isDense();
        int srcIndex = 0, dstIndex = 0;
        for (int i = 0; i < SHAMapInnerNode::branchFactor; ++i)
        {
            auto const mask = (1 << i);
            bool const inSrc = (srcBranches & mask);
            bool const inDst = (dstBranches & mask);
            if (inSrc && inDst)
            {
                // keep
                new (&dstHashes[dstIndex]) SHAMapHash{srcHashes[srcIndex]};
                new (&dstChildren[dstIndex])
                    intr_ptr::SharedPtr<SHAMapTreeNode>{
                        std::move(srcChildren[srcIndex])};
                ++dstIndex;
                ++srcIndex;
            }
            else if (inSrc && !inDst)
            {
                // remove
                ++srcIndex;
                if (dstIsDense)
                {
                    new (&dstHashes[dstIndex]) SHAMapHash{};
                    new (&dstChildren[dstIndex])
                        intr_ptr::SharedPtr<SHAMapTreeNode>{};
                    ++dstIndex;
                }
            }
            else if (!inSrc && inDst)
            {
                // add
                new (&dstHashes[dstIndex]) SHAMapHash{};
                new (&dstChildren[dstIndex])
                    intr_ptr::SharedPtr<SHAMapTreeNode>{};
                ++dstIndex;
                if (srcIsDense)
                {
                    ++srcIndex;
                }
            }
            else if (!inSrc && !inDst)
            {
                // in neither
                if (dstIsDense)
                {
                    new (&dstHashes[dstIndex]) SHAMapHash{};
                    new (&dstChildren[dstIndex])
                        intr_ptr::SharedPtr<SHAMapTreeNode>{};
                    ++dstIndex;
                }
                if (srcIsDense)
                {
                    ++srcIndex;
                }
            }
        }
        // If sparse, may need to run additional constructors
        XRPL_ASSERT(
            !dstIsDense || dstIndex == dstNumAllocated,
            "ripple::TaggedPointer::TaggedPointer(TaggedPointer&& ...) : "
            "non-sparse or valid sparse");
        for (int i = dstIndex; i < dstNumAllocated; ++i)
        {
            new (&dstHashes[i]) SHAMapHash{};
            new (&dstChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
        }
        *this = std::move(dst);
    }
}
// Resize `other` to hold at least `toAllocate` children (rounded up to a
// boundary size). When the rounded capacity already matches `other`'s,
// this degenerates to a plain move; otherwise new arrays are raw-allocated
// and the children in `isBranch` are moved over, converting between the
// sparse and dense layouts as required.
inline TaggedPointer::TaggedPointer(
    TaggedPointer&& other,
    std::uint16_t isBranch,
    std::uint8_t toAllocate)
    : TaggedPointer(std::move(other))
{
    auto const oldNumAllocated = capacity();
    toAllocate = numAllocatedChildren(toAllocate);
    if (toAllocate == oldNumAllocated)
        return;
    // allocate hashes and children, but do not run constructors
    TaggedPointer newHashesAndChildren{RawAllocateTag{}, toAllocate};
    SHAMapHash *newHashes, *oldHashes;
    intr_ptr::SharedPtr<SHAMapTreeNode>*newChildren, *oldChildren;
    std::uint8_t newNumAllocated;
    // structured bindings can't be captured in c++ 17; use tie instead
    std::tie(newNumAllocated, newHashes, newChildren) =
        newHashesAndChildren.getHashesAndChildren();
    std::tie(std::ignore, oldHashes, oldChildren) = getHashesAndChildren();
    if (newNumAllocated == SHAMapInnerNode::branchFactor)
    {
        // new arrays are dense, old arrays are sparse
        iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) {
            new (&newHashes[branchNum]) SHAMapHash{oldHashes[indexNum]};
            new (&newChildren[branchNum]) intr_ptr::SharedPtr<SHAMapTreeNode>{
                std::move(oldChildren[indexNum])};
        });
        // Run the constructors for the remaining elements
        for (int i = 0; i < SHAMapInnerNode::branchFactor; ++i)
        {
            if ((1 << i) & isBranch)
                continue;
            new (&newHashes[i]) SHAMapHash{};
            new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
        }
    }
    else
    {
        // new arrays are sparse, old arrays may be sparse or dense
        int curCompressedIndex = 0;
        iterNonEmptyChildIndexes(isBranch, [&](auto branchNum, auto indexNum) {
            new (&newHashes[curCompressedIndex])
                SHAMapHash{oldHashes[indexNum]};
            new (&newChildren[curCompressedIndex])
                intr_ptr::SharedPtr<SHAMapTreeNode>{
                    std::move(oldChildren[indexNum])};
            ++curCompressedIndex;
        });
        // Run the constructors for the remaining elements
        for (int i = curCompressedIndex; i < newNumAllocated; ++i)
        {
            new (&newHashes[i]) SHAMapHash{};
            new (&newChildren[i]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
        }
    }
    // Old arrays (still owned by *this) are destroyed here by move
    // assignment.
    *this = std::move(newHashesAndChildren);
}
/** Allocate arrays for `numChildren` children and default-construct
    every slot (empty hash, empty child pointer).
*/
inline TaggedPointer::TaggedPointer(std::uint8_t numChildren)
    : TaggedPointer(TaggedPointer::RawAllocateTag{}, numChildren)
{
    // The raw-allocate delegate does not run constructors, so every slot
    // must be placement-constructed here.
    auto const [numAllocated, hashes, children] = getHashesAndChildren();
    for (std::size_t slot = 0; slot < numAllocated; ++slot)
    {
        new (&hashes[slot]) SHAMapHash{};
        new (&children[slot]) intr_ptr::SharedPtr<SHAMapTreeNode>{};
    }
}
/** Move constructor: takes ownership of `other`'s arrays and leaves
    `other` empty so its destructor releases nothing.
*/
inline TaggedPointer::TaggedPointer(TaggedPointer&& other)
    : tp_{std::exchange(other.tp_, 0)}
{
}
/** Move assignment: releases the currently owned arrays, then takes
    ownership of `other`'s, leaving `other` empty.
*/
inline TaggedPointer&
TaggedPointer::operator=(TaggedPointer&& other)
{
    // Guard against self-move: destroying our arrays first would free
    // the very storage we are about to take ownership of.
    if (this != &other)
    {
        destroyHashesAndChildren();
        tp_ = std::exchange(other.tp_, 0);
    }
    return *this;
}
[[nodiscard]] inline std::pair<std::uint8_t, void*>
TaggedPointer::decode() const
{
return {tp_ & tagMask, reinterpret_cast<void*>(tp_ & ptrMask)};
}
/** Number of elements allocated in each array. */
[[nodiscard]] inline std::uint8_t
TaggedPointer::capacity() const
{
    // The tag is the index into `boundaries`, which records how many
    // elements each size class allocates.
    auto const tag = tp_ & tagMask;
    return boundaries[tag];
}
/** Whether the arrays use the dense layout (one slot per branch). */
[[nodiscard]] inline bool
TaggedPointer::isDense() const
{
    // The last boundaries entry (highest tag value) is the dense layout.
    auto const tag = tp_ & tagMask;
    return tag == boundaries.size() - 1;
}
/** Return the allocated element count plus pointers to the start of the
    hashes and children arrays.
*/
[[nodiscard]] inline std::
    tuple<std::uint8_t, SHAMapHash*, intr_ptr::SharedPtr<SHAMapTreeNode>*>
    TaggedPointer::getHashesAndChildren() const
{
    auto const [tag, rawPtr] = decode();
    // Layout of a chunk: the hashes array comes first, immediately
    // followed by the children array; both have `boundaries[tag]` slots.
    auto const numAllocated = boundaries[tag];
    auto* const hashes = static_cast<SHAMapHash*>(rawPtr);
    auto* const children =
        reinterpret_cast<intr_ptr::SharedPtr<SHAMapTreeNode>*>(
            hashes + numAllocated);
    return {numAllocated, hashes, children};
}
/** Return the start of the `hashes` array (the front of the chunk). */
[[nodiscard]] inline SHAMapHash*
TaggedPointer::getHashes() const
{
    return static_cast<SHAMapHash*>(decode().second);
}
/** Return the start of the `children` array. */
[[nodiscard]] inline intr_ptr::SharedPtr<SHAMapTreeNode>*
TaggedPointer::getChildren() const
{
    return std::get<2>(getHashesAndChildren());
}
inline TaggedPointer::~TaggedPointer()
{
    // Destroys any owned hashes/children and returns the storage to the
    // pool; a no-op for a moved-from object.
    destroyHashesAndChildren();
}
} // namespace ripple