Mirror of https://github.com/XRPLF/rippled.git, synced 2025-11-20 02:55:50 +00:00
Compare commits: dangell7/c...bthomee/di (18 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4306d9ccc3 | |
| | 1a4d9732ca | |
| | 9494fc9668 | |
| | aad6edb6b1 | |
| | a4a1c4eecf | |
| | fca6a8768f | |
| | d96c4164b9 | |
| | 965fc75e8a | |
| | 2fa1c711d3 | |
| | 4650e7d2c6 | |
| | a213127852 | |
| | 6e7537dada | |
| | 0777f7c64b | |
| | 39bfcaf95c | |
| | 61c9a19868 | |
| | d01851bc5a | |
| | d1703842e7 | |
| | 8d31b1739d | |
.github/scripts/strategy-matrix/windows.json (vendored): 2 lines changed
@@ -2,7 +2,7 @@
   "architecture": [
     {
       "platform": "windows/amd64",
-      "runner": ["windows-latest"]
+      "runner": ["self-hosted", "Windows", "devbox"]
     }
   ],
   "os": [
.gitignore (vendored): 2 lines changed
@@ -111,5 +111,3 @@ bld.rippled/
 
 # Suggested in-tree build directory
 /.build*/
-
-cmake-build-debug-event-trace/*
@@ -119,7 +119,6 @@ endif()
 
 find_package(nudb REQUIRED)
 find_package(date REQUIRED)
-find_package(BLAKE3 REQUIRED)
 find_package(xxHash REQUIRED)
 
 target_link_libraries(ripple_libs INTERFACE
@@ -940,23 +940,7 @@
 #
 #   path            Location to store the database
 #
-#   Optional keys
-#
-#   cache_size      Size of cache for database records. Default is 16384.
-#                   Setting this value to 0 will use the default value.
-#
-#   cache_age       Length of time in minutes to keep database records
-#                   cached. Default is 5 minutes. Setting this value to
-#                   0 will use the default value.
-#
-#   Note: if neither cache_size nor cache_age is
-#   specified, the cache for database records will not
-#   be created. If only one of cache_size or cache_age
-#   is specified, the cache will be created using the
-#   default value for the unspecified parameter.
-#
-#   Note: the cache will not be created if online_delete
-#   is specified.
+#   Optional keys for NuDB and RocksDB:
 #
 #   fast_load       Boolean. If set, load the last persisted ledger
 #                   from disk upon process start before syncing to
@@ -964,8 +948,6 @@
 #                   if sufficient IOPS capacity is available.
 #                   Default 0.
 #
-#   Optional keys for NuDB or RocksDB:
-#
 #   earliest_seq    The default is 32570 to match the XRP ledger
 #                   network's earliest allowed sequence. Alternate
 #                   networks may set this value. Minimum value of 1.
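For reference, under the documentation deleted above, a [node_db] stanza that enabled the record cache might have looked like the following (values are illustrative only; cache_size and cache_age are exactly the keys whose documentation this change removes):

[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
cache_size=16384
cache_age=5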
@@ -61,7 +61,6 @@ target_link_libraries(xrpl.imports.main
   absl::random_random
   date::date
   ed25519::ed25519
-  BLAKE3::blake3
   secp256k1::secp256k1
   xrpl.libpb
   xxHash::xxhash
@@ -2,11 +2,11 @@
   "version": "0.5",
   "requires": [
     "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
-    "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1743678659.187",
+    "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
     "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
     "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
     "snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
-    "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1744110653.645",
+    "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
     "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
     "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
     "openssl/3.5.2#0c5a5e15ae569f45dff57adcf1770cf7%1756234259.61",
@@ -17,12 +17,11 @@
     "libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
     "jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
     "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
-    "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1681601797.282",
-    "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1748457219.54",
+    "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
+    "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
     "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
     "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
     "boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
-    "blake3/1.5.0#af8dc8cf8dc55bfca24686b52dc137db%1758046610.912948",
     "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
   ],
   "build_requires": [
@@ -30,7 +30,6 @@ class Xrpl(ConanFile):
         'openssl/3.5.2',
         'soci/4.0.3',
         'zlib/1.3.1',
-        'blake3/1.5.0',
     ]
 
     test_requires = [
external/blake3/conandata.yml (vendored): 10 lines deleted
@@ -1,10 +0,0 @@
-sources:
-  "1.5.0":
-    url: "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.5.0.tar.gz"
-    sha256: "f506140bc3af41d3432a4ce18b3b83b08eaa240e94ef161eb72b2e57cdc94c69"
-  "1.4.1":
-    url: "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.4.1.tar.gz"
-    sha256: "33020ac83a8169b2e847cc6fb1dd38806ffab6efe79fe6c320e322154a3bea2c"
-  "1.4.0":
-    url: "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.4.0.tar.gz"
-    sha256: "658e1c75e2d9bbed9f426385f02d2a188dc19978a39e067ba93e837861e5fe58"
external/blake3/conanfile.py (vendored): 102 lines deleted
@@ -1,102 +0,0 @@
-from conan import ConanFile
-from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
-from conan.tools.files import copy, get
-from conan.tools.scm import Version
-import os
-
-required_conan_version = ">=1.54.0"
-
-
-class Blake3Conan(ConanFile):
-    name = "blake3"
-    version = "1.5.0"
-    description = "BLAKE3 cryptographic hash function"
-    topics = ("blake3", "hash", "cryptography")
-    url = "https://github.com/BLAKE3-team/BLAKE3"
-    homepage = "https://github.com/BLAKE3-team/BLAKE3"
-    license = "CC0-1.0 OR Apache-2.0"
-
-    package_type = "library"
-    settings = "os", "arch", "compiler", "build_type"
-    options = {
-        "shared": [True, False],
-        "fPIC": [True, False],
-        "simd": [True, False],
-    }
-    default_options = {
-        "shared": False,
-        "fPIC": True,
-        "simd": True,
-    }
-
-    def config_options(self):
-        if self.settings.os == 'Windows':
-            del self.options.fPIC
-
-    def configure(self):
-        if self.options.shared:
-            self.options.rm_safe("fPIC")
-        # BLAKE3 is C code
-        self.settings.rm_safe("compiler.cppstd")
-        self.settings.rm_safe("compiler.libcxx")
-
-    def layout(self):
-        cmake_layout(self, src_folder="src")
-
-    def source(self):
-        get(self, **self.conan_data["sources"][self.version], strip_root=True)
-
-    def generate(self):
-        tc = CMakeToolchain(self)
-        # BLAKE3's CMake options
-        tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
-        if not self.options.simd:
-            tc.variables["BLAKE3_NO_SSE2"] = True
-            tc.variables["BLAKE3_NO_SSE41"] = True
-            tc.variables["BLAKE3_NO_AVX2"] = True
-            tc.variables["BLAKE3_NO_AVX512"] = True
-            tc.variables["BLAKE3_NO_NEON"] = True
-        tc.generate()
-
-    def build(self):
-        cmake = CMake(self)
-        # BLAKE3's C implementation has its CMakeLists.txt in the c/ subdirectory
-        cmake.configure(build_script_folder=os.path.join(self.source_folder, "c"))
-        cmake.build()
-
-    def package(self):
-        # Copy license files
-        copy(self, "LICENSE*", src=self.source_folder,
-             dst=os.path.join(self.package_folder, "licenses"))
-        # Copy header
-        copy(self, "blake3.h",
-             src=os.path.join(self.source_folder, "c"),
-             dst=os.path.join(self.package_folder, "include"))
-        # Copy library
-        copy(self, "*.a", src=self.build_folder,
-             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
-        copy(self, "*.lib", src=self.build_folder,
-             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
-        copy(self, "*.dylib", src=self.build_folder,
-             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
-        copy(self, "*.so*", src=self.build_folder,
-             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
-        copy(self, "*.dll", src=self.build_folder,
-             dst=os.path.join(self.package_folder, "bin"), keep_path=False)
-
-    def package_info(self):
-        self.cpp_info.set_property("cmake_file_name", "BLAKE3")
-        self.cpp_info.set_property("cmake_target_name", "BLAKE3::blake3")
-
-        # IMPORTANT: Explicitly set include directories to fix Conan CMakeDeps generation
-        self.cpp_info.includedirs = ["include"]
-        self.cpp_info.libs = ["blake3"]
-
-        # System libraries
-        if self.settings.os in ["Linux", "FreeBSD"]:
-            self.cpp_info.system_libs.append("m")
-            self.cpp_info.system_libs.append("pthread")
-
-        # TODO: to remove in conan v2 once cmake_find_package* generators removed
-        self.cpp_info.names["cmake_find_package"] = "BLAKE3"
-        self.cpp_info.names["cmake_find_package_multi"] = "BLAKE3"
@@ -32,7 +32,6 @@
 // If you add an amendment here, then do not forget to increment `numFeatures`
 // in include/xrpl/protocol/Feature.h.
 
-XRPL_FIX    (CanonicalTxSet, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FIX    (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX    (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
@@ -558,23 +558,8 @@ public:
         Env env(*this, envconfig(onlineDelete));
 
         /////////////////////////////////////////////////////////////
-        // Create the backend. Normally, SHAMapStoreImp handles all these
-        // details
-        auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
-
-        // Provide default values:
-        if (!nscfg.exists("cache_size"))
-            nscfg.set(
-                "cache_size",
-                std::to_string(env.app().config().getValueFor(
-                    SizedItem::treeCacheSize, std::nullopt)));
-
-        if (!nscfg.exists("cache_age"))
-            nscfg.set(
-                "cache_age",
-                std::to_string(env.app().config().getValueFor(
-                    SizedItem::treeCacheAge, std::nullopt)));
-
+        // Create NodeStore with two backends to allow online deletion of data.
+        // Normally, SHAMapStoreImp handles all these details.
         NodeStoreScheduler scheduler(env.app().getJobQueue());
 
         std::string const writableDb = "write";
@@ -582,9 +567,8 @@ public:
         auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
         auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
 
-        // Create NodeStore with two backends to allow online deletion of
-        // data
         constexpr int readThreads = 4;
+        auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
         auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
             scheduler,
             readThreads,
@@ -1,438 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2012-2024 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include <test/jtx.h>
-
-#include <xrpld/app/misc/CanonicalTXSet.h>
-
-#include <xrpl/protocol/STTx.h>
-#include <xrpl/protocol/SeqProxy.h>
-#include <xrpl/protocol/jss.h>
-
-namespace ripple {
-namespace test {
-
-class CanonicalTXSet_test : public beast::unit_test::suite
-{
-    // Helper function to create a test transaction with sequence
-    std::shared_ptr<STTx const>
-    makeSeqTx(
-        AccountID const& account,
-        std::uint32_t seq,
-        std::uint32_t salt = 0)
-    {
-        using namespace jtx;
-
-        STObject tx(sfTransaction);
-        tx.setAccountID(sfAccount, account);
-        tx.setFieldU32(sfSequence, seq);
-        tx.setFieldU16(sfTransactionType, ttPAYMENT);
-        tx.setAccountID(sfDestination, AccountID(1));
-        tx.setFieldAmount(sfAmount, STAmount(100));
-        tx.setFieldAmount(sfFee, STAmount(10));
-        tx.setFieldVL(sfSigningPubKey, Slice{});
-
-        // Add salt to make unique transaction IDs
-        if (salt != 0)
-            tx.setFieldU32(sfSourceTag, salt);
-
-        return std::make_shared<STTx const>(std::move(tx));
-    }
-
-    // Helper function to create a test transaction with ticket
-    std::shared_ptr<STTx const>
-    makeTicketTx(
-        AccountID const& account,
-        std::uint32_t ticketSeq,
-        std::uint32_t salt = 0)
-    {
-        using namespace jtx;
-
-        STObject tx(sfTransaction);
-        tx.setAccountID(sfAccount, account);
-        tx.setFieldU32(sfSequence, 0);
-        tx.setFieldU32(sfTicketSequence, ticketSeq);
-        tx.setFieldU16(sfTransactionType, ttPAYMENT);
-        tx.setAccountID(sfDestination, AccountID(1));
-        tx.setFieldAmount(sfAmount, STAmount(100));
-        tx.setFieldAmount(sfFee, STAmount(10));
-        tx.setFieldVL(sfSigningPubKey, Slice{});
-
-        // Add salt to make unique transaction IDs
-        if (salt != 0)
-            tx.setFieldU32(sfSourceTag, salt);
-
-        return std::make_shared<STTx const>(std::move(tx));
-    }
-
-    void
-    testInsertAndIteration(bool hasFix)
-    {
-        testcase("Insert and Iteration");
-
-        AccountID alice{1};
-        AccountID bob{2};
-        AccountID carol{3};
-        AccountID dave{4};
-
-        std::vector<uint256> ledgerHashes = {
-            uint256(
-                "9FCD278D5D77B4D5AF88EB9F0B2028C188975F7C75B548A137339EB6CF8C9A"
-                "69"),
-            uint256(
-                "71FF372D8189A93B70D1705D698A34FF7315131CAC6E043D1CE20FE26FC323"
-                "2A"),
-        };
-
-        std::vector<std::vector<AccountID>> goodData = {
-            {{carol}, {alice}, {dave}, {bob}},
-            {{bob}, {carol}, {dave}, {alice}},
-        };
-
-        std::vector<std::vector<AccountID>> badData = {
-            {{dave}, {alice}, {bob}, {carol}},
-            {{dave}, {alice}, {bob}, {carol}},
-        };
-
-        for (int i = 0; i < 2; ++i)
-        {
-            CanonicalTXSet set(ledgerHashes[i], hasFix);
-            auto tx1 = makeSeqTx(alice, 100, 1);
-            auto tx2 = makeTicketTx(bob, 100, 2);
-            auto tx3 = makeTicketTx(carol, 100, 3);
-            auto tx4 = makeTicketTx(dave, 100, 4);
-            set.insert(tx4);  // dave
-            set.insert(tx1);  // alice
-            set.insert(tx3);  // carol
-            set.insert(tx2);  // bob
-
-            BEAST_EXPECT(set.size() == 4);
-
-            // Iterate and check the canonical order
-            std::vector<AccountID> orderedAccounts;
-            for (auto it = set.begin(); it != set.end(); ++it)
-            {
-                auto accountID = it->second->getAccountID(sfAccount);
-                orderedAccounts.push_back(accountID);
-            }
-
-            auto const& testData = hasFix ? goodData : badData;
-            BEAST_EXPECT(orderedAccounts.size() == 4);
-            BEAST_EXPECT(orderedAccounts[0] == testData[i][0]);
-            BEAST_EXPECT(orderedAccounts[1] == testData[i][1]);
-            BEAST_EXPECT(orderedAccounts[2] == testData[i][2]);
-            BEAST_EXPECT(orderedAccounts[3] == testData[i][3]);
-        }
-    }
-
-    void
-    testErase()
-    {
-        testcase("Erase");
-
-        CanonicalTXSet set(uint256(42));
-
-        AccountID alice(1);
-        auto tx1 = makeSeqTx(alice, 100, 1);
-        auto tx2 = makeSeqTx(alice, 101, 2);
-        auto tx3 = makeSeqTx(alice, 102, 3);
-
-        set.insert(tx1);
-        set.insert(tx2);
-        set.insert(tx3);
-        BEAST_EXPECT(set.size() == 3);
-
-        // Find and erase a transaction
-        auto it = set.begin();
-        while (it != set.end() && it->second != tx2)
-            ++it;
-
-        BEAST_EXPECT(it != set.end());
-        BEAST_EXPECT(it->second == tx2);
-
-        it = set.erase(it);
-        BEAST_EXPECT(set.size() == 2);
-
-        // Verify tx2 is gone
-        bool foundTx1 = false;
-        bool foundTx2 = false;
-        bool foundTx3 = false;
-
-        for (auto const& item : set)
-        {
-            if (item.second == tx1)
-                foundTx1 = true;
-            if (item.second == tx2)
-                foundTx2 = true;
-            if (item.second == tx3)
-                foundTx3 = true;
-        }
-
-        BEAST_EXPECT(foundTx1);
-        BEAST_EXPECT(!foundTx2);
-        BEAST_EXPECT(foundTx3);
-    }
-
-    void
-    testReset()
-    {
-        testcase("Reset");
-
-        CanonicalTXSet set(uint256(42));
-        BEAST_EXPECT(set.key() == uint256(42));
-
-        AccountID alice(1);
-        auto tx1 = makeSeqTx(alice, 100, 1);
-        auto tx2 = makeSeqTx(alice, 101, 2);
-
-        set.insert(tx1);
-        set.insert(tx2);
-        BEAST_EXPECT(set.size() == 2);
-
-        // Reset with new salt
-        set.reset(uint256(99));
-        BEAST_EXPECT(set.key() == uint256(99));
-        BEAST_EXPECT(set.empty());
-        BEAST_EXPECT(set.size() == 0);
-    }
-
-    void
-    testPopAcctTransactionSequence()
-    {
-        testcase("Pop account transaction - sequences");
-
-        CanonicalTXSet set(uint256(42));
-        AccountID alice(1);
-        AccountID bob(2);
-
-        // Insert transactions with sequences
-        auto tx1 = makeSeqTx(alice, 100, 1);
-        auto tx2 = makeSeqTx(alice, 101, 2);
-        auto tx3 = makeSeqTx(alice, 102, 3);
-        auto tx4 = makeSeqTx(alice, 104, 4);  // Gap in sequence
-        auto tx5 = makeSeqTx(bob, 200, 5);
-
-        set.insert(tx1);
-        set.insert(tx2);
-        set.insert(tx3);
-        set.insert(tx4);
-        set.insert(tx5);
-
-        // Create a "processed" transaction (not in set) with seq 99
-        auto processedTx = makeSeqTx(alice, 99, 99);
-
-        // Pop consecutive sequences
-        auto popped = set.popAcctTransaction(processedTx);
-        BEAST_EXPECT(popped == tx1);    // Returns tx with seq 100
-        BEAST_EXPECT(set.size() == 4);  // tx1 removed
-
-        // Now "process" tx1 (seq 100) to get tx2 (seq 101)
-        popped = set.popAcctTransaction(tx1);
-        BEAST_EXPECT(popped == tx2);    // Returns tx with seq 101
-        BEAST_EXPECT(set.size() == 3);  // tx2 removed
-
-        // Now "process" tx2 (seq 101) to get tx3 (seq 102)
-        popped = set.popAcctTransaction(tx2);
-        BEAST_EXPECT(popped == tx3);    // Returns tx with seq 102
-        BEAST_EXPECT(set.size() == 2);  // tx3 removed
-
-        // Now "process" tx3 (seq 102) - gap at 103, so no return
-        popped = set.popAcctTransaction(tx3);
-        BEAST_EXPECT(
-            !popped);  // Gap in sequence (103 missing), returns nullptr
-        BEAST_EXPECT(set.size() == 2);  // Nothing removed (tx4 and tx5 remain)
-    }
-
-    void
-    testPopAcctTransactionTickets()
-    {
-        testcase("Pop account transaction - tickets");
-
-        CanonicalTXSet set(uint256(42));
-        AccountID alice(1);
-
-        // Insert transactions with tickets
-        auto tx1 = makeTicketTx(alice, 100, 1);
-        auto tx2 = makeTicketTx(alice, 105, 2);
-        auto tx3 = makeTicketTx(alice, 103, 3);
-
-        set.insert(tx1);
-        set.insert(tx2);
-        set.insert(tx3);
-        BEAST_EXPECT(set.size() == 3);
-
-        // Create a "processed" ticket transaction (not in set)
-        // This represents a transaction that was just processed
-        auto processedTx = makeTicketTx(alice, 95, 99);
-
-        // Pop ticket transactions (should return lowest ticket ID)
-        auto popped = set.popAcctTransaction(processedTx);
-        BEAST_EXPECT(popped == tx1);    // Ticket 100 is the lowest
-        BEAST_EXPECT(set.size() == 2);  // tx1 removed
-
-        // Now "process" tx1 (ticket 100) to get the next lowest ticket
-        popped = set.popAcctTransaction(tx1);
-        BEAST_EXPECT(popped == tx3);    // Ticket 103 is next lowest
-        BEAST_EXPECT(set.size() == 1);  // tx3 removed
-
-        // Now "process" tx3 (ticket 103) to get the next ticket
-        popped = set.popAcctTransaction(tx3);
-        BEAST_EXPECT(popped == tx2);    // Ticket 105 is the last one
-        BEAST_EXPECT(set.size() == 0);  // tx2 removed, set is empty
-
-        // Try to pop when set is empty
-        popped = set.popAcctTransaction(tx2);
-        BEAST_EXPECT(!popped);          // No more transactions
-        BEAST_EXPECT(set.size() == 0);  // Still empty
-    }
-
-    void
-    testPopAcctTransactionMixed()
-    {
-        testcase("Pop account transaction - mixed sequences and tickets");
-
-        CanonicalTXSet set(uint256(42));
-        AccountID alice(1);
-
-        // Insert mix of sequence and ticket transactions
-        auto tx1 = makeSeqTx(alice, 100, 1);
-        auto tx2 = makeSeqTx(alice, 101, 2);
-        auto tx3 = makeTicketTx(alice, 50, 3);   // Lower ticket
-        auto tx4 = makeTicketTx(alice, 150, 4);  // Higher ticket
-
-        set.insert(tx1);
-        set.insert(tx2);
-        set.insert(tx3);
-        set.insert(tx4);
-        BEAST_EXPECT(set.size() == 4);
-
-        // Create a "processed" transaction with seq 99 (not in set)
-        // This represents the last processed sequential transaction
-        auto processedTx = makeSeqTx(alice, 99, 99);
-
-        // Sequences should be processed first (in order)
-        auto popped = set.popAcctTransaction(processedTx);
-        BEAST_EXPECT(popped == tx1);  // Gets seq 100
-        BEAST_EXPECT(set.size() == 3);
-
-        // Use tx1 (just processed) to get the next one
-        popped = set.popAcctTransaction(tx1);
-        BEAST_EXPECT(popped == tx2);  // Gets seq 101
-        BEAST_EXPECT(set.size() == 2);
-
-        // After seq 101, there are no more sequential transactions
-        // So now it should move to tickets (lowest first)
-        popped = set.popAcctTransaction(tx2);
-        BEAST_EXPECT(popped == tx3);  // Gets ticket 50 (lowest)
-        BEAST_EXPECT(set.size() == 1);
-
-        // Use tx3 (ticket 50) to get the next ticket
-        popped = set.popAcctTransaction(tx3);
-        BEAST_EXPECT(popped == tx4);  // Gets ticket 150 (only one left)
-        BEAST_EXPECT(set.size() == 0);
-
-        // Try to pop when empty
-        popped = set.popAcctTransaction(tx4);
-        BEAST_EXPECT(!popped);  // No more transactions
-        BEAST_EXPECT(set.size() == 0);
-    }
-
-    void
-    testDuplicateTransactions()
-    {
-        testcase("Duplicate transactions");
-
-        CanonicalTXSet set(uint256(42));
-
-        AccountID alice(1);
-
-        // Create identical transactions
-        auto tx1 = makeSeqTx(alice, 100, 1);
-        auto tx2 = makeSeqTx(alice, 100, 1);  // Same parameters
-
-        set.insert(tx1);
-        set.insert(tx2);
-
-        // Map should have unique keys
-        BEAST_EXPECT(set.size() == 1);
-
-        // The first insert wins with std::map
-        auto it = set.begin();
-        BEAST_EXPECT(it->second == tx1);  // Should be tx1, not tx2
-
-        // Verify they have the same transaction ID
-        BEAST_EXPECT(tx1->getTransactionID() == tx2->getTransactionID());
-    }
-
-    void
-    testEmptyPop()
-    {
-        testcase("Empty pop");
-
-        CanonicalTXSet set(uint256(42));
-
-        AccountID alice(1);
-        auto tx1 = makeSeqTx(alice, 100, 1);
-
-        // Try to pop from empty set
-        auto popped = set.popAcctTransaction(tx1);
-        BEAST_EXPECT(!popped);
-        BEAST_EXPECT(set.empty());
-    }
-
-    void
-    testLargeGapInSequence()
-    {
-        testcase("Large gap in sequence");
-
-        CanonicalTXSet set(uint256(42));
-
-        AccountID alice(1);
-
-        auto tx1 = makeSeqTx(alice, 100, 1);
-        auto tx2 = makeSeqTx(alice, 200, 2);  // Large gap
-
-        set.insert(tx1);
-        set.insert(tx2);
-
-        auto popped = set.popAcctTransaction(tx1);
-        BEAST_EXPECT(!popped);  // Gap too large, no consecutive sequence
-        BEAST_EXPECT(set.size() == 2);
-    }
-
-    void
-    run() override
-    {
-        // testInsertAndIteration(false);
-        testInsertAndIteration(true);
-        // testErase();
-        // testReset();
-        // testPopAcctTransactionSequence();
-        // testPopAcctTransactionTickets();
-        // testPopAcctTransactionMixed();
-        // testDuplicateTransactions();
-        // testEmptyPop();
-        // testLargeGapInSequence();
-    }
-};
-
-BEAST_DEFINE_TESTSUITE(CanonicalTXSet, app, ripple);
-
-} // namespace test
-} // namespace ripple
@@ -181,7 +181,7 @@ class TransactionEntry_test : public beast::unit_test::suite
 
             BEAST_EXPECT(resIndex[jss::validated] == true);
             BEAST_EXPECT(resIndex[jss::ledger_index] == index);
-            // BEAST_EXPECT(resIndex[jss::ledger_hash] == expected_ledger_hash);
+            BEAST_EXPECT(resIndex[jss::ledger_hash] == expected_ledger_hash);
             if (apiVersion > 1)
             {
                 BEAST_EXPECT(resIndex[jss::hash] == txhash);
@@ -509,10 +509,7 @@ RCLConsensus::Adaptor::doAccept(
     // we use the hash of the set.
     //
     // FIXME: Use a std::vector and a custom sorter instead of CanonicalTXSet?
-    bool useCanonicalTxSet =
-        ledgerMaster_.getValidatedRules().enabled(fixCanonicalTxSet);
-    CanonicalTXSet retriableTxs{
-        result.txns.map_->getHash().as_uint256(), useCanonicalTxSet};
+    CanonicalTXSet retriableTxs{result.txns.map_->getHash().as_uint256()};
 
     JLOG(j_.debug()) << "Building canonical tx set: " << retriableTxs.key();
 
@@ -19,8 +19,6 @@
 
 #include <xrpld/app/misc/CanonicalTXSet.h>
 
-#include <blake3.h>
-
 namespace ripple {
 
 bool
@@ -44,35 +42,20 @@ operator<(CanonicalTXSet::Key const& lhs, CanonicalTXSet::Key const& rhs)
 uint256
 CanonicalTXSet::accountKey(AccountID const& account)
 {
-    if (canonicalFix_)
-    {
-        blake3_hasher hasher;
-        blake3_hasher_init(&hasher);
-        blake3_hasher_update(&hasher, account.data(), account.size());
-        blake3_hasher_update(&hasher, salt_.data(), salt_.size());
-
-        uint256 result;
-        blake3_hasher_finalize(&hasher, result.data(), 32);
-        return result;
-    }
-    else
-    {
-        uint256 ret = beast::zero;
-        memcpy(ret.begin(), account.begin(), account.size());
-        ret ^= salt_;
-        return ret;
-    }
+    uint256 ret = beast::zero;
+    memcpy(ret.begin(), account.begin(), account.size());
+    ret ^= salt_;
+    return ret;
 }
 
 void
 CanonicalTXSet::insert(std::shared_ptr<STTx const> const& txn)
 {
-    map_.insert(
-        std::make_pair(
-            Key(accountKey(txn->getAccountID(sfAccount)),
-                txn->getSeqProxy(),
-                txn->getTransactionID()),
-            txn));
+    map_.insert(std::make_pair(
+        Key(accountKey(txn->getAccountID(sfAccount)),
+            txn->getSeqProxy(),
+            txn->getTransactionID()),
+        txn));
 }
 
 std::shared_ptr<STTx const>
@@ -113,11 +113,6 @@ public:
     {
     }
 
-    explicit CanonicalTXSet(LedgerHash const& saltHash, bool canonicalFix)
-        : salt_(saltHash), canonicalFix_(canonicalFix)
-    {
-    }
-
     void
     insert(std::shared_ptr<STTx const> const& txn);
 
@@ -178,7 +173,6 @@ private:
 
     // Used to salt the accounts so people can't mine for low account numbers
     uint256 salt_;
-    bool canonicalFix_ = false;
 };
 
 } // namespace ripple
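For context on what the reverted fixCanonicalTxSet path did: the surviving construction XORs the 20-byte account ID into the 32-byte salt, so the account's byte structure remains visible in the ordering key, while the removed BLAKE3 variant hashed account and salt together so the key has no exploitable relationship to the account ID. A minimal standalone sketch of the two constructions (illustrative only; plain std::array stands in for rippled's AccountID and uint256):

#include <blake3.h>

#include <array>
#include <cstdint>
#include <cstring>

using Account = std::array<std::uint8_t, 20>;  // stand-in for AccountID
using Key256 = std::array<std::uint8_t, 32>;   // stand-in for uint256

// Pre-fix construction: copy the account into the low bytes, XOR the salt.
// The account's high-order bytes still shape the resulting sort key.
Key256
xorKey(Account const& account, Key256 const& salt)
{
    Key256 ret{};
    std::memcpy(ret.data(), account.data(), account.size());
    for (std::size_t i = 0; i < ret.size(); ++i)
        ret[i] ^= salt[i];
    return ret;
}

// Removed fix construction: hash account and salt together with BLAKE3,
// so the ordering key cannot be influenced by mining for account IDs.
Key256
hashedKey(Account const& account, Key256 const& salt)
{
    blake3_hasher hasher;
    blake3_hasher_init(&hasher);
    blake3_hasher_update(&hasher, account.data(), account.size());
    blake3_hasher_update(&hasher, salt.data(), salt.size());

    Key256 ret{};
    blake3_hasher_finalize(&hasher, ret.data(), ret.size());
    return ret;
}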
@@ -162,20 +162,6 @@ std::unique_ptr<NodeStore::Database>
 SHAMapStoreImp::makeNodeStore(int readThreads)
 {
     auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
-
-    // Provide default values:
-    if (!nscfg.exists("cache_size"))
-        nscfg.set(
-            "cache_size",
-            std::to_string(app_.config().getValueFor(
-                SizedItem::treeCacheSize, std::nullopt)));
-
-    if (!nscfg.exists("cache_age"))
-        nscfg.set(
-            "cache_age",
-            std::to_string(app_.config().getValueFor(
-                SizedItem::treeCacheAge, std::nullopt)));
-
     std::unique_ptr<NodeStore::Database> db;
 
     if (deleteInterval_)
@@ -269,8 +255,6 @@ SHAMapStoreImp::run()
     LedgerIndex lastRotated = state_db_.getState().lastRotated;
     netOPs_ = &app_.getOPs();
     ledgerMaster_ = &app_.getLedgerMaster();
-    fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
-    treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
 
     if (advisoryDelete_)
         canDelete_ = state_db_.getCanDelete();
@@ -563,16 +547,13 @@ void
 SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
 {
     ledgerMaster_->clearLedgerCachePrior(validatedSeq);
-    fullBelowCache_->clear();
 }
 
 void
 SHAMapStoreImp::freshenCaches()
 {
-    if (freshenCache(*treeNodeCache_))
-        return;
-    if (freshenCache(app_.getMasterTransaction().getCache()))
-        return;
+    freshenCache(*app_.getNodeFamily().getTreeNodeCache());
+    freshenCache(app_.getMasterTransaction().getCache());
 }
 
 void
@@ -112,8 +112,6 @@ private:
     // as of run() or before
     NetworkOPs* netOPs_ = nullptr;
    LedgerMaster* ledgerMaster_ = nullptr;
-    FullBelowCache* fullBelowCache_ = nullptr;
-    TreeNodeCache* treeNodeCache_ = nullptr;
 
     static constexpr auto nodeStoreName_ = "NodeStore";
@@ -33,14 +33,6 @@ DatabaseNodeImp::store(
 
     auto obj = NodeObject::createObject(type, std::move(data), hash);
     backend_->store(obj);
-    if (cache_)
-    {
-        // After the store, replace a negative cache entry if there is one
-        cache_->canonicalize(
-            hash, obj, [](std::shared_ptr<NodeObject> const& n) {
-                return n->getType() == hotDUMMY;
-            });
-    }
 }
 
 void
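The comment in the removed block describes a negative-cache pattern: when a fetch misses in the backend, a hotDUMMY placeholder is cached so later fetches can answer "not found" without touching the backend, and a subsequent store must replace that placeholder with the real object. A minimal self-contained sketch of the idea (illustrative only; it does not reproduce rippled's TaggedCache API):

#include <cstdint>
#include <memory>
#include <optional>
#include <unordered_map>

// Toy object: isDummy plays the role of NodeObject's hotDUMMY type.
struct Object
{
    bool isDummy = false;
};

class NegativeCache
{
    std::unordered_map<std::uint64_t, std::shared_ptr<Object>> map_;

public:
    // nullopt -> true cache miss: caller should ask the backend.
    // nullptr -> cached negative entry: backend is known not to have it.
    std::optional<std::shared_ptr<Object>>
    fetch(std::uint64_t key) const
    {
        auto it = map_.find(key);
        if (it == map_.end())
            return std::nullopt;
        if (it->second->isDummy)
            return std::shared_ptr<Object>{};  // cached "not found"
        return it->second;
    }

    // After storing to the backend, replace any negative entry with the
    // real object, as the removed cache_->canonicalize(...) call did.
    void
    put(std::uint64_t key, std::shared_ptr<Object> obj)
    {
        auto [it, inserted] = map_.try_emplace(key, obj);
        if (!inserted && it->second->isDummy)
            it->second = std::move(obj);
    }
};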
@@ -49,23 +41,12 @@ DatabaseNodeImp::asyncFetch(
     std::uint32_t ledgerSeq,
     std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
 {
-    if (cache_)
-    {
-        std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
-        if (obj)
-        {
-            callback(obj->getType() == hotDUMMY ? nullptr : obj);
-            return;
-        }
-    }
     Database::asyncFetch(hash, ledgerSeq, std::move(callback));
 }
 
 void
 DatabaseNodeImp::sweep()
 {
-    if (cache_)
-        cache_->sweep();
 }
 
 std::shared_ptr<NodeObject>
@@ -75,64 +56,33 @@ DatabaseNodeImp::fetchNodeObject(
     FetchReport& fetchReport,
     bool duplicate)
 {
-    std::shared_ptr<NodeObject> nodeObject =
-        cache_ ? cache_->fetch(hash) : nullptr;
+    std::shared_ptr<NodeObject> nodeObject = nullptr;
+    Status status;
 
-    if (!nodeObject)
+    try
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not "
-                         << (cache_ ? "cached" : "found");
-
-        Status status;
-
-        try
-        {
-            status = backend_->fetch(hash.data(), &nodeObject);
-        }
-        catch (std::exception const& e)
-        {
-            JLOG(j_.fatal())
-                << "fetchNodeObject " << hash
-                << ": Exception fetching from backend: " << e.what();
-            Rethrow();
-        }
-
-        switch (status)
-        {
-            case ok:
-                if (cache_)
-                {
-                    if (nodeObject)
-                        cache_->canonicalize_replace_client(hash, nodeObject);
-                    else
-                    {
-                        auto notFound =
-                            NodeObject::createObject(hotDUMMY, {}, hash);
-                        cache_->canonicalize_replace_client(hash, notFound);
-                        if (notFound->getType() != hotDUMMY)
-                            nodeObject = notFound;
-                    }
-                }
-                break;
-            case notFound:
-                break;
-            case dataCorrupt:
-                JLOG(j_.fatal()) << "fetchNodeObject " << hash
-                                 << ": nodestore data is corrupted";
-                break;
-            default:
-                JLOG(j_.warn())
-                    << "fetchNodeObject " << hash
-                    << ": backend returns unknown result " << status;
-                break;
-        }
+        status = backend_->fetch(hash.data(), &nodeObject);
     }
-    else
+    catch (std::exception const& e)
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash
-                         << ": record found in cache";
-        if (nodeObject->getType() == hotDUMMY)
-            nodeObject.reset();
+        JLOG(j_.fatal()) << "fetchNodeObject " << hash
+                         << ": Exception fetching from backend: " << e.what();
+        Rethrow();
+    }
+
+    switch (status)
+    {
+        case ok:
+        case notFound:
+            break;
+        case dataCorrupt:
+            JLOG(j_.fatal()) << "fetchNodeObject " << hash
+                             << ": nodestore data is corrupted";
+            break;
+        default:
+            JLOG(j_.warn()) << "fetchNodeObject " << hash
+                            << ": backend returns unknown result " << status;
+            break;
     }
 
     if (nodeObject)
@@ -144,71 +94,33 @@ DatabaseNodeImp::fetchNodeObject(
 std::vector<std::shared_ptr<NodeObject>>
 DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
 {
-    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
     using namespace std::chrono;
     auto const before = steady_clock::now();
-    std::unordered_map<uint256 const*, size_t> indexMap;
-    std::vector<uint256 const*> cacheMisses;
-    uint64_t hits = 0;
-    uint64_t fetches = 0;
-
+    std::vector<uint256 const*> batch{hashes.size()};
     for (size_t i = 0; i < hashes.size(); ++i)
     {
         auto const& hash = hashes[i];
-        // See if the object already exists in the cache
-        auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
-        ++fetches;
-        if (!nObj)
-        {
-            // Try the database
-            indexMap[&hash] = i;
-            cacheMisses.push_back(&hash);
-        }
-        else
-        {
-            results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
-            // It was in the cache.
-            ++hits;
-        }
+        batch.push_back(&hash);
     }
 
-    JLOG(j_.debug()) << "fetchBatch - cache hits = "
-                     << (hashes.size() - cacheMisses.size())
-                     << " - cache misses = " << cacheMisses.size();
-    auto dbResults = backend_->fetchBatch(cacheMisses).first;
-
-    for (size_t i = 0; i < dbResults.size(); ++i)
+    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
+    results = backend_->fetchBatch(batch).first;
+    for (size_t i = 0; i < results.size(); ++i)
     {
-        auto nObj = std::move(dbResults[i]);
-        size_t index = indexMap[cacheMisses[i]];
-        auto const& hash = hashes[index];
-
-        if (nObj)
-        {
-            // Ensure all threads get the same object
-            if (cache_)
-                cache_->canonicalize_replace_client(hash, nObj);
-        }
-        else
+        if (!results[i])
         {
             JLOG(j_.error())
                 << "fetchBatch - "
-                << "record not found in db or cache. hash = " << strHex(hash);
-            if (cache_)
-            {
-                auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
-                cache_->canonicalize_replace_client(hash, notFound);
-                if (notFound->getType() != hotDUMMY)
-                    nObj = std::move(notFound);
-            }
+                << "record not found in db. hash = " << strHex(hashes[i]);
         }
-        results[index] = std::move(nObj);
     }
 
     auto fetchDurationUs =
         std::chrono::duration_cast<std::chrono::microseconds>(
            steady_clock::now() - before)
            .count();
-    updateFetchMetrics(fetches, hits, fetchDurationUs);
+    updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
    return results;
 }
@@ -45,38 +45,6 @@ public:
         : Database(scheduler, readThreads, config, j)
         , backend_(std::move(backend))
     {
-        std::optional<int> cacheSize, cacheAge;
-
-        if (config.exists("cache_size"))
-        {
-            cacheSize = get<int>(config, "cache_size");
-            if (cacheSize.value() < 0)
-            {
-                Throw<std::runtime_error>(
-                    "Specified negative value for cache_size");
-            }
-        }
-
-        if (config.exists("cache_age"))
-        {
-            cacheAge = get<int>(config, "cache_age");
-            if (cacheAge.value() < 0)
-            {
-                Throw<std::runtime_error>(
-                    "Specified negative value for cache_age");
-            }
-        }
-
-        if (cacheSize != 0 || cacheAge != 0)
-        {
-            cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
-                "DatabaseNodeImp",
-                cacheSize.value_or(0),
-                std::chrono::minutes(cacheAge.value_or(0)),
-                stopwatch(),
-                j);
-        }
-
         XRPL_ASSERT(
             backend_,
             "ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -137,9 +105,6 @@ public:
     sweep() override;
 
 private:
-    // Cache for database objects. This cache is not always initialized. Check
-    // for null before using.
-    std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
     // Persistent key/value storage
     std::shared_ptr<Backend> backend_;