Mirror of https://github.com/Xahau/xahaud.git (synced 2026-01-09 01:05:15 +00:00)

Compare commits: strict-bui...fixup-rwdb (26 commits)
| SHA1 |
|---|
| 317549ec54 |
| 3e57c72dc5 |
| 28e20215c7 |
| a6f9c700ee |
| 849413eb1b |
| c5f6242187 |
| deeb8a0ec8 |
| de1d5a3a2d |
| 559d32f04d |
| 9c2cc9f5f3 |
| 7bd82eef55 |
| 472c19418f |
| 2b225977e2 |
| 58b22901cb |
| 8ba37a3138 |
| 8cffd3054d |
| 6b26045cbc |
| 08f13b7cfe |
| 766f5d7ee1 |
| 287c01ad04 |
| 4239124750 |
| 1e45d4120c |
| 9e446bcc85 |
| 376727d20c |
| d921c87c88 |
| 7b94d3d99d |
.vscode/settings.json (vendored, 2 changed lines)

@@ -3,7 +3,7 @@
     "C_Cpp.clang_format_path": ".clang-format",
     "C_Cpp.clang_format_fallbackStyle": "{ ColumnLimit: 0 }",
     "[cpp]":{
-        "editor.wordBasedSuggestions": false,
+        "editor.wordBasedSuggestions": "off",
         "editor.suggest.insertMode": "replace",
         "editor.semanticHighlighting.enabled": true,
         "editor.tabSize": 4,
@@ -538,7 +538,9 @@ target_sources (rippled PRIVATE
     subdir: nodestore
   #]===============================]
   src/ripple/nodestore/backend/CassandraFactory.cpp
+  src/ripple/nodestore/backend/RWDBFactory.cpp
   src/ripple/nodestore/backend/MemoryFactory.cpp
+  src/ripple/nodestore/backend/FlatmapFactory.cpp
   src/ripple/nodestore/backend/NuDBFactory.cpp
   src/ripple/nodestore/backend/NullFactory.cpp
   src/ripple/nodestore/backend/RocksDBFactory.cpp
@@ -44,7 +44,7 @@ else()
 endif()
 # TBD:
 # Boost_USE_DEBUG_RUNTIME: When ON, uses Boost libraries linked against the
-find_package(Boost 1.70 REQUIRED
+find_package(Boost 1.86 REQUIRED
   COMPONENTS
     chrono
     container
@@ -90,11 +90,11 @@ echo "-- Install Cmake 3.23.1 --" &&
 pwd &&
 ( wget -nc -q https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz; echo "" ) &&
 tar -xzf cmake-3.23.1-linux-x86_64.tar.gz -C /hbb/ &&
-echo "-- Install Boost 1.75.0 --" &&
+echo "-- Install Boost 1.86.0 --" &&
 pwd &&
-( wget -nc -q https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz; echo "" ) &&
-tar -xzf boost_1_75_0.tar.gz &&
-cd boost_1_75_0 && ./bootstrap.sh && ./b2 link=static -j$3 && ./b2 install &&
+( wget -nc -q https://boostorg.jfrog.io/artifactory/main/release/1.86.0/source/boost_1_86_0.tar.gz; echo "" ) &&
+tar -xzf boost_1_86_0.tar.gz &&
+cd boost_1_86_0 && ./bootstrap.sh && ./b2 link=static -j$3 && ./b2 install &&
 cd ../ &&
 echo "-- Install Protobuf 3.20.0 --" &&
 pwd &&
@@ -127,9 +127,9 @@ echo "-- Build WasmEdge --" &&
 cd WasmEdge-0.11.2 &&
 ( mkdir build; echo "" ) &&
 cd build &&
-export BOOST_ROOT="/usr/local/src/boost_1_75_0" &&
+export BOOST_ROOT="/usr/local/src/boost_1_86_0" &&
 export Boost_LIBRARY_DIRS="/usr/local/lib" &&
-export BOOST_INCLUDEDIR="/usr/local/src/boost_1_75_0" &&
+export BOOST_INCLUDEDIR="/usr/local/src/boost_1_86_0" &&
 export PATH=`echo $PATH | sed -E "s/devtoolset-7/devtoolset-9/g"` &&
 cmake .. \
     -DCMAKE_BUILD_TYPE=Release \
@@ -1056,7 +1056,18 @@
 # Cassandra is an alternative backend to be used only with Reporting Mode.
 # See the Reporting Mode section for more details about Reporting Mode.
 #
-# Required keys for NuDB and RocksDB:
+#   type = RWDB
+#
+#   RWDB is a high-performance memory store written by XRPL-Labs and optimized
+#   for xahaud. RWDB is NOT persistent and the data will be lost on restart.
+#   RWDB is recommended for Validator and Peer nodes that are not required to
+#   store history.
+#
+#   RWDB maintains its high speed regardless of the amount of history
+#   stored. Online delete should NOT be used; instead, RWDB will use the
+#   ledger_history config value to determine how many ledgers to keep in memory.
+#
+# Required keys for NuDB, RWDB and RocksDB:
 #
 # path          Location to store the database
 #
@@ -1112,7 +1123,8 @@
 # online_delete         Minimum value of 256. Enable automatic purging
 #                       of older ledger information. Maintain at least this
 #                       number of ledger records online. Must be greater
-#                       than or equal to ledger_history.
+#                       than or equal to ledger_history. If using RWDB
+#                       this value is ignored.
 #
 # These keys modify the behavior of online_delete, and thus are only
 # relevant if online_delete is defined and non-zero:
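Taken together with the online_delete note above, a minimal illustrative [node_db] stanza for a validator or peer node that keeps no history could look like the following; the path value is a placeholder, and ledger_history is shown only because RWDB uses it in place of online_delete:

[node_db]
type=RWDB
path=/path/to/db

[ledger_history]
256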
@@ -144,4 +144,12 @@ D686F2538F410C9D0D856788E98E3579595DAF7B38D38887F81ECAC934B06040 HooksUpdate1
 86E83A7D2ECE3AD5FA87AB2195AE015C950469ABF0B72EAACED318F74886AE90 CryptoConditionsSuite
 3C43D9A973AA4443EF3FC38E42DD306160FBFFDAB901CD8BAA15D09F2597EB87 NonFungibleTokensV1
 0285B7E5E08E1A8E4C15636F0591D87F73CB6A7B6452A932AD72BBC8E5D1CBE3 fixNFTokenDirV1
 36799EA497B1369B170805C078AEFE6188345F9B3E324C21E9CA3FF574E3C3D6 fixNFTokenNegOffer
+4C499D17719BB365B69010A436B64FD1A82AAB199FC1CEB06962EBD01059FB09 fixXahauV1
+215181D23BF5C173314B5FDB9C872C92DE6CC918483727DE037C0C13E7E6EE9D fixXahauV2
+0D8BF22FF7570D58598D1EF19EBB6E142AD46E59A223FD3816262FBB69345BEA Remit
+7CA0426E7F411D39BB014E57CD9E08F61DE1750F0D41FCD428D9FB80BB7596B0 ZeroB2M
+4B8466415FAB32FFA89D9DCBE166A42340115771DF611A7160F8D7439C87ECD8 fixNSDelete
+EDB4EE4C524E16BDD91D9A529332DED08DCAAA51CC6DC897ACFA1A0ED131C5B6 fix240819
+8063140E9260799D6716756B891CEC3E7006C4E4F277AB84670663A88F94B9C4 fixPageCap
+88693F108C3CD8A967F3F4253A32DEF5E35F9406ACD2A11B88B11D90865763A9 fix240911
hook/generate_sfcodes.sh (new executable file, 29 lines)

@@ -0,0 +1,29 @@
+#!/bin/bash
+RIPPLED_ROOT="../src/ripple"
+echo '// For documentation please see: https://xrpl-hooks.readme.io/reference/'
+echo '// Generated using generate_sfcodes.sh'
+cat $RIPPLED_ROOT/protocol/impl/SField.cpp | grep -E '^CONSTRUCT_' |
+sed 's/UINT16/1/g' |
+sed 's/UINT32/2/g' |
+sed 's/UINT64/3/g' |
+sed 's/HASH128/4/g' |
+sed 's/HASH256/5/g' |
+sed 's/UINT128/4/g' |
+sed 's/UINT256/5/g' |
+sed 's/AMOUNT/6/g' |
+sed 's/VL/7/g' | sed 's/Import7/ImportVL/g' |
+sed 's/ACCOUNT/8/g' |
+sed 's/OBJECT/14/g' |
+sed 's/ARRAY/15/g' |
+sed 's/UINT8/16/g' |
+sed 's/HASH160/17/g' |
+sed 's/UINT160/17/g' |
+sed 's/PATHSET/18/g' |
+sed 's/VECTOR256/19/g' |
+sed 's/UINT96/20/g' |
+sed 's/UINT192/21/g' |
+sed 's/UINT384/22/g' |
+sed 's/UINT512/23/g' |
+grep -Eo '"([^"]+)", *([0-9]+), *([0-9]+)' |
+sed 's/"//g' | sed 's/ *//g' | sed 's/,/ /g' |
+awk '{print ("#define sf"$1" (("$2"U << 16U) + "$3"U)")}'
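The script rewrites each CONSTRUCT_* entry from SField.cpp into a single preprocessor definition that packs the serialized-field type id into the upper 16 bits and the field code into the lower 16 bits. A few illustrative lines of output, with names and values taken from the sf* defines appearing later in this diff rather than regenerated:

#define sfLockCount ((2U << 16U) + 49U)
#define sfAccountIndex ((3U << 16U) + 98U)
#define sfBaseFeeDrops ((6U << 16U) + 22U)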
@@ -1,5 +1,5 @@
 /**
- * These are helper macros for writing hooks, all of them are optional as is including hookmacro.h at all
+ * These are helper macros for writing hooks, all of them are optional as is including macro.h at all
  */

 #include <stdint.h>
@@ -60,7 +60,10 @@
|
||||
#define sfBurnedNFTokens ((2U << 16U) + 44U)
|
||||
#define sfHookStateCount ((2U << 16U) + 45U)
|
||||
#define sfEmitGeneration ((2U << 16U) + 46U)
|
||||
#define sfLockCount ((2U << 16U) + 47U)
|
||||
#define sfLockCount ((2U << 16U) + 49U)
|
||||
#define sfFirstNFTokenSequence ((2U << 16U) + 50U)
|
||||
#define sfXahauActivationLgrSeq ((2U << 16U) + 96U)
|
||||
#define sfImportSequence ((2U << 16U) + 97U)
|
||||
#define sfRewardTime ((2U << 16U) + 98U)
|
||||
#define sfRewardLgrFirst ((2U << 16U) + 99U)
|
||||
#define sfRewardLgrLast ((2U << 16U) + 100U)
|
||||
@@ -80,6 +83,8 @@
|
||||
#define sfHookInstructionCount ((3U << 16U) + 17U)
|
||||
#define sfHookReturnCode ((3U << 16U) + 18U)
|
||||
#define sfReferenceCount ((3U << 16U) + 19U)
|
||||
#define sfAccountIndex ((3U << 16U) + 98U)
|
||||
#define sfAccountCount ((3U << 16U) + 99U)
|
||||
#define sfRewardAccumulator ((3U << 16U) + 100U)
|
||||
#define sfEmailHash ((4U << 16U) + 1U)
|
||||
#define sfTakerPaysCurrency ((10U << 16U) + 1U)
|
||||
@@ -120,6 +125,9 @@
|
||||
#define sfOfferID ((5U << 16U) + 34U)
|
||||
#define sfEscrowID ((5U << 16U) + 35U)
|
||||
#define sfURITokenID ((5U << 16U) + 36U)
|
||||
#define sfGovernanceFlags ((5U << 16U) + 99U)
|
||||
#define sfGovernanceMarks ((5U << 16U) + 98U)
|
||||
#define sfEmittedTxnID ((5U << 16U) + 97U)
|
||||
#define sfAmount ((6U << 16U) + 1U)
|
||||
#define sfBalance ((6U << 16U) + 2U)
|
||||
#define sfLimitAmount ((6U << 16U) + 3U)
|
||||
@@ -136,6 +144,9 @@
|
||||
#define sfNFTokenBrokerFee ((6U << 16U) + 19U)
|
||||
#define sfHookCallbackFee ((6U << 16U) + 20U)
|
||||
#define sfLockedBalance ((6U << 16U) + 21U)
|
||||
#define sfBaseFeeDrops ((6U << 16U) + 22U)
|
||||
#define sfReserveBaseDrops ((6U << 16U) + 23U)
|
||||
#define sfReserveIncrementDrops ((6U << 16U) + 24U)
|
||||
#define sfPublicKey ((7U << 16U) + 1U)
|
||||
#define sfMessageKey ((7U << 16U) + 2U)
|
||||
#define sfSigningPubKey ((7U << 16U) + 3U)
|
||||
@@ -171,11 +182,13 @@
|
||||
#define sfNFTokenMinter ((8U << 16U) + 9U)
|
||||
#define sfEmitCallback ((8U << 16U) + 10U)
|
||||
#define sfHookAccount ((8U << 16U) + 16U)
|
||||
#define sfInform ((8U << 16U) + 99U)
|
||||
#define sfIndexes ((19U << 16U) + 1U)
|
||||
#define sfHashes ((19U << 16U) + 2U)
|
||||
#define sfAmendments ((19U << 16U) + 3U)
|
||||
#define sfNFTokenOffers ((19U << 16U) + 4U)
|
||||
#define sfHookNamespaces ((19U << 16U) + 5U)
|
||||
#define sfURITokenIDs ((19U << 16U) + 99U)
|
||||
#define sfPaths ((18U << 16U) + 1U)
|
||||
#define sfTransactionMetaData ((14U << 16U) + 2U)
|
||||
#define sfCreatedNode ((14U << 16U) + 3U)
|
||||
@@ -198,6 +211,12 @@
|
||||
#define sfHookDefinition ((14U << 16U) + 22U)
|
||||
#define sfHookParameter ((14U << 16U) + 23U)
|
||||
#define sfHookGrant ((14U << 16U) + 24U)
|
||||
#define sfGenesisMint ((14U << 16U) + 96U)
|
||||
#define sfActiveValidator ((14U << 16U) + 95U)
|
||||
#define sfImportVLKey ((14U << 16U) + 94U)
|
||||
#define sfHookEmission ((14U << 16U) + 93U)
|
||||
#define sfMintURIToken ((14U << 16U) + 92U)
|
||||
#define sfAmountEntry ((14U << 16U) + 91U)
|
||||
#define sfSigners ((15U << 16U) + 3U)
|
||||
#define sfSignerEntries ((15U << 16U) + 4U)
|
||||
#define sfTemplate ((15U << 16U) + 5U)
|
||||
@@ -212,4 +231,8 @@
|
||||
#define sfHookExecutions ((15U << 16U) + 18U)
|
||||
#define sfHookParameters ((15U << 16U) + 19U)
|
||||
#define sfHookGrants ((15U << 16U) + 20U)
|
||||
#define sfGenesisMints ((15U << 16U) + 96U)
|
||||
#define sfActiveValidators ((15U << 16U) + 95U)
|
||||
#define sfImportVLKeys ((15U << 16U) + 94U)
|
||||
#define sfHookEmissions ((15U << 16U) + 93U)
|
||||
#define sfAmounts ((15U << 16U) + 92U)
|
||||
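These sf* constants all follow the ((type << 16U) + field) packing produced by generate_sfcodes.sh. A minimal C++ sketch of splitting a code back into its two parts; unpack_sf is an illustrative helper and not part of the hook API:

#include <cstdint>
#include <iostream>
#include <utility>

// Mirrors the ((type << 16U) + field) packing used by the sf* defines above.
constexpr std::pair<std::uint32_t, std::uint32_t>
unpack_sf(std::uint32_t code)
{
    return {code >> 16U, code & 0xFFFFU};
}

int main()
{
    auto const [type, field] = unpack_sf((6U << 16U) + 22U);     // sfBaseFeeDrops
    std::cout << "type=" << type << " field=" << field << "\n";  // type=6 field=22
}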
@@ -4617,6 +4617,8 @@ DEFINE_HOOK_FUNCTION(
|
||||
}
|
||||
catch (std::exception& e)
|
||||
{
|
||||
JLOG(j.trace()) << "HookInfo[" << HC_ACC()
|
||||
<< "]: etxn_fee_base exception: " << e.what();
|
||||
return INVALID_TXN;
|
||||
}
|
||||
|
||||
@@ -5402,7 +5404,7 @@ DEFINE_HOOK_FUNCTION(
 const int64_t float_one_internal = make_float(1000000000000000ull, -15, false);

 inline int64_t
-float_divide_internal(int64_t float1, int64_t float2)
+float_divide_internal(int64_t float1, int64_t float2, bool hasFix)
 {
     RETURN_IF_INVALID_FLOAT(float1);
     RETURN_IF_INVALID_FLOAT(float2);
@@ -5455,8 +5457,16 @@ float_divide_internal(int64_t float1, int64_t float2)
     while (man2 > 0)
     {
         int i = 0;
-        for (; man1 > man2; man1 -= man2, ++i)
-            ;
+        if (hasFix)
+        {
+            for (; man1 >= man2; man1 -= man2, ++i)
+                ;
+        }
+        else
+        {
+            for (; man1 > man2; man1 -= man2, ++i)
+                ;
+        }

         man3 *= 10;
         man3 += i;
@@ -5476,7 +5486,8 @@ DEFINE_HOOK_FUNCTION(int64_t, float_divide, int64_t float1, int64_t float2)
     HOOK_SETUP();  // populates memory_ctx, memory, memory_length, applyCtx,
                    // hookCtx on current stack

-    return float_divide_internal(float1, float2);
+    bool const hasFix = view.rules().enabled(fixFloatDivide);
+    return float_divide_internal(float1, float2, hasFix);

     HOOK_TEARDOWN();
 }
@@ -5495,7 +5506,9 @@ DEFINE_HOOK_FUNCTION(int64_t, float_invert, int64_t float1)
         return DIVISION_BY_ZERO;
     if (float1 == float_one_internal)
         return float_one_internal;
-    return float_divide_internal(float_one_internal, float1);
+
+    bool const fixV3 = view.rules().enabled(fixFloatDivide);
+    return float_divide_internal(float_one_internal, float1, fixV3);

     HOOK_TEARDOWN();
 }
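The behavioural difference in float_divide_internal is easiest to see on an exact division. A small standalone C++ sketch of the digit-extraction step above, using plain integers rather than the hook's packed float format: with the old strict '>' comparison the subtraction loop stops one iteration early whenever man1 is an exact multiple of man2, so that digit is undercounted, while the '>=' form used when fixFloatDivide is enabled counts it.

#include <cstdint>
#include <iostream>

// Count how many times man2 fits into man1, mirroring the loop in
// float_divide_internal. strict=true models the pre-fix '>' comparison.
static int digit(std::int64_t man1, std::int64_t man2, bool strict)
{
    int i = 0;
    if (strict)
        for (; man1 > man2; man1 -= man2, ++i)
            ;
    else
        for (; man1 >= man2; man1 -= man2, ++i)
            ;
    return i;
}

int main()
{
    // 10 / 5: the extracted digit should be 2.
    std::cout << digit(10, 5, true) << "\n";   // 1  (old behaviour, undercounts)
    std::cout << digit(10, 5, false) << "\n";  // 2  (fixFloatDivide behaviour)
}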
@@ -219,7 +219,7 @@ private:
|
||||
run()
|
||||
{
|
||||
beast::setCurrentThreadName("LedgerCleaner");
|
||||
JLOG(j_.debug()) << "Started";
|
||||
JLOG(j_.debug()) << "Started ledger cleaner";
|
||||
|
||||
while (true)
|
||||
{
|
||||
@@ -392,7 +392,8 @@ private:
|
||||
|
||||
if (app_.getFeeTrack().isLoadedLocal())
|
||||
{
|
||||
JLOG(j_.debug()) << "Waiting for load to subside";
|
||||
JLOG(j_.debug())
|
||||
<< "Ledger Cleaner: Waiting for load to subside";
|
||||
std::this_thread::sleep_for(std::chrono::seconds(5));
|
||||
continue;
|
||||
}
|
||||
@@ -415,13 +416,15 @@ private:
|
||||
bool fail = false;
|
||||
if (ledgerHash.isZero())
|
||||
{
|
||||
JLOG(j_.info())
|
||||
<< "Unable to get hash for ledger " << ledgerIndex;
|
||||
JLOG(j_.warn())
|
||||
<< "Ledger Cleaner: Unable to get hash for ledger "
|
||||
<< ledgerIndex;
|
||||
fail = true;
|
||||
}
|
||||
else if (!doLedger(ledgerIndex, ledgerHash, doNodes, doTxns))
|
||||
{
|
||||
JLOG(j_.info()) << "Failed to process ledger " << ledgerIndex;
|
||||
JLOG(j_.warn()) << "Ledger Cleaner: Failed to process ledger "
|
||||
<< ledgerIndex;
|
||||
fail = true;
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,6 @@
|
||||
|
||||
#ifndef RIPPLE_APP_MAIN_APPLICATION_H_INCLUDED
|
||||
#define RIPPLE_APP_MAIN_APPLICATION_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/TaggedCache.h>
|
||||
#include <ripple/beast/utility/PropertyStream.h>
|
||||
#include <ripple/core/Config.h>
|
||||
|
||||
@@ -162,7 +162,8 @@ SHAMapStoreImp::SHAMapStoreImp(
|
||||
}
|
||||
|
||||
state_db_.init(config, dbName_);
|
||||
dbPaths();
|
||||
if (!config.mem_backend())
|
||||
dbPaths();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -195,6 +196,7 @@ SHAMapStoreImp::makeNodeStore(int readThreads)
|
||||
"online_delete info from config");
|
||||
}
|
||||
SavedState state = state_db_.getState();
|
||||
|
||||
auto writableBackend = makeBackendRotating(state.writableDb);
|
||||
auto archiveBackend = makeBackendRotating(state.archiveDb);
|
||||
if (!state.writableDb.size())
|
||||
@@ -293,6 +295,8 @@ SHAMapStoreImp::run()
|
||||
fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache(0));
|
||||
treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache(0));
|
||||
|
||||
bool const isMem = app_.config().mem_backend();
|
||||
|
||||
if (advisoryDelete_)
|
||||
canDelete_ = state_db_.getCanDelete();
|
||||
|
||||
@@ -351,7 +355,7 @@ SHAMapStoreImp::run()
|
||||
// will delete up to (not including) lastRotated
|
||||
if (readyToRotate && !waitForImport)
|
||||
{
|
||||
JLOG(journal_.warn())
|
||||
JLOG(journal_.debug())
|
||||
<< "rotating validatedSeq " << validatedSeq << " lastRotated "
|
||||
<< lastRotated << " deleteInterval " << deleteInterval_
|
||||
<< " canDelete_ " << canDelete_ << " state "
|
||||
@@ -395,7 +399,7 @@ SHAMapStoreImp::run()
|
||||
// Only log if we completed without a "health" abort
|
||||
JLOG(journal_.debug()) << validatedSeq << " freshened caches";
|
||||
|
||||
JLOG(journal_.trace()) << "Making a new backend";
|
||||
JLOG(journal_.debug()) << "Making a new backend";
|
||||
auto newBackend = makeBackendRotating();
|
||||
JLOG(journal_.debug())
|
||||
<< validatedSeq << " new backend " << newBackend->getName();
|
||||
@@ -640,6 +644,33 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
|
||||
if (!db)
|
||||
Throw<std::runtime_error>("Failed to get relational database");
|
||||
|
||||
if (app_.config().useTxTables())
|
||||
{
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"Transactions",
|
||||
[&db]() -> std::optional<LedgerIndex> {
|
||||
return db->getTransactionsMinLedgerSeq();
|
||||
},
|
||||
[&db](LedgerIndex min) -> void {
|
||||
db->deleteTransactionsBeforeLedgerSeq(min);
|
||||
});
|
||||
if (healthWait() == stopping)
|
||||
return;
|
||||
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"AccountTransactions",
|
||||
[&db]() -> std::optional<LedgerIndex> {
|
||||
return db->getAccountTransactionsMinLedgerSeq();
|
||||
},
|
||||
[&db](LedgerIndex min) -> void {
|
||||
db->deleteAccountTransactionsBeforeLedgerSeq(min);
|
||||
});
|
||||
if (healthWait() == stopping)
|
||||
return;
|
||||
}
|
||||
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"Ledgers",
|
||||
@@ -647,33 +678,6 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
|
||||
[db](LedgerIndex min) -> void { db->deleteBeforeLedgerSeq(min); });
|
||||
if (healthWait() == stopping)
|
||||
return;
|
||||
|
||||
if (!app_.config().useTxTables())
|
||||
return;
|
||||
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"Transactions",
|
||||
[&db]() -> std::optional<LedgerIndex> {
|
||||
return db->getTransactionsMinLedgerSeq();
|
||||
},
|
||||
[&db](LedgerIndex min) -> void {
|
||||
db->deleteTransactionsBeforeLedgerSeq(min);
|
||||
});
|
||||
if (healthWait() == stopping)
|
||||
return;
|
||||
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"AccountTransactions",
|
||||
[&db]() -> std::optional<LedgerIndex> {
|
||||
return db->getAccountTransactionsMinLedgerSeq();
|
||||
},
|
||||
[&db](LedgerIndex min) -> void {
|
||||
db->deleteAccountTransactionsBeforeLedgerSeq(min);
|
||||
});
|
||||
if (healthWait() == stopping)
|
||||
return;
|
||||
}
|
||||
|
||||
SHAMapStoreImp::HealthResult
|
||||
|
||||
@@ -111,7 +111,7 @@ public:
     std::uint32_t minimumTxnInLedgerSA = 1000;
     /// Number of transactions per ledger that fee escalation "works
     /// towards".
-    std::uint32_t targetTxnInLedger = 256;
+    std::uint32_t targetTxnInLedger = 1000;
     /** Optional maximum allowed value of transactions per ledger before
         fee escalation kicks in. By default, the maximum is an emergent
         property of network, validator, and consensus performance. This
src/ripple/app/rdb/backend/FlatmapDatabase.h (new file, 853 lines)
@@ -0,0 +1,853 @@
|
||||
#ifndef RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
|
||||
#define RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
|
||||
|
||||
#include <ripple/app/ledger/AcceptedLedger.h>
|
||||
#include <ripple/app/ledger/LedgerMaster.h>
|
||||
#include <ripple/app/ledger/TransactionMaster.h>
|
||||
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <shared_mutex>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/unordered/concurrent_flat_map.hpp>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
struct base_uint_hasher
|
||||
{
|
||||
using result_type = std::size_t;
|
||||
|
||||
result_type
|
||||
operator()(base_uint<256> const& value) const
|
||||
{
|
||||
return hardened_hash<>{}(value);
|
||||
}
|
||||
|
||||
result_type
|
||||
operator()(AccountID const& value) const
|
||||
{
|
||||
return hardened_hash<>{}(value);
|
||||
}
|
||||
};
|
||||
|
||||
class FlatmapDatabase : public SQLiteDatabase
|
||||
{
|
||||
private:
|
||||
struct LedgerData
|
||||
{
|
||||
LedgerInfo info;
|
||||
boost::unordered::
|
||||
concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
|
||||
transactions;
|
||||
};
|
||||
|
||||
struct AccountTxData
|
||||
{
|
||||
boost::unordered::
|
||||
concurrent_flat_map<std::pair<uint32_t, uint32_t>, AccountTx>
|
||||
transactions;
|
||||
};
|
||||
|
||||
Application& app_;
|
||||
Config const& config_;
|
||||
JobQueue& jobQueue_;
|
||||
|
||||
boost::unordered::concurrent_flat_map<LedgerIndex, LedgerData> ledgers_;
|
||||
boost::unordered::
|
||||
concurrent_flat_map<uint256, LedgerIndex, base_uint_hasher>
|
||||
ledgerHashToSeq_;
|
||||
boost::unordered::concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
|
||||
transactionMap_;
|
||||
boost::unordered::
|
||||
concurrent_flat_map<AccountID, AccountTxData, base_uint_hasher>
|
||||
accountTxMap_;
|
||||
|
||||
public:
|
||||
FlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
|
||||
: app_(app), config_(config), jobQueue_(jobQueue)
|
||||
{
|
||||
}
|
||||
|
||||
std::optional<LedgerIndex>
|
||||
getMinLedgerSeq() override
|
||||
{
|
||||
std::optional<LedgerIndex> minSeq;
|
||||
ledgers_.visit_all([&minSeq](auto const& pair) {
|
||||
if (!minSeq || pair.first < *minSeq)
|
||||
{
|
||||
minSeq = pair.first;
|
||||
}
|
||||
});
|
||||
return minSeq;
|
||||
}
|
||||
|
||||
std::optional<LedgerIndex>
|
||||
getTransactionsMinLedgerSeq() override
|
||||
{
|
||||
std::optional<LedgerIndex> minSeq;
|
||||
transactionMap_.visit_all([&minSeq](auto const& pair) {
|
||||
LedgerIndex seq = pair.second.second->getLgrSeq();
|
||||
if (!minSeq || seq < *minSeq)
|
||||
{
|
||||
minSeq = seq;
|
||||
}
|
||||
});
|
||||
return minSeq;
|
||||
}
|
||||
|
||||
std::optional<LedgerIndex>
|
||||
getAccountTransactionsMinLedgerSeq() override
|
||||
{
|
||||
std::optional<LedgerIndex> minSeq;
|
||||
accountTxMap_.visit_all([&minSeq](auto const& pair) {
|
||||
pair.second.transactions.visit_all([&minSeq](auto const& tx) {
|
||||
if (!minSeq || tx.first.first < *minSeq)
|
||||
{
|
||||
minSeq = tx.first.first;
|
||||
}
|
||||
});
|
||||
});
|
||||
return minSeq;
|
||||
}
|
||||
|
||||
std::optional<LedgerIndex>
|
||||
getMaxLedgerSeq() override
|
||||
{
|
||||
std::optional<LedgerIndex> maxSeq;
|
||||
ledgers_.visit_all([&maxSeq](auto const& pair) {
|
||||
if (!maxSeq || pair.first > *maxSeq)
|
||||
{
|
||||
maxSeq = pair.first;
|
||||
}
|
||||
});
|
||||
return maxSeq;
|
||||
}
|
||||
void
|
||||
deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override
|
||||
{
|
||||
ledgers_.visit(ledgerSeq, [this](auto& item) {
|
||||
item.second.transactions.visit_all([this](auto const& txPair) {
|
||||
transactionMap_.erase(txPair.first);
|
||||
});
|
||||
item.second.transactions.clear();
|
||||
});
|
||||
|
||||
accountTxMap_.visit_all([ledgerSeq](auto& item) {
|
||||
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
|
||||
return tx.first.first == ledgerSeq;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override
|
||||
{
|
||||
ledgers_.erase_if([this, ledgerSeq](auto const& item) {
|
||||
if (item.first < ledgerSeq)
|
||||
{
|
||||
item.second.transactions.visit_all([this](auto const& txPair) {
|
||||
transactionMap_.erase(txPair.first);
|
||||
});
|
||||
ledgerHashToSeq_.erase(item.second.info.hash);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
|
||||
accountTxMap_.visit_all([ledgerSeq](auto& item) {
|
||||
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
|
||||
return tx.first.first < ledgerSeq;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
|
||||
{
|
||||
ledgers_.visit_all([this, ledgerSeq](auto& item) {
|
||||
if (item.first < ledgerSeq)
|
||||
{
|
||||
item.second.transactions.visit_all([this](auto const& txPair) {
|
||||
transactionMap_.erase(txPair.first);
|
||||
});
|
||||
item.second.transactions.clear();
|
||||
}
|
||||
});
|
||||
|
||||
accountTxMap_.visit_all([ledgerSeq](auto& item) {
|
||||
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
|
||||
return tx.first.first < ledgerSeq;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
|
||||
{
|
||||
accountTxMap_.visit_all([ledgerSeq](auto& item) {
|
||||
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
|
||||
return tx.first.first < ledgerSeq;
|
||||
});
|
||||
});
|
||||
}
|
||||
std::size_t
|
||||
getTransactionCount() override
|
||||
{
|
||||
return transactionMap_.size();
|
||||
}
|
||||
|
||||
std::size_t
|
||||
getAccountTransactionCount() override
|
||||
{
|
||||
std::size_t count = 0;
|
||||
accountTxMap_.visit_all([&count](auto const& item) {
|
||||
count += item.second.transactions.size();
|
||||
});
|
||||
return count;
|
||||
}
|
||||
|
||||
CountMinMax
|
||||
getLedgerCountMinMax() override
|
||||
{
|
||||
CountMinMax result{0, 0, 0};
|
||||
ledgers_.visit_all([&result](auto const& item) {
|
||||
result.numberOfRows++;
|
||||
if (result.minLedgerSequence == 0 ||
|
||||
item.first < result.minLedgerSequence)
|
||||
{
|
||||
result.minLedgerSequence = item.first;
|
||||
}
|
||||
if (item.first > result.maxLedgerSequence)
|
||||
{
|
||||
result.maxLedgerSequence = item.first;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
bool
|
||||
saveValidatedLedger(
|
||||
std::shared_ptr<Ledger const> const& ledger,
|
||||
bool current) override
|
||||
{
|
||||
try
|
||||
{
|
||||
LedgerData ledgerData;
|
||||
ledgerData.info = ledger->info();
|
||||
|
||||
auto aLedger = std::make_shared<AcceptedLedger>(ledger, app_);
|
||||
for (auto const& acceptedLedgerTx : *aLedger)
|
||||
{
|
||||
auto const& txn = acceptedLedgerTx->getTxn();
|
||||
auto const& meta = acceptedLedgerTx->getMeta();
|
||||
auto const& id = txn->getTransactionID();
|
||||
|
||||
std::string reason;
|
||||
auto accTx = std::make_pair(
|
||||
std::make_shared<ripple::Transaction>(txn, reason, app_),
|
||||
std::make_shared<ripple::TxMeta>(meta));
|
||||
|
||||
ledgerData.transactions.emplace(id, accTx);
|
||||
transactionMap_.emplace(id, accTx);
|
||||
|
||||
for (auto const& account : meta.getAffectedAccounts())
|
||||
{
|
||||
accountTxMap_.visit(account, [&](auto& data) {
|
||||
data.second.transactions.emplace(
|
||||
std::make_pair(
|
||||
ledger->info().seq,
|
||||
acceptedLedgerTx->getTxnSeq()),
|
||||
accTx);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
ledgers_.emplace(ledger->info().seq, std::move(ledgerData));
|
||||
ledgerHashToSeq_.emplace(ledger->info().hash, ledger->info().seq);
|
||||
|
||||
if (current)
|
||||
{
|
||||
auto const cutoffSeq =
|
||||
ledger->info().seq > app_.config().LEDGER_HISTORY
|
||||
? ledger->info().seq - app_.config().LEDGER_HISTORY
|
||||
: 0;
|
||||
|
||||
if (cutoffSeq > 0)
|
||||
{
|
||||
const std::size_t BATCH_SIZE = 128;
|
||||
std::size_t deleted = 0;
|
||||
|
||||
ledgers_.erase_if([&](auto const& item) {
|
||||
if (deleted >= BATCH_SIZE)
|
||||
return false;
|
||||
|
||||
if (item.first < cutoffSeq)
|
||||
{
|
||||
item.second.transactions.visit_all(
|
||||
[this](auto const& txPair) {
|
||||
transactionMap_.erase(txPair.first);
|
||||
});
|
||||
ledgerHashToSeq_.erase(item.second.info.hash);
|
||||
deleted++;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
|
||||
if (deleted > 0)
|
||||
{
|
||||
accountTxMap_.visit_all([cutoffSeq](auto& item) {
|
||||
item.second.transactions.erase_if(
|
||||
[cutoffSeq](auto const& tx) {
|
||||
return tx.first.first < cutoffSeq;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
deleteTransactionByLedgerSeq(ledger->info().seq);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<LedgerInfo>
|
||||
getLedgerInfoByIndex(LedgerIndex ledgerSeq) override
|
||||
{
|
||||
std::optional<LedgerInfo> result;
|
||||
ledgers_.visit(ledgerSeq, [&result](auto const& item) {
|
||||
result = item.second.info;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::optional<LedgerInfo>
|
||||
getNewestLedgerInfo() override
|
||||
{
|
||||
std::optional<LedgerInfo> result;
|
||||
ledgers_.visit_all([&result](auto const& item) {
|
||||
if (!result || item.second.info.seq > result->seq)
|
||||
{
|
||||
result = item.second.info;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::optional<LedgerInfo>
|
||||
getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override
|
||||
{
|
||||
std::optional<LedgerInfo> result;
|
||||
ledgers_.visit_all([&](auto const& item) {
|
||||
if (item.first >= ledgerFirstIndex &&
|
||||
(!result || item.first < result->seq))
|
||||
{
|
||||
result = item.second.info;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::optional<LedgerInfo>
|
||||
getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override
|
||||
{
|
||||
std::optional<LedgerInfo> result;
|
||||
ledgers_.visit_all([&](auto const& item) {
|
||||
if (item.first >= ledgerFirstIndex &&
|
||||
(!result || item.first > result->seq))
|
||||
{
|
||||
result = item.second.info;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::optional<LedgerInfo>
|
||||
getLedgerInfoByHash(uint256 const& ledgerHash) override
|
||||
{
|
||||
std::optional<LedgerInfo> result;
|
||||
ledgerHashToSeq_.visit(ledgerHash, [this, &result](auto const& item) {
|
||||
ledgers_.visit(item.second, [&result](auto const& item) {
|
||||
result = item.second.info;
|
||||
});
|
||||
});
|
||||
return result;
|
||||
}
|
||||
uint256
|
||||
getHashByIndex(LedgerIndex ledgerIndex) override
|
||||
{
|
||||
uint256 result;
|
||||
ledgers_.visit(ledgerIndex, [&result](auto const& item) {
|
||||
result = item.second.info.hash;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::optional<LedgerHashPair>
|
||||
getHashesByIndex(LedgerIndex ledgerIndex) override
|
||||
{
|
||||
std::optional<LedgerHashPair> result;
|
||||
ledgers_.visit(ledgerIndex, [&result](auto const& item) {
|
||||
result = LedgerHashPair{
|
||||
item.second.info.hash, item.second.info.parentHash};
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::map<LedgerIndex, LedgerHashPair>
|
||||
getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override
|
||||
{
|
||||
std::map<LedgerIndex, LedgerHashPair> result;
|
||||
ledgers_.visit_all([&](auto const& item) {
|
||||
if (item.first >= minSeq && item.first <= maxSeq)
|
||||
{
|
||||
result[item.first] = LedgerHashPair{
|
||||
item.second.info.hash, item.second.info.parentHash};
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
std::variant<AccountTx, TxSearched>
|
||||
getTransaction(
|
||||
uint256 const& id,
|
||||
std::optional<ClosedInterval<std::uint32_t>> const& range,
|
||||
error_code_i& ec) override
|
||||
{
|
||||
std::variant<AccountTx, TxSearched> result = TxSearched::unknown;
|
||||
transactionMap_.visit(id, [&](auto const& item) {
|
||||
auto const& tx = item.second;
|
||||
if (!range ||
|
||||
(range->lower() <= tx.second->getLgrSeq() &&
|
||||
tx.second->getLgrSeq() <= range->upper()))
|
||||
{
|
||||
result = tx;
|
||||
}
|
||||
else
|
||||
{
|
||||
result = TxSearched::all;
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
bool
|
||||
ledgerDbHasSpace(Config const& config) override
|
||||
{
|
||||
return true; // In-memory database always has space
|
||||
}
|
||||
|
||||
bool
|
||||
transactionDbHasSpace(Config const& config) override
|
||||
{
|
||||
return true; // In-memory database always has space
|
||||
}
|
||||
|
||||
std::uint32_t
|
||||
getKBUsedAll() override
|
||||
{
|
||||
std::uint32_t size = sizeof(*this);
|
||||
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
|
||||
size +=
|
||||
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
|
||||
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
|
||||
accountTxMap_.visit_all([&size](auto const& item) {
|
||||
size += sizeof(AccountID) + sizeof(AccountTxData);
|
||||
size += item.second.transactions.size() * sizeof(AccountTx);
|
||||
});
|
||||
return size / 1024; // Convert to KB
|
||||
}
|
||||
|
||||
std::uint32_t
|
||||
getKBUsedLedger() override
|
||||
{
|
||||
std::uint32_t size =
|
||||
ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
|
||||
size +=
|
||||
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
|
||||
return size / 1024;
|
||||
}
|
||||
|
||||
std::uint32_t
|
||||
getKBUsedTransaction() override
|
||||
{
|
||||
std::uint32_t size =
|
||||
transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
|
||||
accountTxMap_.visit_all([&size](auto const& item) {
|
||||
size += sizeof(AccountID) + sizeof(AccountTxData);
|
||||
size += item.second.transactions.size() * sizeof(AccountTx);
|
||||
});
|
||||
return size / 1024;
|
||||
}
|
||||
|
||||
void
|
||||
closeLedgerDB() override
|
||||
{
|
||||
// No-op for in-memory database
|
||||
}
|
||||
|
||||
void
|
||||
closeTransactionDB() override
|
||||
{
|
||||
// No-op for in-memory database
|
||||
}
|
||||
|
||||
~FlatmapDatabase()
|
||||
{
|
||||
// Concurrent maps need visit_all
|
||||
accountTxMap_.visit_all(
|
||||
[](auto& pair) { pair.second.transactions.clear(); });
|
||||
accountTxMap_.clear();
|
||||
|
||||
transactionMap_.clear();
|
||||
|
||||
ledgers_.visit_all(
|
||||
[](auto& pair) { pair.second.transactions.clear(); });
|
||||
ledgers_.clear();
|
||||
|
||||
ledgerHashToSeq_.clear();
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<Transaction>>
|
||||
getTxHistory(LedgerIndex startIndex) override
|
||||
{
|
||||
std::vector<std::shared_ptr<Transaction>> result;
|
||||
transactionMap_.visit_all([&](auto const& item) {
|
||||
if (item.second.second->getLgrSeq() >= startIndex)
|
||||
{
|
||||
result.push_back(item.second.first);
|
||||
}
|
||||
});
|
||||
std::sort(
|
||||
result.begin(), result.end(), [](auto const& a, auto const& b) {
|
||||
return a->getLedger() > b->getLedger();
|
||||
});
|
||||
if (result.size() > 20)
|
||||
{
|
||||
result.resize(20);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
// Helper function to handle limits
|
||||
template <typename Container>
|
||||
void
|
||||
applyLimit(Container& container, std::size_t limit, bool bUnlimited)
|
||||
{
|
||||
if (!bUnlimited && limit > 0 && container.size() > limit)
|
||||
{
|
||||
container.resize(limit);
|
||||
}
|
||||
}
|
||||
|
||||
AccountTxs
|
||||
getOldestAccountTxs(AccountTxOptions const& options) override
|
||||
{
|
||||
AccountTxs result;
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
result.push_back(tx.second);
|
||||
}
|
||||
});
|
||||
});
|
||||
std::sort(
|
||||
result.begin(), result.end(), [](auto const& a, auto const& b) {
|
||||
return a.second->getLgrSeq() < b.second->getLgrSeq();
|
||||
});
|
||||
applyLimit(result, options.limit, options.bUnlimited);
|
||||
return result;
|
||||
}
|
||||
|
||||
AccountTxs
|
||||
getNewestAccountTxs(AccountTxOptions const& options) override
|
||||
{
|
||||
AccountTxs result;
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
result.push_back(tx.second);
|
||||
}
|
||||
});
|
||||
});
|
||||
std::sort(
|
||||
result.begin(), result.end(), [](auto const& a, auto const& b) {
|
||||
return a.second->getLgrSeq() > b.second->getLgrSeq();
|
||||
});
|
||||
applyLimit(result, options.limit, options.bUnlimited);
|
||||
return result;
|
||||
}
|
||||
|
||||
MetaTxsList
|
||||
getOldestAccountTxsB(AccountTxOptions const& options) override
|
||||
{
|
||||
MetaTxsList result;
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
result.emplace_back(
|
||||
tx.second.first->getSTransaction()
|
||||
->getSerializer()
|
||||
.peekData(),
|
||||
tx.second.second->getAsObject()
|
||||
.getSerializer()
|
||||
.peekData(),
|
||||
tx.first.first);
|
||||
}
|
||||
});
|
||||
});
|
||||
std::sort(
|
||||
result.begin(), result.end(), [](auto const& a, auto const& b) {
|
||||
return std::get<2>(a) < std::get<2>(b);
|
||||
});
|
||||
applyLimit(result, options.limit, options.bUnlimited);
|
||||
return result;
|
||||
}
|
||||
|
||||
MetaTxsList
|
||||
getNewestAccountTxsB(AccountTxOptions const& options) override
|
||||
{
|
||||
MetaTxsList result;
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
result.emplace_back(
|
||||
tx.second.first->getSTransaction()
|
||||
->getSerializer()
|
||||
.peekData(),
|
||||
tx.second.second->getAsObject()
|
||||
.getSerializer()
|
||||
.peekData(),
|
||||
tx.first.first);
|
||||
}
|
||||
});
|
||||
});
|
||||
std::sort(
|
||||
result.begin(), result.end(), [](auto const& a, auto const& b) {
|
||||
return std::get<2>(a) > std::get<2>(b);
|
||||
});
|
||||
applyLimit(result, options.limit, options.bUnlimited);
|
||||
return result;
|
||||
}
|
||||
std::pair<AccountTxs, std::optional<AccountTxMarker>>
|
||||
oldestAccountTxPage(AccountTxPageOptions const& options) override
|
||||
{
|
||||
AccountTxs result;
|
||||
std::optional<AccountTxMarker> marker;
|
||||
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
|
||||
txs;
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
txs.emplace_back(tx);
|
||||
}
|
||||
});
|
||||
|
||||
std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
|
||||
return a.first < b.first;
|
||||
});
|
||||
|
||||
auto it = txs.begin();
|
||||
if (options.marker)
|
||||
{
|
||||
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
|
||||
return tx.first.first == options.marker->ledgerSeq &&
|
||||
tx.first.second == options.marker->txnSeq;
|
||||
});
|
||||
if (it != txs.end())
|
||||
++it;
|
||||
}
|
||||
|
||||
for (; it != txs.end() &&
|
||||
(options.limit == 0 || result.size() < options.limit);
|
||||
++it)
|
||||
{
|
||||
result.push_back(it->second);
|
||||
}
|
||||
|
||||
if (it != txs.end())
|
||||
{
|
||||
marker = AccountTxMarker{it->first.first, it->first.second};
|
||||
}
|
||||
});
|
||||
|
||||
return {result, marker};
|
||||
}
|
||||
|
||||
std::pair<AccountTxs, std::optional<AccountTxMarker>>
|
||||
newestAccountTxPage(AccountTxPageOptions const& options) override
|
||||
{
|
||||
AccountTxs result;
|
||||
std::optional<AccountTxMarker> marker;
|
||||
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
|
||||
txs;
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
txs.emplace_back(tx);
|
||||
}
|
||||
});
|
||||
|
||||
std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
|
||||
return a.first > b.first;
|
||||
});
|
||||
|
||||
auto it = txs.begin();
|
||||
if (options.marker)
|
||||
{
|
||||
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
|
||||
return tx.first.first == options.marker->ledgerSeq &&
|
||||
tx.first.second == options.marker->txnSeq;
|
||||
});
|
||||
if (it != txs.end())
|
||||
++it;
|
||||
}
|
||||
|
||||
for (; it != txs.end() &&
|
||||
(options.limit == 0 || result.size() < options.limit);
|
||||
++it)
|
||||
{
|
||||
result.push_back(it->second);
|
||||
}
|
||||
|
||||
if (it != txs.end())
|
||||
{
|
||||
marker = AccountTxMarker{it->first.first, it->first.second};
|
||||
}
|
||||
});
|
||||
|
||||
return {result, marker};
|
||||
}
|
||||
|
||||
std::pair<MetaTxsList, std::optional<AccountTxMarker>>
|
||||
oldestAccountTxPageB(AccountTxPageOptions const& options) override
|
||||
{
|
||||
MetaTxsList result;
|
||||
std::optional<AccountTxMarker> marker;
|
||||
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
txs.emplace_back(
|
||||
tx.first.first, tx.first.second, tx.second);
|
||||
}
|
||||
});
|
||||
|
||||
std::sort(txs.begin(), txs.end());
|
||||
|
||||
auto it = txs.begin();
|
||||
if (options.marker)
|
||||
{
|
||||
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
|
||||
return std::get<0>(tx) == options.marker->ledgerSeq &&
|
||||
std::get<1>(tx) == options.marker->txnSeq;
|
||||
});
|
||||
if (it != txs.end())
|
||||
++it;
|
||||
}
|
||||
|
||||
for (; it != txs.end() &&
|
||||
(options.limit == 0 || result.size() < options.limit);
|
||||
++it)
|
||||
{
|
||||
const auto& [_, __, tx] = *it;
|
||||
result.emplace_back(
|
||||
tx.first->getSTransaction()->getSerializer().peekData(),
|
||||
tx.second->getAsObject().getSerializer().peekData(),
|
||||
std::get<0>(*it));
|
||||
}
|
||||
|
||||
if (it != txs.end())
|
||||
{
|
||||
marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
|
||||
}
|
||||
});
|
||||
|
||||
return {result, marker};
|
||||
}
|
||||
|
||||
std::pair<MetaTxsList, std::optional<AccountTxMarker>>
|
||||
newestAccountTxPageB(AccountTxPageOptions const& options) override
|
||||
{
|
||||
MetaTxsList result;
|
||||
std::optional<AccountTxMarker> marker;
|
||||
|
||||
accountTxMap_.visit(options.account, [&](auto const& item) {
|
||||
std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
|
||||
item.second.transactions.visit_all([&](auto const& tx) {
|
||||
if (tx.first.first >= options.minLedger &&
|
||||
tx.first.first <= options.maxLedger)
|
||||
{
|
||||
txs.emplace_back(
|
||||
tx.first.first, tx.first.second, tx.second);
|
||||
}
|
||||
});
|
||||
|
||||
std::sort(txs.begin(), txs.end(), std::greater<>());
|
||||
|
||||
auto it = txs.begin();
|
||||
if (options.marker)
|
||||
{
|
||||
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
|
||||
return std::get<0>(tx) == options.marker->ledgerSeq &&
|
||||
std::get<1>(tx) == options.marker->txnSeq;
|
||||
});
|
||||
if (it != txs.end())
|
||||
++it;
|
||||
}
|
||||
|
||||
for (; it != txs.end() &&
|
||||
(options.limit == 0 || result.size() < options.limit);
|
||||
++it)
|
||||
{
|
||||
const auto& [_, __, tx] = *it;
|
||||
result.emplace_back(
|
||||
tx.first->getSTransaction()->getSerializer().peekData(),
|
||||
tx.second->getAsObject().getSerializer().peekData(),
|
||||
std::get<0>(*it));
|
||||
}
|
||||
|
||||
if (it != txs.end())
|
||||
{
|
||||
marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
|
||||
}
|
||||
});
|
||||
|
||||
return {result, marker};
|
||||
}
|
||||
};
|
||||
|
||||
// Factory function
|
||||
std::unique_ptr<SQLiteDatabase>
|
||||
getFlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
|
||||
{
|
||||
return std::make_unique<FlatmapDatabase>(app, config, jobQueue);
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
#endif // RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
|
||||
src/ripple/app/rdb/backend/RWDBDatabase.h (new file, 1034 lines; diff suppressed because it is too large)
@@ -1132,7 +1132,7 @@ accountTxPage(
|
||||
{
|
||||
sql = boost::str(
|
||||
boost::format(
|
||||
prefix + (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u'
|
||||
prefix + (R"(AccountTransactions.LedgerSeq BETWEEN %u AND %u
|
||||
ORDER BY AccountTransactions.LedgerSeq %s,
|
||||
AccountTransactions.TxnSeq %s
|
||||
LIMIT %u;)")) %
|
||||
@@ -1155,12 +1155,14 @@ accountTxPage(
|
||||
FROM AccountTransactions, Transactions WHERE
|
||||
(AccountTransactions.TransID = Transactions.TransID AND
|
||||
AccountTransactions.Account = '%s' AND
|
||||
AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u')
|
||||
OR
|
||||
AccountTransactions.LedgerSeq BETWEEN %u AND %u)
|
||||
UNION
|
||||
SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,Status,RawTxn,TxnMeta
|
||||
FROM AccountTransactions, Transactions WHERE
|
||||
(AccountTransactions.TransID = Transactions.TransID AND
|
||||
AccountTransactions.Account = '%s' AND
|
||||
AccountTransactions.LedgerSeq = '%u' AND
|
||||
AccountTransactions.TxnSeq %s '%u')
|
||||
AccountTransactions.LedgerSeq = %u AND
|
||||
AccountTransactions.TxnSeq %s %u)
|
||||
ORDER BY AccountTransactions.LedgerSeq %s,
|
||||
AccountTransactions.TxnSeq %s
|
||||
LIMIT %u;
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/rdb/RelationalDatabase.h>
|
||||
#include <ripple/app/rdb/backend/FlatmapDatabase.h>
|
||||
#include <ripple/app/rdb/backend/RWDBDatabase.h>
|
||||
#include <ripple/core/ConfigSections.h>
|
||||
#include <ripple/nodestore/DatabaseShard.h>
|
||||
|
||||
@@ -38,6 +40,8 @@ RelationalDatabase::init(
|
||||
{
|
||||
bool use_sqlite = false;
|
||||
bool use_postgres = false;
|
||||
bool use_rwdb = false;
|
||||
bool use_flatmap = false;
|
||||
|
||||
if (config.reporting())
|
||||
{
|
||||
@@ -52,6 +56,14 @@ RelationalDatabase::init(
|
||||
{
|
||||
use_sqlite = true;
|
||||
}
|
||||
else if (boost::iequals(get(rdb_section, "backend"), "rwdb"))
|
||||
{
|
||||
use_rwdb = true;
|
||||
}
|
||||
else if (boost::iequals(get(rdb_section, "backend"), "flatmap"))
|
||||
{
|
||||
use_flatmap = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
Throw<std::runtime_error>(
|
||||
@@ -73,6 +85,14 @@ RelationalDatabase::init(
|
||||
{
|
||||
return getPostgresDatabase(app, config, jobQueue);
|
||||
}
|
||||
else if (use_rwdb)
|
||||
{
|
||||
return getRWDBDatabase(app, config, jobQueue);
|
||||
}
|
||||
else if (use_flatmap)
|
||||
{
|
||||
return getFlatmapDatabase(app, config, jobQueue);
|
||||
}
|
||||
|
||||
return std::unique_ptr<RelationalDatabase>();
|
||||
}
|
||||
|
||||
@@ -194,8 +194,14 @@ ETLSource::onResolve(
|
||||
{
|
||||
boost::beast::get_lowest_layer(*ws_).expires_after(
|
||||
std::chrono::seconds(30));
|
||||
|
||||
// Use async_connect with the entire results
|
||||
boost::beast::get_lowest_layer(*ws_).async_connect(
|
||||
results, [this](auto ec, auto ep) { onConnect(ec, ep); });
|
||||
results,
|
||||
[this](
|
||||
boost::beast::error_code ec,
|
||||
boost::asio::ip::tcp::resolver::results_type::endpoint_type
|
||||
ep) { onConnect(ec, ep); });
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <boost/beast/core/string.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <fstream>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
@@ -549,7 +549,8 @@ using uint128 = base_uint<128>;
|
||||
using uint160 = base_uint<160>;
|
||||
using uint256 = base_uint<256>;
|
||||
|
||||
template <std::size_t Bits, class Tag>
|
||||
/*
|
||||
* template <std::size_t Bits, class Tag>
|
||||
[[nodiscard]] inline constexpr std::strong_ordering
|
||||
operator<=>(base_uint<Bits, Tag> const& lhs, base_uint<Bits, Tag> const& rhs)
|
||||
{
|
||||
@@ -570,6 +571,19 @@ operator<=>(base_uint<Bits, Tag> const& lhs, base_uint<Bits, Tag> const& rhs)
|
||||
return (*ret.first > *ret.second) ? std::strong_ordering::greater
|
||||
: std::strong_ordering::less;
|
||||
}
|
||||
*/
|
||||
|
||||
template <std::size_t Bits, class Tag>
|
||||
[[nodiscard]] inline constexpr std::strong_ordering
|
||||
operator<=>(base_uint<Bits, Tag> const& lhs, base_uint<Bits, Tag> const& rhs)
|
||||
{
|
||||
return std::lexicographical_compare_three_way(
|
||||
lhs.cbegin(),
|
||||
lhs.cend(),
|
||||
rhs.cbegin(),
|
||||
rhs.cend(),
|
||||
std::compare_three_way{});
|
||||
}
|
||||
|
||||
template <std::size_t Bits, typename Tag>
|
||||
[[nodiscard]] inline constexpr bool
|
||||
|
||||
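The old element-by-element comparison for base_uint (now commented out above) is replaced by a call to std::lexicographical_compare_three_way from C++20. A minimal standalone sketch of the same call on plain byte arrays, independent of base_uint, showing that the first differing element decides the ordering:

#include <algorithm>
#include <array>
#include <compare>
#include <iostream>

int main()
{
    std::array<unsigned char, 4> a{0x00, 0x01, 0xFF, 0x00};
    std::array<unsigned char, 4> b{0x00, 0x02, 0x00, 0x00};

    auto const cmp = std::lexicographical_compare_three_way(
        a.cbegin(), a.cend(), b.cbegin(), b.cend(), std::compare_three_way{});

    // The second element (0x01 < 0x02) decides the result: a orders before b.
    std::cout << (cmp < 0 ? "a < b" : "a >= b") << "\n";  // prints: a < b
}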
@@ -18,6 +18,7 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/basics/FileUtilities.h>
|
||||
#include <fstream>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -41,7 +42,7 @@ getFileContents(
|
||||
return {};
|
||||
}
|
||||
|
||||
ifstream fileStream(fullPath, std::ios::in);
|
||||
std::ifstream fileStream(fullPath.string(), std::ios::in);
|
||||
|
||||
if (!fileStream)
|
||||
{
|
||||
@@ -71,7 +72,8 @@ writeFileContents(
|
||||
using namespace boost::filesystem;
|
||||
using namespace boost::system::errc;
|
||||
|
||||
ofstream fileStream(destPath, std::ios::out | std::ios::trunc);
|
||||
std::ofstream fileStream(
|
||||
destPath.string(), std::ios::out | std::ios::trunc);
|
||||
|
||||
if (!fileStream)
|
||||
{
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/beast/net/IPEndpoint.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/core/ConfigSections.h>
|
||||
#include <ripple/protocol/PublicKey.h>
|
||||
#include <ripple/protocol/SystemParameters.h> // VFALCO Breaks levelization
|
||||
#include <boost/beast/core/string.hpp>
|
||||
@@ -240,7 +241,7 @@ public:
|
||||
bool LEDGER_REPLAY = false;
|
||||
|
||||
// Work queue limits
|
||||
int MAX_TRANSACTIONS = 250;
|
||||
int MAX_TRANSACTIONS = 1000;
|
||||
static constexpr int MAX_JOB_QUEUE_TX = 1000;
|
||||
static constexpr int MIN_JOB_QUEUE_TX = 100;
|
||||
|
||||
@@ -350,6 +351,21 @@ public:
|
||||
{
|
||||
return RUN_REPORTING;
|
||||
}
|
||||
bool
|
||||
mem_backend() const
|
||||
{
|
||||
static bool const isMem =
|
||||
(!section(SECTION_RELATIONAL_DB).empty() &&
|
||||
boost::beast::iequals(
|
||||
get(section(SECTION_RELATIONAL_DB), "backend"), "rwdb")) ||
|
||||
(!section("node_db").empty() &&
|
||||
(boost::beast::iequals(get(section("node_db"), "type"), "rwdb") ||
|
||||
boost::beast::iequals(
|
||||
get(section("node_db"), "type"), "flatmap")));
|
||||
// RHNOTE: memory type is not selected for here because it breaks
|
||||
// tests
|
||||
return isMem;
|
||||
}
|
||||
|
||||
bool
|
||||
useTxTables() const
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/rdb/Download.h>
|
||||
#include <fstream>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
|
||||
@@ -1894,7 +1894,9 @@ fromNetwork(
|
||||
constexpr auto RPC_REPLY_MAX_BYTES = megabytes(256);
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
auto constexpr RPC_NOTIFY = 10min;
|
||||
// auto constexpr RPC_NOTIFY = 10min; // Wietse: lolwut 10 minutes for one
|
||||
// HTTP call?
|
||||
auto constexpr RPC_NOTIFY = 30s;
|
||||
|
||||
HTTPClient::request(
|
||||
bSSL,
|
||||
|
||||
@@ -78,12 +78,14 @@ public:
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
if (mDeque.size() >= eventQueueMax)
|
||||
{
|
||||
// Drop the previous event.
|
||||
JLOG(j_.warn()) << "RPCCall::fromNetwork drop";
|
||||
mDeque.pop_back();
|
||||
}
|
||||
// Wietse: we're not going to limit this, this is admin-port only, scale
|
||||
// accordingly Dropping events just like this results in inconsistent
|
||||
// data on the receiving end if (mDeque.size() >= eventQueueMax)
|
||||
// {
|
||||
// // Drop the previous event.
|
||||
// JLOG(j_.warn()) << "RPCCall::fromNetwork drop";
|
||||
// mDeque.pop_back();
|
||||
// }
|
||||
|
||||
auto jm = broadcast ? j_.debug() : j_.info();
|
||||
JLOG(jm) << "RPCCall::fromNetwork push: " << jvObj;
|
||||
@@ -182,7 +184,8 @@ private:
|
||||
}
|
||||
|
||||
private:
|
||||
enum { eventQueueMax = 32 };
|
||||
// Wietse: we're not going to limit this, this is admin-port only, scale
|
||||
// accordingly enum { eventQueueMax = 32 };
|
||||
|
||||
boost::asio::io_service& m_io_service;
|
||||
JobQueue& m_jobQueue;
|
||||
|
||||
src/ripple/nodestore/backend/FlatmapFactory.cpp (new file, 235 lines)
@@ -0,0 +1,235 @@
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <ripple/nodestore/Factory.h>
|
||||
#include <ripple/nodestore/Manager.h>
|
||||
#include <ripple/nodestore/impl/DecodedBlob.h>
|
||||
#include <ripple/nodestore/impl/EncodedBlob.h>
|
||||
#include <ripple/nodestore/impl/codec.h>
|
||||
#include <boost/beast/core/string.hpp>
|
||||
#include <boost/core/ignore_unused.hpp>
|
||||
#include <boost/unordered/concurrent_flat_map.hpp>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
namespace ripple {
|
namespace NodeStore {

class FlatmapBackend : public Backend
{
private:
    std::string name_;
    beast::Journal journal_;
    bool isOpen_{false};

    struct base_uint_hasher
    {
        using result_type = std::size_t;

        result_type
        operator()(base_uint<256> const& value) const
        {
            return hardened_hash<>{}(value);
        }
    };

    using DataStore = boost::unordered::concurrent_flat_map<
        uint256,
        std::vector<std::uint8_t>,  // Store compressed blob data
        base_uint_hasher>;

    DataStore table_;

public:
    FlatmapBackend(
        size_t keyBytes,
        Section const& keyValues,
        beast::Journal journal)
        : name_(get(keyValues, "path")), journal_(journal)
    {
        boost::ignore_unused(journal_);
        if (name_.empty())
            name_ = "node_db";
    }

    ~FlatmapBackend() override
    {
        close();
    }

    std::string
    getName() override
    {
        return name_;
    }

    void
    open(bool createIfMissing) override
    {
        if (isOpen_)
            Throw<std::runtime_error>("already open");
        isOpen_ = true;
    }

    bool
    isOpen() override
    {
        return isOpen_;
    }

    void
    close() override
    {
        table_.clear();
        isOpen_ = false;
    }

    Status
    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
    {
        if (!isOpen_)
            return notFound;

        uint256 const hash(uint256::fromVoid(key));

        bool found = table_.visit(hash, [&](const auto& key_value_pair) {
            nudb::detail::buffer bf;
            auto const result = nodeobject_decompress(
                key_value_pair.second.data(), key_value_pair.second.size(), bf);
            DecodedBlob decoded(hash.data(), result.first, result.second);
            if (!decoded.wasOk())
            {
                *pObject = nullptr;
                return;
            }
            *pObject = decoded.createObject();
        });
        return found ? (*pObject ? ok : dataCorrupt) : notFound;
    }

    std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
    fetchBatch(std::vector<uint256 const*> const& hashes) override
    {
        std::vector<std::shared_ptr<NodeObject>> results;
        results.reserve(hashes.size());
        for (auto const& h : hashes)
        {
            std::shared_ptr<NodeObject> nObj;
            Status status = fetch(h->begin(), &nObj);
            if (status != ok)
                results.push_back({});
            else
                results.push_back(nObj);
        }
        return {results, ok};
    }

    void
    store(std::shared_ptr<NodeObject> const& object) override
    {
        if (!isOpen_)
            return;

        if (!object)
            return;

        EncodedBlob encoded(object);
        nudb::detail::buffer bf;
        auto const result =
            nodeobject_compress(encoded.getData(), encoded.getSize(), bf);

        std::vector<std::uint8_t> compressed(
            static_cast<const std::uint8_t*>(result.first),
            static_cast<const std::uint8_t*>(result.first) + result.second);

        table_.insert_or_assign(object->getHash(), std::move(compressed));
    }

    void
    storeBatch(Batch const& batch) override
    {
        for (auto const& e : batch)
            store(e);
    }

    void
    sync() override
    {
    }

    void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
    {
        if (!isOpen_)
            return;

        table_.visit_all([&f](const auto& entry) {
            nudb::detail::buffer bf;
            auto const result = nodeobject_decompress(
                entry.second.data(), entry.second.size(), bf);
            DecodedBlob decoded(
                entry.first.data(), result.first, result.second);
            if (decoded.wasOk())
                f(decoded.createObject());
        });
    }

    int
    getWriteLoad() override
    {
        return 0;
    }

    void
    setDeletePath() override
    {
        close();
    }

    int
    fdRequired() const override
    {
        return 0;
    }

private:
    size_t
    size() const
    {
        return table_.size();
    }
};

class FlatmapFactory : public Factory
{
public:
    FlatmapFactory()
    {
        Manager::instance().insert(*this);
    }

    ~FlatmapFactory() override
    {
        Manager::instance().erase(*this);
    }

    std::string
    getName() const override
    {
        return "Flatmap";
    }

    std::unique_ptr<Backend>
    createInstance(
        size_t keyBytes,
        Section const& keyValues,
        std::size_t burstSize,
        Scheduler& scheduler,
        beast::Journal journal) override
    {
        return std::make_unique<FlatmapBackend>(keyBytes, keyValues, journal);
    }
};

static FlatmapFactory flatmapFactory;

}  // namespace NodeStore
}  // namespace ripple
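Editor's note: FlatmapBackend becomes selectable purely through the file-scope `static FlatmapFactory flatmapFactory;` above. The factory's constructor registers it with `Manager::instance()` under the name "Flatmap", and its destructor removes it again, so linking the translation unit is enough to make the type available by name. The following stand-alone sketch illustrates that self-registration idiom with a toy registry; the `Registry` and `Register` names here are illustrative and are not the actual ripple::NodeStore::Manager API.

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Toy stand-ins for Backend/Factory/Manager; illustrative only.
struct Backend { virtual ~Backend() = default; };

class Registry
{
public:
    static Registry& instance() { static Registry r; return r; }

    void insert(std::string name, std::function<std::unique_ptr<Backend>()> make)
    {
        makers_[std::move(name)] = std::move(make);
    }

    std::unique_ptr<Backend> make(std::string const& name) const
    {
        auto it = makers_.find(name);
        return it == makers_.end() ? nullptr : it->second();
    }

private:
    std::map<std::string, std::function<std::unique_ptr<Backend>()>> makers_;
};

struct FlatmapLikeBackend : Backend {};

// File-scope static: registers the type as a side effect of linking this TU.
static struct Register {
    Register()
    {
        Registry::instance().insert(
            "Flatmap", [] { return std::make_unique<FlatmapLikeBackend>(); });
    }
} registerFlatmapLike;

int main()
{
    std::cout << (Registry::instance().make("Flatmap") ? "registered\n" : "missing\n");
}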
@@ -94,7 +94,7 @@ public:
    {
        boost::ignore_unused(journal_);  // Keep unused journal_ just in case.
        if (name_.empty())
-           Throw<std::runtime_error>("Missing path in Memory backend");
+           Throw<std::runtime_error>("Missing path in TestMemory backend");
    }

    ~MemoryBackend() override
242
src/ripple/nodestore/backend/RWDBFactory.cpp
Normal file
@@ -0,0 +1,242 @@
#include <ripple/basics/contract.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/nodestore/impl/codec.h>
#include <boost/beast/core/string.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <memory>
#include <mutex>

namespace ripple {
namespace NodeStore {

class RWDBBackend : public Backend
{
private:
    std::string name_;
    beast::Journal journal_;
    bool isOpen_{false};

    struct base_uint_hasher
    {
        using result_type = std::size_t;

        result_type
        operator()(base_uint<256> const& value) const
        {
            return hardened_hash<>{}(value);
        }
    };

    using DataStore =
        std::map<uint256, std::vector<std::uint8_t>>;  // Store compressed blob
                                                       // data
    mutable std::recursive_mutex
        mutex_;  // Only needed for std::map implementation

    DataStore table_;

public:
    RWDBBackend(
        size_t keyBytes,
        Section const& keyValues,
        beast::Journal journal)
        : name_(get(keyValues, "path")), journal_(journal)
    {
        boost::ignore_unused(journal_);
        if (name_.empty())
            name_ = "node_db";
    }

    ~RWDBBackend() override
    {
        close();
    }

    std::string
    getName() override
    {
        return name_;
    }

    void
    open(bool createIfMissing) override
    {
        std::lock_guard lock(mutex_);
        if (isOpen_)
            Throw<std::runtime_error>("already open");
        isOpen_ = true;
    }

    bool
    isOpen() override
    {
        return isOpen_;
    }

    void
    close() override
    {
        std::lock_guard lock(mutex_);
        table_.clear();
        isOpen_ = false;
    }

    Status
    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
    {
        if (!isOpen_)
            return notFound;

        uint256 const hash(uint256::fromVoid(key));

        std::lock_guard lock(mutex_);
        auto it = table_.find(hash);
        if (it == table_.end())
            return notFound;

        nudb::detail::buffer bf;
        auto const result =
            nodeobject_decompress(it->second.data(), it->second.size(), bf);
        DecodedBlob decoded(hash.data(), result.first, result.second);
        if (!decoded.wasOk())
            return dataCorrupt;
        *pObject = decoded.createObject();
        return ok;
    }

    std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
    fetchBatch(std::vector<uint256 const*> const& hashes) override
    {
        std::vector<std::shared_ptr<NodeObject>> results;
        results.reserve(hashes.size());
        for (auto const& h : hashes)
        {
            std::shared_ptr<NodeObject> nObj;
            Status status = fetch(h->begin(), &nObj);
            if (status != ok)
                results.push_back({});
            else
                results.push_back(nObj);
        }
        return {results, ok};
    }

    void
    store(std::shared_ptr<NodeObject> const& object) override
    {
        if (!isOpen_)
            return;

        if (!object)
            return;

        EncodedBlob encoded(object);
        nudb::detail::buffer bf;
        auto const result =
            nodeobject_compress(encoded.getData(), encoded.getSize(), bf);

        std::vector<std::uint8_t> compressed(
            static_cast<const std::uint8_t*>(result.first),
            static_cast<const std::uint8_t*>(result.first) + result.second);

        std::lock_guard lock(mutex_);
        table_[object->getHash()] = std::move(compressed);
    }

    void
    storeBatch(Batch const& batch) override
    {
        for (auto const& e : batch)
            store(e);
    }

    void
    sync() override
    {
    }

    void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
    {
        if (!isOpen_)
            return;

        std::lock_guard lock(mutex_);
        for (const auto& entry : table_)
        {
            nudb::detail::buffer bf;
            auto const result = nodeobject_decompress(
                entry.second.data(), entry.second.size(), bf);
            DecodedBlob decoded(
                entry.first.data(), result.first, result.second);
            if (decoded.wasOk())
                f(decoded.createObject());
        }
    }

    int
    getWriteLoad() override
    {
        return 0;
    }

    void
    setDeletePath() override
    {
        close();
    }

    int
    fdRequired() const override
    {
        return 0;
    }

private:
    size_t
    size() const
    {
        std::lock_guard lock(mutex_);
        return table_.size();
    }
};

class RWDBFactory : public Factory
{
public:
    RWDBFactory()
    {
        Manager::instance().insert(*this);
    }

    ~RWDBFactory() override
    {
        Manager::instance().erase(*this);
    }

    std::string
    getName() const override
    {
        return "RWDB";
    }

    std::unique_ptr<Backend>
    createInstance(
        size_t keyBytes,
        Section const& keyValues,
        std::size_t burstSize,
        Scheduler& scheduler,
        beast::Journal journal) override
    {
        return std::make_unique<RWDBBackend>(keyBytes, keyValues, journal);
    }
};

static RWDBFactory rwDBFactory;

}  // namespace NodeStore
}  // namespace ripple
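Editor's note: RWDBBackend keeps the same compressed-blob layout as FlatmapBackend above but trades `boost::unordered::concurrent_flat_map` for a plain ordered `std::map` guarded by a single `std::recursive_mutex`, so every operation serialises on one lock. A minimal stand-alone sketch of that locked-map shape follows; the `LockedBlobMap` type and its members are illustrative, not code from the repository.

#include <cstdint>
#include <map>
#include <mutex>
#include <optional>
#include <vector>

// Minimal sketch: a key -> blob map guarded by one mutex, mirroring the
// store/fetch shape used by RWDBBackend (but with a trivial integer key).
class LockedBlobMap
{
public:
    void put(std::uint64_t key, std::vector<std::uint8_t> blob)
    {
        std::lock_guard lock(mutex_);
        table_[key] = std::move(blob);
    }

    std::optional<std::vector<std::uint8_t>> get(std::uint64_t key) const
    {
        std::lock_guard lock(mutex_);
        auto it = table_.find(key);
        if (it == table_.end())
            return std::nullopt;
        return it->second;  // copied out while the lock is held
    }

private:
    mutable std::mutex mutex_;
    std::map<std::uint64_t, std::vector<std::uint8_t>> table_;
};

int main()
{
    LockedBlobMap m;
    m.put(1, {0xDE, 0xAD});
    return m.get(1) ? 0 : 1;
}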
@@ -44,7 +44,7 @@ getFeatureValue(
        return {};
    boost::smatch match;
    boost::regex rx(feature + "=([^;\\s]+)");
-   auto const value = header->value().to_string();
+   auto const value = std::string(header->value());
    if (boost::regex_search(value, match, rx))
        return {match[1]};
    return {};
@@ -233,7 +233,7 @@ verifyHandshake(
{
    if (auto const iter = headers.find("Server-Domain"); iter != headers.end())
    {
-       if (!isProperlyFormedTomlDomain(iter->value().to_string()))
+       if (!isProperlyFormedTomlDomain(std::string(iter->value())))
            throw std::runtime_error("Invalid server domain");
    }

@@ -243,7 +243,8 @@ verifyHandshake(
    uint32_t peer_nid = 0;
    if (auto const iter = headers.find("Network-ID"); iter != headers.end())
    {
-       if (!beast::lexicalCastChecked(peer_nid, iter->value().to_string()))
+       if (!beast::lexicalCastChecked(
+               peer_nid, std::string(iter->value())))
            throw std::runtime_error("Invalid peer network identifier");
    }

@@ -255,7 +256,7 @@ verifyHandshake(
    if (auto const iter = headers.find("Network-Time"); iter != headers.end())
    {
        auto const netTime =
-           [str = iter->value().to_string()]() -> TimeKeeper::time_point {
+           [str = std::string(iter->value())]() -> TimeKeeper::time_point {
            TimeKeeper::duration::rep val;

            if (beast::lexicalCastChecked(val, str))
@@ -291,7 +292,7 @@ verifyHandshake(
    if (auto const iter = headers.find("Public-Key"); iter != headers.end())
    {
        auto pk = parseBase58<PublicKey>(
-           TokenType::NodePublic, iter->value().to_string());
+           TokenType::NodePublic, std::string(iter->value()));

        if (pk)
        {
@@ -317,7 +318,7 @@ verifyHandshake(
    if (iter == headers.end())
        throw std::runtime_error("No session signature specified");

-   auto sig = base64_decode(iter->value().to_string());
+   auto sig = base64_decode(std::string(iter->value()));

    if (!verifyDigest(publicKey, sharedValue, makeSlice(sig), false))
        throw std::runtime_error("Failed to verify session");
@@ -330,7 +331,7 @@ verifyHandshake(
    {
        boost::system::error_code ec;
        auto const local_ip = boost::asio::ip::address::from_string(
-           iter->value().to_string(), ec);
+           std::string(iter->value()), ec);

        if (ec)
            throw std::runtime_error("Invalid Local-IP");
@@ -345,7 +346,7 @@ verifyHandshake(
    {
        boost::system::error_code ec;
        auto const remote_ip = boost::asio::ip::address::from_string(
-           iter->value().to_string(), ec);
+           std::string(iter->value()), ec);

        if (ec)
            throw std::runtime_error("Invalid Remote-IP");
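Editor's note: every change in the hunks above is the same mechanical substitution: `iter->value().to_string()` becomes `std::string(iter->value())`. This appears to be driven by the Boost upgrade elsewhere in this change set, since newer Boost.Beast returns header values as a string_view type without a `to_string()` member, so the call sites construct the owning `std::string` explicitly. A trivial sketch of the conversion follows, using `std::string_view` as a stand-in for the Beast view type.

#include <string>
#include <string_view>

// Stand-in for a header value returned as a non-owning string view.
std::string_view headerValue() { return "xahau.network"; }

int main()
{
    // The old code relied on a to_string() member on the view type;
    // the portable replacement is an explicit std::string construction.
    std::string owned{headerValue()};
    return owned.empty() ? 1 : 0;
}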
@@ -38,6 +38,7 @@
#include <ripple/rpc/json_body.h>
#include <ripple/server/SimpleWriter.h>

+#include <ripple/core/ConfigSections.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/utility/in_place_factory.hpp>

@@ -136,7 +137,11 @@ OverlayImpl::OverlayImpl(
        stopwatch(),
        app_.journal("PeerFinder"),
        config,
-       collector))
+       collector,
+       app.config().section(SECTION_RELATIONAL_DB).empty() ||
+           !boost::iequals(
+               get(app.config().section(SECTION_RELATIONAL_DB), "backend"),
+               "rwdb")))
    , m_resolver(resolver)
    , next_id_(1)
    , timer_count_(0)

@@ -176,7 +176,7 @@ PeerImp::run()
        if (auto const iter = headers_.find("Closed-Ledger");
            iter != headers_.end())
        {
-           closed = parseLedgerHash(iter->value().to_string());
+           closed = parseLedgerHash(std::string(iter->value()));

            if (!closed)
                fail("Malformed handshake data (1)");
@@ -185,7 +185,7 @@ PeerImp::run()
        if (auto const iter = headers_.find("Previous-Ledger");
            iter != headers_.end())
        {
-           previous = parseLedgerHash(iter->value().to_string());
+           previous = parseLedgerHash(std::string(iter->value()));

            if (!previous)
                fail("Malformed handshake data (2)");
@@ -372,8 +372,8 @@ std::string
PeerImp::getVersion() const
{
    if (inbound_)
-       return headers_["User-Agent"].to_string();
-   return headers_["Server"].to_string();
+       return std::string(headers_["User-Agent"]);
+   return std::string(headers_["Server"]);
}

Json::Value
@@ -399,7 +399,7 @@ PeerImp::json()
    if (auto const d = domain(); !d.empty())
        ret[jss::server_domain] = domain();

-   if (auto const nid = headers_["Network-ID"].to_string(); !nid.empty())
+   if (auto const nid = std::string(headers_["Network-ID"]); !nid.empty())
        ret[jss::network_id] = nid;

    ret[jss::load] = usage_.balance();
@@ -839,7 +839,7 @@ PeerImp::name() const
std::string
PeerImp::domain() const
{
-   return headers_["Server-Domain"].to_string();
+   return std::string(headers_["Server-Domain"]);
}

//------------------------------------------------------------------------------

@@ -21,7 +21,6 @@
#define RIPPLE_PEERFINDER_CHECKER_H_INCLUDED

#include <ripple/beast/net/IPAddressConversion.h>
-#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/intrusive/list.hpp>

54
src/ripple/peerfinder/impl/InMemoryStore.h
Normal file
@@ -0,0 +1,54 @@
#ifndef RIPPLE_PEERFINDER_INMEMORYSTORE_H_INCLUDED
#define RIPPLE_PEERFINDER_INMEMORYSTORE_H_INCLUDED

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/peerfinder/impl/Store.h>
#include <boost/functional/hash.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>

namespace ripple {
namespace PeerFinder {

struct EndpointHasher
{
    std::size_t
    operator()(beast::IP::Endpoint const& endpoint) const
    {
        std::size_t seed = 0;
        boost::hash_combine(seed, endpoint.address().to_string());
        boost::hash_combine(seed, endpoint.port());
        return seed;
    }
};

class InMemoryStore : public Store
{
private:
    boost::concurrent_flat_map<beast::IP::Endpoint, int, EndpointHasher>
        entries;

public:
    std::size_t
    load(load_callback const& cb) override
    {
        std::size_t count = 0;
        entries.visit_all([&](auto const& entry) {
            cb(entry.first, entry.second);
            ++count;
        });
        return count;
    }

    void
    save(std::vector<Entry> const& v) override
    {
        entries.clear();
        for (auto const& entry : v)
            entries.emplace(entry.endpoint, entry.valence);
    }
};

}  // namespace PeerFinder
}  // namespace ripple

#endif
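Editor's note: like FlatmapBackend, InMemoryStore is built on Boost.Unordered's concurrent flat map (available since Boost 1.83), which exposes no iterators; point lookups and whole-table traversal go through the `visit`/`visit_all` callbacks while the container does its own locking. A small self-contained usage sketch, independent of the repository's types:

#include <boost/unordered/concurrent_flat_map.hpp>
#include <cstddef>
#include <iostream>
#include <string>

int main()
{
    boost::unordered::concurrent_flat_map<std::string, int> counts;

    counts.emplace("alice", 1);
    counts.emplace("bob", 2);
    counts.insert_or_assign("alice", 3);  // overwrite an existing entry

    // No iterators: a lookup runs a callback while the entry is locked.
    counts.visit("alice", [](auto const& kv) {
        std::cout << kv.first << " -> " << kv.second << '\n';
    });

    // Whole-table traversal is also callback based.
    std::size_t n = 0;
    counts.visit_all([&n](auto const&) { ++n; });
    std::cout << n << " entries\n";
}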
@@ -17,8 +17,10 @@
 */
//==============================================================================

+#include <ripple/core/ConfigSections.h>
#include <ripple/peerfinder/PeerfinderManager.h>
#include <ripple/peerfinder/impl/Checker.h>
+#include <ripple/peerfinder/impl/InMemoryStore.h>
#include <ripple/peerfinder/impl/Logic.h>
#include <ripple/peerfinder/impl/SourceStrings.h>
#include <ripple/peerfinder/impl/StoreSqdb.h>
@@ -38,7 +40,7 @@ public:
    std::optional<boost::asio::io_service::work> work_;
    clock_type& m_clock;
    beast::Journal m_journal;
-   StoreSqdb m_store;
+   std::unique_ptr<Store> m_store;
    Checker<boost::asio::ip::tcp> checker_;
    Logic<decltype(checker_)> m_logic;
    BasicConfig const& m_config;
@@ -50,15 +52,18 @@ public:
        clock_type& clock,
        beast::Journal journal,
        BasicConfig const& config,
-       beast::insight::Collector::ptr const& collector)
+       beast::insight::Collector::ptr const& collector,
+       bool useSqLiteStore)
        : Manager()
        , io_service_(io_service)
        , work_(std::in_place, std::ref(io_service_))
        , m_clock(clock)
        , m_journal(journal)
-       , m_store(journal)
+       , m_store(
+             useSqLiteStore ? static_cast<Store*>(new StoreSqdb(journal))
+                            : static_cast<Store*>(new InMemoryStore()))
        , checker_(io_service_)
-       , m_logic(clock, m_store, checker_, journal)
+       , m_logic(clock, *m_store, checker_, journal)
        , m_config(config)
        , m_stats(std::bind(&ManagerImp::collect_metrics, this), collector)
    {
@@ -215,7 +220,8 @@ public:
    void
    start() override
    {
-       m_store.open(m_config);
+       if (auto sqdb = dynamic_cast<StoreSqdb*>(m_store.get()))
+           sqdb->open(m_config);
        m_logic.load();
    }

@@ -275,10 +281,11 @@ make_Manager(
    clock_type& clock,
    beast::Journal journal,
    BasicConfig const& config,
-   beast::insight::Collector::ptr const& collector)
+   beast::insight::Collector::ptr const& collector,
+   bool useSqLiteStore)
{
    return std::make_unique<ManagerImp>(
-       io_service, clock, journal, config, collector);
+       io_service, clock, journal, config, collector, useSqLiteStore);
}

} // namespace PeerFinder

@@ -34,7 +34,8 @@ make_Manager(
    clock_type& clock,
    beast::Journal journal,
    BasicConfig const& config,
-   beast::insight::Collector::ptr const& collector);
+   beast::insight::Collector::ptr const& collector,
+   bool useSqliteStore);

} // namespace PeerFinder
} // namespace ripple

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
-static constexpr std::size_t numFeatures = 73;
+static constexpr std::size_t numFeatures = 74;

/** Amendments that this server supports and the default voting behavior.
    Whether they are enabled depends on the Rules defined in the validated
@@ -361,6 +361,7 @@ extern uint256 const fixNSDelete;
extern uint256 const fix240819;
extern uint256 const fixPageCap;
extern uint256 const fix240911;
+extern uint256 const fixFloatDivide;

} // namespace ripple

@@ -467,6 +467,7 @@ REGISTER_FIX (fixNSDelete, Supported::yes, VoteBehavior::De
REGISTER_FIX (fix240819, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes);
+REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes);

// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

@@ -247,11 +247,11 @@ build_map(boost::beast::http::fields const& h)
    std::map<std::string, std::string> c;
    for (auto const& e : h)
    {
-       auto key(e.name_string().to_string());
+       auto key(std::string(e.name_string()));
        std::transform(key.begin(), key.end(), key.begin(), [](auto kc) {
            return std::tolower(static_cast<unsigned char>(kc));
        });
-       c[key] = e.value().to_string();
+       c[key] = std::string(e.value());
    }
    return c;
}

@@ -50,7 +50,7 @@ public:
    {
        auto it = h.find("X-User");
        if (it != h.end())
-           user_ = it->value().to_string();
+           user_ = std::string(it->value());
        fwdfor_ = std::string(forwardedFor(h));
    }
}

@@ -244,6 +244,7 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const

// Get a node without throwing
// Used on maps where missing nodes are expected
+/*
std::shared_ptr<SHAMapTreeNode>
SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
{
@@ -266,6 +267,49 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const

    return node;
}
+*/
+
+std::shared_ptr<SHAMapTreeNode>
+SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
+{
+    using namespace std::chrono;
+    auto start = high_resolution_clock::now();
+    auto timeout = nanoseconds(50);
+
+    while (true)
+    {
+        // Try to fetch from cache first
+        auto node = cacheLookup(hash);
+        if (node)
+            return node;
+
+        if (backed_)
+        {
+            node = fetchNodeFromDB(hash);
+            if (node)
+            {
+                canonicalize(hash, node);
+                return node;
+            }
+        }
+
+        if (filter)
+            node = checkFilter(hash, filter);
+
+        if (node)
+            return node;
+
+        // Check if we've exceeded timeout
+        auto elapsed = high_resolution_clock::now() - start;
+        if (elapsed >= timeout)
+            break;
+
+        // Short yield to avoid overwhelming CPU
+        std::this_thread::yield();
+    }
+
+    return nullptr;
+}

std::shared_ptr<SHAMapTreeNode>
SHAMap::fetchNodeNT(SHAMapHash const& hash) const
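Editor's note: the reinstated two-argument `SHAMap::fetchNodeNT` above wraps the cache, database and filter lookups in a polling loop bounded by a 50 ns budget, yielding the thread between attempts. The following stand-alone sketch shows the same bounded-retry shape in isolation; the `retryFor` helper and its budget are placeholders, not code from the repository.

#include <chrono>
#include <optional>
#include <thread>

// Retry `attempt` until it yields a value or `budget` elapses.
template <class T, class F>
std::optional<T>
retryFor(std::chrono::nanoseconds budget, F&& attempt)
{
    using clock = std::chrono::steady_clock;
    auto const start = clock::now();
    while (true)
    {
        if (auto v = attempt())
            return v;
        if (clock::now() - start >= budget)
            return std::nullopt;
        std::this_thread::yield();  // brief back-off between attempts
    }
}

int main()
{
    int calls = 0;
    auto result =
        retryFor<int>(std::chrono::milliseconds(1), [&]() -> std::optional<int> {
            return ++calls >= 3 ? std::optional<int>(42) : std::nullopt;
        });
    return (result && *result == 42) ? 0 : 1;
}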
@@ -42,6 +42,9 @@ class LedgerLoad_test : public beast::unit_test::suite
        cfg->START_UP = type;
        assert(!dbPath.empty());
        cfg->legacy("database_path", dbPath);
+       auto& sectionNode = cfg->section(ConfigSection::nodeDatabase());
+       sectionNode.set("type", "memory");
+       cfg->overwrite(SECTION_RELATIONAL_DB, "backend", "sqlite");
        return cfg;
    }

@@ -62,7 +65,13 @@ class LedgerLoad_test : public beast::unit_test::suite

        retval.ledgerFile = td.file("ledgerdata.json");

-       Env env{*this};
+       Env env{*this, envconfig([](std::unique_ptr<Config> cfg) {
+                   auto& sectionNode =
+                       cfg->section(ConfigSection::nodeDatabase());
+                   sectionNode.set("type", "memory");
+                   cfg->overwrite(SECTION_RELATIONAL_DB, "backend", "sqlite");
+                   return cfg;
+               })};
        Account prev;

        for (auto i = 0; i < 20; ++i)
@@ -154,7 +163,7 @@ class LedgerLoad_test : public beast::unit_test::suite
        copy_file(
            sd.ledgerFile,
            ledgerFileCorrupt,
-           copy_option::overwrite_if_exists,
+           copy_options::overwrite_existing,
            ec);
        if (!BEAST_EXPECTS(!ec, ec.message()))
            return;

@@ -50,7 +50,13 @@ struct LedgerReplay_test : public beast::unit_test::suite
        auto const alice = Account("alice");
        auto const bob = Account("bob");

-       Env env(*this);
+       Env env = [&] {
+           auto c = jtx::envconfig();
+           auto& sectionNode = c->section(ConfigSection::nodeDatabase());
+           sectionNode.set("type", "memory");
+           c->overwrite(SECTION_RELATIONAL_DB, "backend", "sqlite");
+           return jtx::Env(*this, std::move(c));
+       }();
        env.fund(XRP(100000), alice, bob);
        env.close();

@@ -29,6 +29,7 @@
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/stream.hpp>
+#include <boost/beast/core/flat_buffer.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/version.hpp>
@@ -574,7 +575,7 @@ private:
            if (ec)
                break;

-           auto path = req.target().to_string();
+           auto path = req.target(); //.to_string();
            res.insert("Server", "TrustedPublisherServer");
            res.version(req.version());
            res.keep_alive(req.keep_alive());
@@ -677,7 +678,8 @@ private:
                // unknown request
                res.result(boost::beast::http::status::not_found);
                res.insert("Content-Type", "text/html");
-               res.body() = "The file '" + path + "' was not found";
+               res.body() =
+                   "The file '" + std::string(path) + "' was not found";
            }

            if (prepare)
@@ -49,8 +49,9 @@ setupConfigForUnitTests(Config& cfg)
    cfg.FEES.account_reserve = XRP(200).value().xrp().drops();
    cfg.FEES.owner_reserve = XRP(50).value().xrp().drops();

-   cfg.overwrite(ConfigSection::nodeDatabase(), "type", "memory");
+   cfg.overwrite(ConfigSection::nodeDatabase(), "type", "rwdb");
    cfg.overwrite(ConfigSection::nodeDatabase(), "path", "main");
+   cfg.overwrite(SECTION_RELATIONAL_DB, "backend", "rwdb");
    cfg.deprecatedClearSection(ConfigSection::importNodeDatabase());
    cfg.legacy("database_path", "");
    cfg.setupControl(true, true, true);
@@ -82,6 +82,8 @@ public:
            }
        }

+       // rwdb backend does not keep table/data after close
+       if (type != "rwdb")
+       {
            // Re-open the backend
            std::unique_ptr<Backend> backend = Manager::instance().make_Backend(
@@ -105,6 +107,8 @@ public:
    {
        std::uint64_t const seedValue = 50;

        testBackend("memory", seedValue);
+       testBackend("rwdb", seedValue);
        testBackend("nudb", seedValue);

#if RIPPLE_ROCKSDB_AVAILABLE
@@ -117,7 +121,7 @@ public:
    }
};

-BEAST_DEFINE_TESTSUITE(Backend, ripple_core, ripple);
+BEAST_DEFINE_TESTSUITE(Backend, NodeStore, ripple);

} // namespace NodeStore
} // namespace ripple
@@ -564,7 +564,7 @@ public:
            BEAST_EXPECT(areBatchesEqual(batch, copy));
        }

-       if (type == "memory")
+       if (type == "memory" || type == "rwdb")
        {
            // Verify default earliest ledger sequence
            {
@@ -661,6 +661,8 @@ public:

        testNodeStore("memory", false, seedValue);

+       testNodeStore("rwdb", false, seedValue);
+
        // Persistent backend tests
        {
            testNodeStore("nudb", true, seedValue);
@@ -47,9 +47,6 @@ class GetCounts_test : public beast::unit_test::suite
        BEAST_EXPECT(
            result.isMember(jss::uptime) &&
            !result[jss::uptime].asString().empty());
-       BEAST_EXPECT(
-           result.isMember(jss::dbKBTotal) &&
-           result[jss::dbKBTotal].asInt() > 0);
    }

    // create some transactions
@@ -63,9 +63,11 @@ public:
        jtx::Env env = [&] {
            auto c = jtx::envconfig();
            auto& sectionNode = c->section(ConfigSection::nodeDatabase());
            sectionNode.set("type", "memory");
            sectionNode.set("earliest_seq", "257");
            sectionNode.set("ledgers_per_shard", "256");
            c->setupControl(true, true, true);
            c->overwrite(SECTION_RELATIONAL_DB, "backend", "sqlite");

            return jtx::Env(*this, std::move(c));
        }();
@@ -138,9 +140,11 @@ public:
            section.set("ledgers_per_shard", "256");
            section.set("earliest_seq", "257");
            auto& sectionNode = c->section(ConfigSection::nodeDatabase());
            sectionNode.set("type", "memory");
            sectionNode.set("earliest_seq", "257");
            sectionNode.set("ledgers_per_shard", "256");
            c->setupControl(true, true, true);
            c->overwrite(SECTION_RELATIONAL_DB, "backend", "sqlite");

            return jtx::Env(*this, std::move(c));
        }();
@@ -282,9 +286,11 @@ public:
            section.set("ledgers_per_shard", "256");
            section.set("earliest_seq", "257");
            auto& sectionNode = c->section(ConfigSection::nodeDatabase());
            sectionNode.set("type", "memory");
            sectionNode.set("earliest_seq", "257");
            sectionNode.set("ledgers_per_shard", "256");
            c->setupControl(true, true, true);
            c->overwrite(SECTION_RELATIONAL_DB, "backend", "sqlite");

            return jtx::Env(
                *this, std::move(c), nullptr, beast::severities::kDisabled);
@@ -674,10 +674,10 @@ class ServerStatus_test : public beast::unit_test::suite,
        resp.result() == boost::beast::http::status::switching_protocols);
    BEAST_EXPECT(
        resp.find("Upgrade") != resp.end() &&
-       resp["Upgrade"] == "websocket");
+       std::string(resp["Upgrade"]) == "websocket");
    BEAST_EXPECT(
        resp.find("Connection") != resp.end() &&
-       resp["Connection"] == "upgrade");
+       std::string(resp["Connection"]) == "Upgrade");
}

void
@@ -57,7 +57,7 @@ public:
    , j_(j)
    {
        Section testSection;
-       testSection.set("type", "memory");
+       testSection.set("type", "rwdb");
        testSection.set("path", "SHAMap_test");
        db_ = NodeStore::Manager::instance().make_Database(
            megabytes(4), scheduler_, 1, testSection, j);