Mirror of https://github.com/Xahau/xahaud.git (synced 2025-12-06 17:27:52 +00:00)
refactor: Remove reporting mode (#5092)
@@ -376,7 +376,6 @@ stored inside the build directory, as either of:
| Option | Default Value | Description |
| --- | --- | --- |
| `assert` | OFF | Enable assertions. |
| `reporting` | OFF | Build the reporting mode feature. |
| `coverage` | OFF | Prepare the coverage report. |
| `tests` | ON | Build tests. |
| `unity` | ON | Configure a unity build. |
@@ -141,14 +141,6 @@ else()
endif()
target_link_libraries(ripple_libs INTERFACE ${nudb})

if(reporting)
  find_package(cassandra-cpp-driver REQUIRED)
  find_package(PostgreSQL REQUIRED)
  target_link_libraries(ripple_libs INTERFACE
    cassandra-cpp-driver::cassandra-cpp-driver
    PostgreSQL::PostgreSQL
  )
endif()
target_link_libraries(ripple_libs INTERFACE
  ed25519::ed25519
  lz4::lz4
@@ -13,7 +13,7 @@
#
# 4. HTTPS Client
#
# 5. Reporting Mode
# 5. <vacated>
#
# 6. Database
#
@@ -884,119 +884,6 @@
#
#-------------------------------------------------------------------------------
#
# 5. Reporting Mode
#
#------------
#
# rippled has an optional operating mode called Reporting Mode. In Reporting
# Mode, rippled does not connect to the peer to peer network. Instead, rippled
# will continuously extract data from one or more rippled servers that are
# connected to the peer to peer network (referred to as an ETL source).
# Reporting mode servers will forward RPC requests that require access to the
# peer to peer network (submit, fee, etc) to an ETL source.
#
# [reporting]   Settings for Reporting Mode. If and only if this section is
#               present, rippled will start in reporting mode. This section
#               contains a list of ETL source names, and key-value pairs. The
#               ETL source names each correspond to a configuration file
#               section; the names must match exactly. The key-value pairs are
#               optional.
#
#
# [<name>]
#
#   A series of key/value pairs that specify an ETL source.
#
#   source_ip = <IP-address>
#
#       Required. IP address of the ETL source. Can also be a DNS record.
#
#   source_ws_port = <number>
#
#       Required. Port on which ETL source is accepting unencrypted websocket
#       connections.
#
#   source_grpc_port = <number>
#
#       Required for ETL. Port on which ETL source is accepting gRPC requests.
#       If this option is omitted, this ETL source cannot actually be used for
#       ETL; the Reporting Mode server can still forward RPCs to this ETL
#       source, but cannot extract data from this ETL source.
#
#
# Key-value pairs (all optional):
#
# read_only     Valid values: 0, 1. Default is 0. If set to 1, the server
#               will start in strict read-only mode, and will not perform
#               ETL. The server will still handle RPC requests, and will
#               still forward RPC requests that require access to the p2p
#               network.
#
# start_sequence
#               Sequence of first ledger to extract if the database is empty.
#               ETL extracts ledgers in order. If this setting is absent and
#               the database is empty, ETL will start with the next ledger
#               validated by the network. If this setting is present and the
#               database is not empty, an exception is thrown.
#
# num_markers   Degree of parallelism used during the initial ledger
#               download. Only used if the database is empty. Valid values
#               are 1-256. A higher degree of parallelism results in a
#               faster download, but puts more load on the ETL source.
#               Default is 2.
#
# Example:
#
# [reporting]
# etl_source1
# etl_source2
# read_only=0
# start_sequence=32570
# num_markers=8
#
# [etl_source1]
# source_ip=1.2.3.4
# source_ws_port=6005
# source_grpc_port=50051
#
# [etl_source2]
# source_ip=5.6.7.8
# source_ws_port=6005
# source_grpc_port=50051
#
# Minimal Example:
#
# [reporting]
# etl_source1
#
# [etl_source1]
# source_ip=1.2.3.4
# source_ws_port=6005
# source_grpc_port=50051
#
#
# Notes:
#
# Reporting Mode requires Postgres (instead of SQLite). The Postgres
# connection info is specified under the [ledger_tx_tables] config section;
# see the Database section for further documentation.
#
# Each ETL source specified must have gRPC enabled (by adding a [port_grpc]
# section to the config). It is recommended to add a secure_gateway entry to
# the gRPC section, in order to bypass the server's rate limiting.
# This section needs to be added to the config of the ETL source, not
# the config of the reporting node. In the example below, the
# reporting server is running at 127.0.0.1. Multiple IPs can be
# specified in secure_gateway via a comma separated list.
#
# [port_grpc]
# ip = 0.0.0.0
# port = 50051
# secure_gateway = 127.0.0.1
#
#
#-------------------------------------------------------------------------------
#
# 6. Database
#
#------------
@@ -1004,13 +891,7 @@
# rippled creates 4 SQLite databases to hold bookkeeping information
# about transactions, local credentials, and various other things.
# It also creates the NodeDB, which holds all the objects that
# make up the current and historical ledgers. In Reporting Mode, rippled
# uses a Postgres database instead of SQLite.
#
# The simplest way to work with Postgres is to install it locally.
# When it is running, execute the initdb.sh script in the current
# directory as: sudo -u postgres ./initdb.sh
# This will create the rippled user and an empty database of the same name.
# make up the current and historical ledgers.
#
# The size of the NodeDB grows in proportion to the amount of new data and the
# amount of historical data (a configurable setting) so the performance of the
@@ -1052,14 +933,6 @@
# keeping full history is not advised, and using online delete is
# recommended.
#
# type = Cassandra
#
# Apache Cassandra is an open-source, distributed key-value store - see
# https://cassandra.apache.org/ for more details.
#
# Cassandra is an alternative backend to be used only with Reporting Mode.
# See the Reporting Mode section for more details about Reporting Mode.
#
# type = RWDB
#
# RWDB is a high-performance memory store written by XRPL-Labs and optimized
@@ -1075,21 +948,6 @@
#
# path              Location to store the database
#
# Required keys for Cassandra:
#
# contact_points    IP of a node in the Cassandra cluster
#
# port              CQL Native Transport Port
#
# secure_connect_bundle
#                   Absolute path to a secure connect bundle. When using
#                   a secure connect bundle, contact_points and port are
#                   not required.
#
# keyspace          Name of Cassandra keyspace to use
#
# table_name        Name of table in above keyspace to use
#
# Optional keys
#
# cache_size        Size of cache for database records. Default is 16384.
@@ -1166,25 +1024,6 @@
#                   checking until healthy.
#                   Default is 5.
#
# Optional keys for Cassandra:
#
# username          Username to use if Cassandra cluster requires
#                   authentication
#
# password          Password to use if Cassandra cluster requires
#                   authentication
#
# max_requests_outstanding
#                   Limits the maximum number of concurrent database
#                   writes. Default is 10 million. For slower clusters,
#                   large numbers of concurrent writes can overload the
#                   cluster. Setting this option can help eliminate
#                   write timeouts and other write errors due to the
#                   cluster being overloaded.
# io_threads
#                   Set the number of IO threads used by the
#                   Cassandra driver. Defaults to 4.
#
# Notes:
# The 'node_db' entry configures the primary, persistent storage.
#
@@ -1280,42 +1119,6 @@
#                   This setting may not be combined with the
#                   "safety_level" setting.
#
# [ledger_tx_tables] (optional)
#
# conninfo          Info for connecting to Postgres. Format is
#                   postgres://[username]:[password]@[ip]/[database].
#                   The database and user must already exist. If this
#                   section is missing and rippled is running in
#                   Reporting Mode, rippled will connect as the
#                   user running rippled to a database with the
#                   same name. On Linux and Mac OS X, the connection
#                   will take place using the server's UNIX domain
#                   socket. On Windows, through the localhost IP
#                   address. Default is empty.
#
# use_tx_tables     Valid values: 1, 0
#                   The default is 1 (true). Determines whether to use
#                   the SQLite transaction database. If set to 0,
#                   rippled will not write to the transaction database,
#                   and will reject tx, account_tx and tx_history RPCs.
#                   In Reporting Mode, this setting is ignored.
#
# max_connections   Valid values: any positive integer up to 64 bit
#                   storage length. This configures the maximum
#                   number of concurrent connections to postgres.
#                   Default is the maximum possible value to
#                   fit in a 64 bit integer.
#
# timeout           Number of seconds after which idle postgres
#                   connections are disconnected. If set to 0,
#                   connections never time out. Default is 600.
#
#
# remember_ip       Valid values: 1, 0
#                   Default is 1 (true). Whether to cache host and
#                   port connection settings.
#
#
#-------------------------------------------------------------------------------
#
# 7. Diagnostics
@@ -1579,6 +1382,12 @@
#       Admin level API commands over Secure Websockets, when originating
#       from the same machine (via the loopback adapter at 127.0.0.1).
#
#   "grpc"
#
#       ETL commands for Clio. We recommend setting secure_gateway
#       in this section to a comma-separated list of the addresses
#       of your Clio servers, in order to bypass rippled's rate limiting.
#
#   This port is commented out but can be enabled by removing
#   the '#' from each corresponding line, including the entry under [server].
#
@@ -1661,15 +1470,6 @@ advisory_delete=0
/var/lib/rippled/db

# To use Postgres, uncomment this section and fill in the appropriate connection
# info. Postgres can only be used in Reporting Mode.
# To disable writing to the transaction database, uncomment this section, and
# set use_tx_tables=0
# [ledger_tx_tables]
# conninfo = postgres://[username]:[password]@[ip]/[database]
# use_tx_tables=1


# This needs to be an absolute directory reference, not a relative one.
# Modify this value as required.
[debug_logfile]
@@ -1697,15 +1497,3 @@ validators.txt
# set ssl_verify to 0.
[ssl_verify]
1


# To run in Reporting Mode, uncomment this section and fill in the appropriate
# connection info for one or more ETL sources.
# [reporting]
# etl_source
#
#
# [etl_source]
# source_grpc_port=50051
# source_ws_port=6005
# source_ip=127.0.0.1
File diff suppressed because it is too large
@@ -140,14 +140,6 @@ if(xrpld)
target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI)
endif ()

if(reporting)
  set(suffix -reporting)
  set_target_properties(rippled PROPERTIES OUTPUT_NAME rippled-reporting)
  get_target_property(BIN_NAME rippled OUTPUT_NAME)
  message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}")
  target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
endif()

# any files that don't play well with unity should be added here
if(tests)
  set_source_files_properties(
@@ -10,8 +10,6 @@ option(assert "Enables asserts, even in release builds" OFF)
option(xrpld "Build xrpld" ON)

option(reporting "Build rippled with reporting mode enabled" OFF)

option(tests "Build tests" ON)

option(unity "Creates a build using UNITY support in cmake. This is the default" ON)
@@ -15,7 +15,6 @@ class Xrpl(ConanFile):
'coverage': [True, False],
'fPIC': [True, False],
'jemalloc': [True, False],
'reporting': [True, False],
'rocksdb': [True, False],
'shared': [True, False],
'static': [True, False],
@@ -45,7 +44,6 @@ class Xrpl(ConanFile):
'coverage': False,
'fPIC': True,
'jemalloc': False,
'reporting': False,
'rocksdb': True,
'shared': False,
'static': True,
@@ -53,8 +51,6 @@ class Xrpl(ConanFile):
'unity': False,
'xrpld': False,

'cassandra-cpp-driver/*:shared': False,
'cassandra-cpp-driver/*:use_atomic': None,
'date/*:header_only': True,
'grpc/*:shared': False,
'grpc/*:secure': True,
@@ -73,7 +69,6 @@ class Xrpl(ConanFile):
'libarchive/*:with_pcreposix': False,
'libarchive/*:with_xattr': False,
'libarchive/*:with_zlib': False,
'libpq/*:shared': False,
'lz4/*:shared': False,
'openssl/*:shared': False,
'protobuf/*:shared': False,
@@ -111,9 +106,6 @@ class Xrpl(ConanFile):
self.requires('sqlite3/3.42.0', force=True)
if self.options.jemalloc:
    self.requires('jemalloc/5.3.0')
if self.options.reporting:
    self.requires('cassandra-cpp-driver/2.15.3')
    self.requires('libpq/14.7')
if self.options.rocksdb:
    self.requires('rocksdb/6.29.5')
@@ -140,7 +132,6 @@ class Xrpl(ConanFile):
tc.variables['assert'] = self.options.assertions
tc.variables['coverage'] = self.options.coverage
tc.variables['jemalloc'] = self.options.jemalloc
tc.variables['reporting'] = self.options.reporting
tc.variables['rocksdb'] = self.options.rocksdb
tc.variables['BUILD_SHARED_LIBS'] = self.options.shared
tc.variables['static'] = self.options.static
@@ -11,7 +11,7 @@ import "org/xrpl/rpc/v1/get_ledger_diff.proto";
// These methods are binary-only methods for retrieving arbitrary ledger state
// via gRPC. These methods are used by clio and reporting mode, but can also be
// via gRPC. These methods are used by clio, but can also be
// used by any client that wants to extract ledger state in an efficient manner.
// They do not directly mimic the JSON equivalent methods.
service XRPLedgerAPIService {
@@ -137,8 +137,8 @@ enum error_code_i {
rpcINVALID_LGR_RANGE = 79,
rpcEXPIRED_VALIDATOR_LIST = 80,

// Reporting
rpcFAILED_TO_FORWARD = 90,
// unused = 90,
// DEPRECATED. New code must not use this value.
rpcREPORTING_UNSUPPORTED = 91,

rpcOBJECT_NOT_FOUND = 92,
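The hunk above retires codes without renumbering their successors. A minimal illustration of why a removed code becomes a comment rather than being deleted (enum and names here are hypothetical, for exposition only):

    // C++ sketch: keeping the numeric slot stable preserves the wire
    // protocol; clients that already map 91 to an error keep working.
    enum example_error_code {
        exEXPIRED_VALIDATOR_LIST = 80,
        // unused = 90,                 // was exFAILED_TO_FORWARD
        exREPORTING_UNSUPPORTED = 91,   // deprecated, value reserved
        exOBJECT_NOT_FOUND = 92,        // unchanged by the retirement
    };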
@@ -151,8 +151,7 @@ enum error_code_i {
// Oracle
rpcORACLE_MALFORMED = 95,

rpcLAST =
    rpcORACLE_MALFORMED  // rpcLAST should always equal the last code.
rpcLAST = rpcORACLE_MALFORMED  // rpcLAST should always equal the last code.
};

/** Codes returned in the `warnings` array of certain RPC commands.
@@ -163,7 +162,7 @@ enum warning_code_i {
warnRPC_UNSUPPORTED_MAJORITY = 1001,
warnRPC_AMENDMENT_BLOCKED = 1002,
warnRPC_EXPIRED_VALIDATOR_LIST = 1003,
warnRPC_REPORTING = 1004
// unused = 1004
};

//------------------------------------------------------------------------------
@@ -71,7 +71,6 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
{rpcDST_ISR_MALFORMED, "dstIsrMalformed", "Destination issuer is malformed.", 400},
{rpcEXCESSIVE_LGR_RANGE, "excessiveLgrRange", "Ledger range exceeds 1000.", 400},
{rpcFORBIDDEN, "forbidden", "Bad credentials.", 403},
{rpcFAILED_TO_FORWARD, "failedToForward", "Failed to forward request to p2p node", 503},
{rpcHIGH_FEE, "highFee", "Current transaction fee exceeds your limit.", 402},
{rpcINTERNAL, "internal", "Internal error.", 500},
{rpcINVALID_LGR_RANGE, "invalidLgrRange", "Ledger range is invalid.", 400},
@@ -98,7 +97,6 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
{rpcNO_PF_REQUEST, "noPathRequest", "No pathfinding request in progress.", 404},
{rpcOBJECT_NOT_FOUND, "objectNotFound", "The requested object was not found.", 404},
{rpcPUBLIC_MALFORMED, "publicMalformed", "Public key is malformed.", 400},
{rpcREPORTING_UNSUPPORTED, "reportingUnsupported", "Requested operation not supported by reporting mode server", 405},
{rpcSENDMAX_MALFORMED, "sendMaxMalformed", "SendMax amount malformed.", 400},
{rpcSIGNING_MALFORMED, "signingMalformed", "Signing of transaction is malformed.", 400},
{rpcSLOW_DOWN, "slowDown", "You are placing too much load on the server.", 429},
File diff suppressed because it is too large
@@ -36,17 +36,8 @@ AcceptedLedger::AcceptedLedger(
ledger, item.first, item.second));
};

if (app.config().reporting())
{
    auto const txs = flatFetchTransactions(*ledger, app);
    transactions_.reserve(txs.size());
    insertAll(txs);
}
else
{
    transactions_.reserve(256);
    insertAll(ledger->txs);
}
transactions_.reserve(256);
insertAll(ledger->txs);

std::sort(
    transactions_.begin(),
@@ -29,12 +29,10 @@
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/consensus/LedgerTiming.h>
#include <xrpld/core/Config.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/core/Pg.h>
#include <xrpld/core/SociDB.h>
#include <xrpld/nodestore/Database.h>
#include <xrpl/basics/Log.h>
@@ -269,11 +267,6 @@ Ledger::Ledger(
if (info_.txHash.isNonZero() &&
    !txMap_.fetchRoot(SHAMapHash{info_.txHash}, nullptr))
{
    if (config.reporting())
    {
        // Reporting should never have incomplete data
        Throw<std::runtime_error>("Missing tx map root for ledger");
    }
    loaded = false;
    JLOG(j.warn()) << "Don't have transaction root for ledger" << info_.seq;
}
@@ -281,11 +274,6 @@ Ledger::Ledger(
if (info_.accountHash.isNonZero() &&
    !stateMap_.fetchRoot(SHAMapHash{info_.accountHash}, nullptr))
{
    if (config.reporting())
    {
        // Reporting should never have incomplete data
        Throw<std::runtime_error>("Missing state map root for ledger");
    }
    loaded = false;
    JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq;
}
@@ -300,7 +288,7 @@ Ledger::Ledger(
if (!loaded)
{
    info_.hash = calculateLedgerHash(info_);
    if (acquire && !config.reporting())
    if (acquire)
        family.missingNodeAcquireByHash(info_.hash, info_.seq);
}
}
@@ -1182,92 +1170,4 @@ loadByHash(uint256 const& ledgerHash, Application& app, bool acquire)
return {};
}

std::vector<
    std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
flatFetchTransactions(Application& app, std::vector<uint256>& nodestoreHashes)
{
    if (!app.config().reporting())
    {
        assert(false);
        Throw<std::runtime_error>(
            "flatFetchTransactions: not running in reporting mode");
    }

    std::vector<
        std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
        txns;
    auto start = std::chrono::system_clock::now();
    auto nodeDb =
        dynamic_cast<NodeStore::DatabaseNodeImp*>(&(app.getNodeStore()));
    if (!nodeDb)
    {
        assert(false);
        Throw<std::runtime_error>(
            "Called flatFetchTransactions but database is not DatabaseNodeImp");
    }
    auto objs = nodeDb->fetchBatch(nodestoreHashes);

    auto end = std::chrono::system_clock::now();
    JLOG(app.journal("Ledger").debug())
        << " Flat fetch time : " << ((end - start).count() / 1000000000.0)
        << " number of transactions " << nodestoreHashes.size();
    assert(objs.size() == nodestoreHashes.size());
    for (size_t i = 0; i < objs.size(); ++i)
    {
        uint256& nodestoreHash = nodestoreHashes[i];
        auto& obj = objs[i];
        if (obj)
        {
            auto node = SHAMapTreeNode::makeFromPrefix(
                makeSlice(obj->getData()), SHAMapHash{nodestoreHash});
            if (!node)
            {
                assert(false);
                Throw<std::runtime_error>(
                    "flatFetchTransactions : Error making SHAMap node");
            }
            auto item = (static_cast<SHAMapLeafNode*>(node.get()))->peekItem();
            if (!item)
            {
                assert(false);
                Throw<std::runtime_error>(
                    "flatFetchTransactions : Error reading SHAMap node");
            }
            auto txnPlusMeta = deserializeTxPlusMeta(*item);
            if (!txnPlusMeta.first || !txnPlusMeta.second)
            {
                assert(false);
                Throw<std::runtime_error>(
                    "flatFetchTransactions : Error deserializing SHAMap node");
            }
            txns.push_back(std::move(txnPlusMeta));
        }
        else
        {
            assert(false);
            Throw<std::runtime_error>(
                "flatFetchTransactions : Containing SHAMap node not found");
        }
    }
    return txns;
}
std::vector<
    std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
flatFetchTransactions(ReadView const& ledger, Application& app)
{
    if (!app.config().reporting())
    {
        assert(false);
        return {};
    }

    auto const db =
        dynamic_cast<PostgresDatabase*>(&app.getRelationalDatabase());
    if (!db)
        Throw<std::runtime_error>("Failed to get relational database");

    auto nodestoreHashes = db->getTxHashes(ledger.info().seq);

    return flatFetchTransactions(app, nodestoreHashes);
}
} // namespace ripple
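For orientation, this is roughly how the removed overload was consumed elsewhere in this diff (the AcceptedLedger and ledger-fill call sites have the same shape). A minimal sketch, assuming rippled's Application and ReadView types and the pre-removal declaration above; the function name logLedgerTxCount is hypothetical:

    #include <iostream>

    // Batch-fetch every (tx, metadata) pair of a ledger straight from the
    // nodestore, as reporting-mode code paths used to do.
    void
    logLedgerTxCount(ripple::ReadView const& ledger, ripple::Application& app)
    {
        // Pre-removal contract: only meaningful when app.config().reporting()
        // is true; otherwise the overload asserted and returned empty.
        auto const txs = ripple::flatFetchTransactions(ledger, app);
        std::cout << "ledger " << ledger.info().seq << " carries "
                  << txs.size() << " transactions\n";
    }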
@@ -467,32 +467,6 @@ loadByHash(uint256 const& ledgerHash, Application& app, bool acquire = true);
extern std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
getLatestLedger(Application& app);

// *** Reporting Mode Only ***
// Fetch all of the transactions contained in ledger from the nodestore.
// The transactions are fetched directly as a batch, instead of traversing the
// transaction SHAMap. Fetching directly is significantly faster than
// traversing, as there are fewer database reads, and all of the reads can be
// executed concurrently. This function only works in reporting mode.
// @param ledger the ledger for which to fetch the contained transactions
// @param app reference to the Application
// @return vector of (transaction, metadata) pairs
extern std::vector<
    std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
flatFetchTransactions(ReadView const& ledger, Application& app);

// *** Reporting Mode Only ***
// For each nodestore hash, fetch the transaction.
// The transactions are fetched directly as a batch, instead of traversing the
// transaction SHAMap. Fetching directly is significantly faster than
// traversing, as there are fewer database reads, and all of the reads can be
// executed concurrently. This function only works in reporting mode.
// @param nodestoreHashes hashes of the transactions to fetch
// @param app reference to the Application
// @return vector of (transaction, metadata) pairs
extern std::vector<
    std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
flatFetchTransactions(Application& app, std::vector<uint256>& nodestoreHashes);

/** Deserialize a SHAMapItem containing a single STTx

Throw:
@@ -46,24 +46,6 @@ namespace ripple {
class Peer;
class Transaction;

// This error is thrown when a codepath tries to access the open or closed
// ledger while the server is running in reporting mode. Any RPCs that request
// the open or closed ledger should be forwarded to a p2p node. Usually, the
// decision to forward is made based on the required condition of the handler,
// or which ledger is specified. However, there are some codepaths which are
// not covered by the aforementioned logic (though they probably should be), so
// this error is thrown in case a codepath falls through the cracks.
class ReportingShouldProxy : public std::runtime_error
{
public:
    ReportingShouldProxy()
        : std::runtime_error(
              "Reporting mode has no open or closed ledger. Proxy this "
              "request")
    {
    }
};
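A condensed sketch of how this exception drove request proxying; it mirrors the GRPCServer catch block removed later in this diff (handler_, responder_, and forwardToP2p are members of the surrounding CallData template, shown only to illustrate the control flow):

    try
    {
        // Handlers that touch the open or closed ledger throw
        // ReportingShouldProxy when the server runs in reporting mode...
        std::pair<Response, grpc::Status> result = handler_(context);
        responder_.Finish(result.first, result.second, this);
    }
    catch (ReportingShouldProxy&)
    {
        // ...and the server replays the request against a p2p node instead.
        forwardToP2p(context);
    }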
// Tracks the current ledger and any ledgers in the process of closing
// Tracks ledger history
// Tracks held transactions
@@ -97,10 +79,6 @@ public:
std::shared_ptr<Ledger const>
getClosedLedger()
{
    if (app_.config().reporting())
    {
        Throw<ReportingShouldProxy>();
    }
    return mClosedLedger.get();
}
@@ -34,10 +34,9 @@
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/paths/PathRequests.h>
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
#include <xrpld/app/rdb/RelationalDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/core/DatabaseCon.h>
#include <xrpld/core/Pg.h>
#include <xrpld/core/TimeKeeper.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/Peer.h>
@@ -274,12 +273,6 @@ LedgerMaster::getValidatedLedgerAge()
{
    using namespace std::chrono_literals;

#ifdef RIPPLED_REPORTING
    if (app_.config().reporting())
        return static_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
            ->getValidatedLedgerAge();
#endif

    std::chrono::seconds valClose{mValidLedgerSign.load()};
    if (valClose == 0s)
    {
@@ -305,12 +298,6 @@ LedgerMaster::isCaughtUp(std::string& reason)
{
    using namespace std::chrono_literals;

#ifdef RIPPLED_REPORTING
    if (app_.config().reporting())
        return static_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
            ->isCaughtUp(reason);
#endif

    if (getPublishedLedgerAge() > 3min)
    {
        reason = "No recently-published ledger";
@@ -618,9 +605,6 @@ LedgerMaster::clearLedger(std::uint32_t seq)
bool
LedgerMaster::isValidated(ReadView const& ledger)
{
    if (app_.config().reporting())
        return true;  // Reporting mode only supports validated ledger

    if (ledger.open())
        return false;
@@ -694,32 +678,6 @@ LedgerMaster::getFullValidatedRange(
bool
LedgerMaster::getValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal)
{
    if (app_.config().reporting())
    {
        std::string res = getCompleteLedgers();
        try
        {
            if (res == "empty" || res == "error" || res.empty())
                return false;
            else if (size_t delim = res.find('-'); delim != std::string::npos)
            {
                minVal = std::stol(res.substr(0, delim));
                maxVal = std::stol(res.substr(delim + 1));
            }
            else
            {
                minVal = maxVal = std::stol(res);
            }
            return true;
        }
        catch (std::exception const& e)
        {
            JLOG(m_journal.error()) << "LedgerMaster::getValidatedRange: "
                                       "exception parsing complete ledgers: "
                                    << e.what();
            return false;
        }
    }
    if (!getFullValidatedRange(minVal, maxVal))
        return false;
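Extracted as a standalone sketch, the removed branch's parse of the Postgres complete-ledgers summary looks like this. The "min-max", single-value, and "empty"/"error" formats are taken from the code above; the free-function name is hypothetical:

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <utility>

    // Parse "32570-70000" into {32570, 70000} and "70000" into {70000, 70000};
    // "empty", "error", or malformed input yields std::nullopt.
    std::optional<std::pair<std::uint32_t, std::uint32_t>>
    parseCompleteLedgers(std::string const& res)
    {
        try
        {
            if (res.empty() || res == "empty" || res == "error")
                return std::nullopt;
            if (auto const delim = res.find('-'); delim != std::string::npos)
                return std::pair{
                    static_cast<std::uint32_t>(std::stoul(res.substr(0, delim))),
                    static_cast<std::uint32_t>(std::stoul(res.substr(delim + 1)))};
            auto const v = static_cast<std::uint32_t>(std::stoul(res));
            return std::pair{v, v};
        }
        catch (std::exception const&)
        {
            return std::nullopt;  // mirrors the logged-and-false path above
        }
    }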
@@ -1702,25 +1660,12 @@ LedgerMaster::peekMutex()
std::shared_ptr<ReadView const>
LedgerMaster::getCurrentLedger()
{
    if (app_.config().reporting())
    {
        Throw<ReportingShouldProxy>();
    }
    return app_.openLedger().current();
}

std::shared_ptr<Ledger const>
LedgerMaster::getValidatedLedger()
{
#ifdef RIPPLED_REPORTING
    if (app_.config().reporting())
    {
        auto seq = app_.getRelationalDatabase().getMaxLedgerSeq();
        if (!seq)
            return {};
        return getLedgerBySeq(*seq);
    }
#endif
    return mValidLedger.get();
}
@@ -1749,11 +1694,6 @@ LedgerMaster::getPublishedLedger()
std::string
LedgerMaster::getCompleteLedgers()
{
#ifdef RIPPLED_REPORTING
    if (app_.config().reporting())
        return static_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
            ->getCompleteLedgers();
#endif
    std::lock_guard sl(mCompleteLock);
    return to_string(mCompleteLedgers);
}
@@ -22,7 +22,6 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/core/Pg.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/DeliveredAmount.h>
#include <xrpld/rpc/detail/RPCHelpers.h>
@@ -232,14 +231,7 @@ fillJsonTx(Object& json, LedgerFill const& fill)
    }
};

if (fill.context && fill.context->app.config().reporting())
{
    appendAll(flatFetchTransactions(fill.ledger, fill.context->app));
}
else
{
    appendAll(fill.ledger.txs);
}
appendAll(fill.ledger.txs);
}
catch (std::exception const& ex)
{
@@ -46,9 +46,8 @@
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorSite.h>
#include <xrpld/app/paths/PathRequests.h>
#include <xrpld/app/rdb/RelationalDatabase.h>
#include <xrpld/app/rdb/Wallet.h>
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
#include <xrpld/app/reporting/ReportingETL.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/core/DatabaseCon.h>
#include <xrpld/nodestore/DummyScheduler.h>
@@ -239,7 +238,6 @@ public:
io_latency_sampler m_io_latency_sampler;

std::unique_ptr<GRPCServer> grpcServer_;
std::unique_ptr<ReportingETL> reportingETL_;

//--------------------------------------------------------------------------
@@ -299,8 +297,7 @@ public:
, m_jobQueue(std::make_unique<JobQueue>(
    [](std::unique_ptr<Config> const& config) {
        if (config->standalone() && !config->reporting() &&
            !config->FORCE_MULTI_THREAD)
        if (config->standalone() && !config->FORCE_MULTI_THREAD)
            return 1;

        if (config->WORKERS)
@@ -478,9 +475,6 @@ public:
      std::chrono::milliseconds(100),
      get_io_service())
, grpcServer_(std::make_unique<GRPCServer>(*this))
, reportingETL_(
      config_->reporting() ? std::make_unique<ReportingETL>(*this)
                           : nullptr)
{
    initAccountIdCache(config_->getValueFor(SizedItem::accountIdCacheSize));
@@ -789,16 +783,12 @@ public:
OpenLedger&
openLedger() override
{
    if (config_->reporting())
        Throw<ReportingShouldProxy>();
    return *openLedger_;
}

OpenLedger const&
openLedger() const override
{
    if (config_->reporting())
        Throw<ReportingShouldProxy>();
    return *openLedger_;
}
@@ -830,13 +820,6 @@ public:
    return *mWalletDB;
}

ReportingETL&
getReportingETL() override
{
    assert(reportingETL_.get() != nullptr);
    return *reportingETL_;
}

bool
serverOkay(std::string& reason) override;
@@ -1132,11 +1115,6 @@ public:
        << "; size after: " << cachedSLEs_.size();
}

#ifdef RIPPLED_REPORTING
if (auto pg = dynamic_cast<PostgresDatabase*>(&*mRelationalDatabase))
    pg->sweep();
#endif

// Set timer to do another sweep later.
setSweepTimer();
}
@@ -1282,54 +1260,51 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline)
auto const startUp = config_->START_UP;
JLOG(m_journal.debug()) << "startUp: " << startUp;
if (!config_->reporting())
if (startUp == Config::FRESH)
{
if (startUp == Config::FRESH)
{
JLOG(m_journal.info()) << "Starting new Ledger";
JLOG(m_journal.info()) << "Starting new Ledger";

startGenesisLedger();
}
else if (
startUp == Config::LOAD || startUp == Config::LOAD_FILE ||
startUp == Config::REPLAY || startUp == Config::LOAD_JSON)
{
JLOG(m_journal.info()) << "Loading specified Ledger";
startGenesisLedger();
}
else if (
startUp == Config::LOAD || startUp == Config::LOAD_FILE ||
startUp == Config::REPLAY || startUp == Config::LOAD_JSON)
{
JLOG(m_journal.info()) << "Loading specified Ledger";

if (!loadOldLedger(
config_->START_LEDGER,
startUp == Config::REPLAY,
startUp == Config::LOAD_FILE,
startUp == Config::LOAD_JSON,
config_->TRAP_TX_HASH))
if (!loadOldLedger(
config_->START_LEDGER,
startUp == Config::REPLAY,
startUp == Config::LOAD_FILE,
startUp == Config::LOAD_JSON,
config_->TRAP_TX_HASH))
{
JLOG(m_journal.error())
<< "The specified ledger could not be loaded.";
if (config_->FAST_LOAD)
{
JLOG(m_journal.error())
<< "The specified ledger could not be loaded.";
if (config_->FAST_LOAD)
{
// Fall back to syncing from the network, such as
// when there's no existing data.
startGenesisLedger();
}
else
{
return false;
}
// Fall back to syncing from the network, such as
// when there's no existing data.
startGenesisLedger();
}
else
{
return false;
}
}
else if (startUp == Config::NETWORK)
{
// This should probably become the default once we have a stable
// network.
if (!config_->standalone())
m_networkOPs->setNeedNetworkLedger();
}
else if (startUp == Config::NETWORK)
{
// This should probably become the default once we have a stable
// network.
if (!config_->standalone())
m_networkOPs->setNeedNetworkLedger();

startGenesisLedger();
}
else
{
startGenesisLedger();
}
startGenesisLedger();
}
else
{
startGenesisLedger();
}

if (auto const& forcedRange = config().FORCED_LEDGER_RANGE_PRESENT)
@@ -1338,8 +1313,7 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline)
forcedRange->first, forcedRange->second);
}

if (!config().reporting())
m_orderBookDB.setup(getLedgerMaster().getCurrentLedger());
m_orderBookDB.setup(getLedgerMaster().getCurrentLedger());

nodeIdentity_ = getNodeIdentity(*this, cmdline);
@@ -1349,61 +1323,55 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline)
return false;
}

if (!config().reporting())
{
if (validatorKeys_.configInvalid())
return false;

if (!validatorManifests_->load(
getWalletDB(),
"ValidatorManifests",
validatorKeys_.manifest,
config().section(SECTION_VALIDATOR_KEY_REVOCATION).values()))
{
if (validatorKeys_.configInvalid())
return false;

if (!validatorManifests_->load(
getWalletDB(),
"ValidatorManifests",
validatorKeys_.manifest,
config()
.section(SECTION_VALIDATOR_KEY_REVOCATION)
.values()))
{
JLOG(m_journal.fatal())
<< "Invalid configured validator manifest.";
return false;
}

publisherManifests_->load(getWalletDB(), "PublisherManifests");

// It is possible to have a valid ValidatorKeys object without
// setting the signingKey or masterKey. This occurs if the
// configuration file does not have either
// SECTION_VALIDATOR_TOKEN or SECTION_VALIDATION_SEED section.

// masterKey for the configuration-file specified validator keys
std::optional<PublicKey> localSigningKey;
if (validatorKeys_.keys)
localSigningKey = validatorKeys_.keys->publicKey;

// Setup trusted validators
if (!validators_->load(
localSigningKey,
config().section(SECTION_VALIDATORS).values(),
config().section(SECTION_VALIDATOR_LIST_KEYS).values()))
{
JLOG(m_journal.fatal())
<< "Invalid entry in validator configuration.";
return false;
}
}

if (!validatorSites_->load(
config().section(SECTION_VALIDATOR_LIST_SITES).values()))
{
JLOG(m_journal.fatal())
<< "Invalid entry in [" << SECTION_VALIDATOR_LIST_SITES << "]";
JLOG(m_journal.fatal()) << "Invalid configured validator manifest.";
return false;
}

// Tell the AmendmentTable who the trusted validators are.
m_amendmentTable->trustChanged(validators_->getQuorumKeys().second);
publisherManifests_->load(getWalletDB(), "PublisherManifests");

// It is possible to have a valid ValidatorKeys object without
// setting the signingKey or masterKey. This occurs if the
// configuration file does not have either
// SECTION_VALIDATOR_TOKEN or SECTION_VALIDATION_SEED section.

// masterKey for the configuration-file specified validator keys
std::optional<PublicKey> localSigningKey;
if (validatorKeys_.keys)
localSigningKey = validatorKeys_.keys->publicKey;

// Setup trusted validators
if (!validators_->load(
localSigningKey,
config().section(SECTION_VALIDATORS).values(),
config().section(SECTION_VALIDATOR_LIST_KEYS).values()))
{
JLOG(m_journal.fatal())
<< "Invalid entry in validator configuration.";
return false;
}
}

if (!validatorSites_->load(
config().section(SECTION_VALIDATOR_LIST_SITES).values()))
{
JLOG(m_journal.fatal())
<< "Invalid entry in [" << SECTION_VALIDATOR_LIST_SITES << "]";
return false;
}

// Tell the AmendmentTable who the trusted validators are.
m_amendmentTable->trustChanged(validators_->getQuorumKeys().second);

if (config_->IMPORT_VL_KEYS.empty())
{
JLOG(m_journal.warn())
@@ -1422,23 +1390,19 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline)
// move the instantiation inside a conditional:
//
// if (!config_.standalone())
if (!config_->reporting())
{
overlay_ = make_Overlay(
*this,
setup_Overlay(*config_),
*serverHandler_,
*m_resourceManager,
*m_resolver,
get_io_service(),
*config_,
m_collectorManager->collector());
add(*overlay_); // add to PropertyStream
}
overlay_ = make_Overlay(
*this,
setup_Overlay(*config_),
*serverHandler_,
*m_resourceManager,
*m_resolver,
get_io_service(),
*config_,
m_collectorManager->collector());
add(*overlay_); // add to PropertyStream

// start first consensus round
if (!config_->reporting() &&
!m_networkOPs->beginConsensus(
if (!m_networkOPs->beginConsensus(
m_ledgerMaster->getClosedLedger()->info().hash))
{
JLOG(m_journal.fatal()) << "Unable to start consensus";
@@ -1552,9 +1516,6 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline)
validatorSites_->start();

if (reportingETL_)
    reportingETL_->start();

// Datagram monitor if applicable
if (!config_->standalone() && !config_->DATAGRAM_MONITOR.empty())
{
@@ -1678,10 +1639,6 @@ ApplicationImp::run()
m_inboundTransactions->stop();
m_inboundLedgers->stop();
ledgerCleaner_->stop();
if (reportingETL_)
    reportingETL_->stop();
if (auto pg = dynamic_cast<PostgresDatabase*>(&*mRelationalDatabase))
    pg->stop();
m_nodeStore->stop();
perfLog_->stop();
@@ -99,8 +99,6 @@ class RelationalDatabase;
class DatabaseCon;
class SHAMapStore;

class ReportingETL;

using NodeCache = TaggedCache<SHAMapHash, Blob>;

template <class Adaptor>
@@ -252,9 +250,6 @@ public:
virtual std::chrono::milliseconds
getIOLatency() = 0;

virtual ReportingETL&
getReportingETL() = 0;

virtual bool
serverOkay(std::string& reason) = 0;
@@ -18,7 +18,6 @@
//==============================================================================

#include <xrpld/app/main/GRPCServer.h>
#include <xrpld/app/reporting/P2pProxy.h>
#include <xrpl/beast/core/CurrentThreadName.h>
#include <xrpl/resource/Fees.h>
@@ -187,11 +186,6 @@ GRPCServerImpl::CallData<Request, Response>::process(
    InfoSub::pointer(),
    apiVersion},
request_};
if (shouldForwardToP2p(context, requiredCondition_))
{
    forwardToP2p(context);
    return;
}

// Make sure we can currently handle the rpc
error_code_i conditionMetRes =
@@ -207,18 +201,9 @@ GRPCServerImpl::CallData<Request, Response>::process(
}
else
{
try
{
std::pair<Response, grpc::Status> result =
handler_(context);
setIsUnlimited(result.first, isUnlimited);
responder_.Finish(result.first, result.second, this);
}
catch (ReportingShouldProxy&)
{
forwardToP2p(context);
return;
}
std::pair<Response, grpc::Status> result = handler_(context);
setIsUnlimited(result.first, isUnlimited);
responder_.Finish(result.first, result.second, this);
}
}
}
@@ -229,46 +214,6 @@ GRPCServerImpl::CallData<Request, Response>::process(
}
}

template <class Request, class Response>
void
GRPCServerImpl::CallData<Request, Response>::forwardToP2p(
    RPC::GRPCContext<Request>& context)
{
    if (auto descriptor =
            Request::GetDescriptor()->FindFieldByName("client_ip"))
    {
        Request::GetReflection()->SetString(&request_, descriptor, ctx_.peer());
        JLOG(app_.journal("gRPCServer").debug())
            << "Set client_ip to " << ctx_.peer();
    }
    else
    {
        assert(false);
        Throw<std::runtime_error>(
            "Attempting to forward but no client_ip field in "
            "protobuf message");
    }
    auto stub = getP2pForwardingStub(context);
    if (stub)
    {
        grpc::ClientContext clientContext;
        Response response;
        auto status = forward_(stub.get(), &clientContext, request_, &response);
        responder_.Finish(response, status, this);
        JLOG(app_.journal("gRPCServer").debug()) << "Forwarded request to tx";
    }
    else
    {
        JLOG(app_.journal("gRPCServer").error())
            << "Failed to forward request to tx";
        grpc::Status status{
            grpc::StatusCode::INTERNAL,
            "Attempted to act as proxy but failed "
            "to create forwarding stub"};
        responder_.FinishWithError(status, this);
    }
}

template <class Request, class Response>
bool
GRPCServerImpl::CallData<Request, Response>::isFinished()
@@ -289,29 +234,10 @@ GRPCServerImpl::CallData<Request, Response>::getRole(bool isUnlimited)
{
    if (isUnlimited)
        return Role::IDENTIFIED;
    else if (wasForwarded())
        return Role::PROXY;
    else
        return Role::USER;
}

template <class Request, class Response>
bool
GRPCServerImpl::CallData<Request, Response>::wasForwarded()
{
    if (auto descriptor =
            Request::GetDescriptor()->FindFieldByName("client_ip"))
    {
        std::string clientIp =
            Request::GetReflection()->GetString(request_, descriptor);
        if (!clientIp.empty())
        {
            return true;
        }
    }
    return false;
}
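The pattern above (and in forwardToP2p and getProxiedClientEndpoint) is plain protobuf reflection: look up a field descriptor by name, then read the field. A self-contained sketch of that lookup, independent of the rippled templates; the helper name is hypothetical:

    #include <google/protobuf/message.h>

    #include <optional>
    #include <string>

    // Read an optional string field by name from any protobuf message;
    // returns std::nullopt when the message type has no such field.
    std::optional<std::string>
    getStringField(google::protobuf::Message const& msg, std::string const& name)
    {
        auto const* field = msg.GetDescriptor()->FindFieldByName(name);
        if (!field)
            return std::nullopt;
        return msg.GetReflection()->GetString(msg, field);
    }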
template <class Request, class Response>
std::optional<std::string>
GRPCServerImpl::CallData<Request, Response>::getUser()
@@ -338,35 +264,6 @@ GRPCServerImpl::CallData<Request, Response>::getClientIpAddress()
    return {};
}

template <class Request, class Response>
std::optional<boost::asio::ip::address>
GRPCServerImpl::CallData<Request, Response>::getProxiedClientIpAddress()
{
    auto endpoint = getProxiedClientEndpoint();
    if (endpoint)
        return endpoint->address();
    return {};
}

template <class Request, class Response>
std::optional<boost::asio::ip::tcp::endpoint>
GRPCServerImpl::CallData<Request, Response>::getProxiedClientEndpoint()
{
    auto descriptor = Request::GetDescriptor()->FindFieldByName("client_ip");
    if (descriptor)
    {
        std::string clientIp =
            Request::GetReflection()->GetString(request_, descriptor);
        if (!clientIp.empty())
        {
            JLOG(app_.journal("gRPCServer").debug())
                << "Got client_ip from request : " << clientIp;
            return getEndpoint(clientIp);
        }
    }
    return {};
}

template <class Request, class Response>
std::optional<boost::asio::ip::tcp::endpoint>
GRPCServerImpl::CallData<Request, Response>::getClientEndpoint()
@@ -381,8 +278,7 @@ GRPCServerImpl::CallData<Request, Response>::clientIsUnlimited()
if (!getUser())
return false;
auto clientIp = getClientIpAddress();
auto proxiedIp = getProxiedClientIpAddress();
if (clientIp && !proxiedIp)
if (clientIp)
{
for (auto& ip : secureGatewayIPs_)
{
@@ -414,11 +310,7 @@ Resource::Consumer
Resource::Consumer
GRPCServerImpl::CallData<Request, Response>::getUsage()
{
auto endpoint = getClientEndpoint();
auto proxiedEndpoint = getProxiedClientEndpoint();
if (proxiedEndpoint)
return app_.getResourceManager().newInboundEndpoint(
beast::IP::from_asio(proxiedEndpoint.value()));
else if (endpoint)
if (endpoint)
return app_.getResourceManager().newInboundEndpoint(
beast::IP::from_asio(endpoint.value()));
Throw<std::runtime_error>("Failed to get client endpoint");
@@ -375,7 +375,6 @@ run(int argc, char** argv)
"quorum",
po::value<std::size_t>(),
"Override the minimum validation quorum.")(
"reportingReadOnly", "Run in read-only reporting mode")(
"silent", "No output to the console after startup.")(
"standalone,a", "Run with no peers.")("verbose,v", "Verbose logging.")
@@ -400,9 +399,6 @@ run(int argc, char** argv)
po::value<std::string>(),
"Trap a specific transaction during replay.")(
"start", "Start from a fresh Ledger.")(
"startReporting",
po::value<std::string>(),
"Start reporting from a fresh Ledger.")(
"vacuum", "VACUUM the transaction db.")(
"valid", "Consider the initial ledger a valid network ledger.");
@@ -658,17 +654,6 @@ run(int argc, char** argv)
config->START_UP = Config::FRESH;
}

if (vm.count("startReporting"))
{
    config->START_UP = Config::FRESH;
    config->START_LEDGER = vm["startReporting"].as<std::string>();
}

if (vm.count("reportingReadOnly"))
{
    config->setReportingReadOnly(true);
}

if (vm.count("import"))
    config->doImport = true;
@@ -412,7 +412,7 @@ private:
CountedObjects::getInstance().getCounts(1);

// Database metrics if applicable
if (!app_.config().reporting() && app_.config().useTxTables())
if (app_.config().useTxTables())
{
auto const db =
dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase());
@@ -723,10 +723,7 @@ private:
header->uptime = UptimeClock::now().time_since_epoch().count();
header->io_latency_us = app_.getIOLatency().count();
header->validation_quorum = app_.validators().quorum();

if (!app_.config().reporting())
header->peer_count = app_.overlay().size();

header->peer_count = app_.overlay().size();
header->node_size = app_.config().NODE_SIZE;

// Get state accounting data
@@ -790,25 +787,22 @@ private:
if (fp != 0)
header->fetch_pack_size = fp;

// Pack load factor info if not reporting
if (!app_.config().reporting())
{
auto const escalationMetrics =
app_.getTxQ().getMetrics(*app_.openLedger().current());
auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
auto const loadFactorFeeEscalation =
mulDiv(
escalationMetrics.openLedgerFeeLevel,
loadBaseServer,
escalationMetrics.referenceFeeLevel)
.value_or(muldiv_max);
// Pack load factor info
auto const escalationMetrics =
app_.getTxQ().getMetrics(*app_.openLedger().current());
auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
auto const loadFactorFeeEscalation =
mulDiv(
escalationMetrics.openLedgerFeeLevel,
loadBaseServer,
escalationMetrics.referenceFeeLevel)
.value_or(muldiv_max);

header->load_factor = std::max(
safe_cast<std::uint64_t>(loadFactorServer),
loadFactorFeeEscalation);
header->load_base = loadBaseServer;
}
header->load_factor = std::max(
safe_cast<std::uint64_t>(loadFactorServer),
loadFactorFeeEscalation);
header->load_base = loadBaseServer;
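The arithmetic preserved by this hunk rescales the open-ledger fee level into the server's unitless load-factor units and takes the dearer of the two signals. A toy standalone illustration (the saturating/rounding behavior of rippled's mulDiv is elided; names mirror the diff):

    #include <algorithm>
    #include <cstdint>

    // load_factor = max(server load, fee level * loadBase / referenceLevel).
    std::uint64_t
    loadFactor(std::uint64_t loadFactorServer,
               std::uint64_t loadBaseServer,
               std::uint64_t openLedgerFeeLevel,
               std::uint64_t referenceFeeLevel)
    {
        auto const scaled =
            openLedgerFeeLevel * loadBaseServer / referenceFeeLevel;
        return std::max(loadFactorServer, scaled);
    }
    // e.g. server 256, base 256, open-ledger level 2560, reference 256 -> 2560.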
#if defined(__linux__)
// Get system info using sysinfo
@@ -899,7 +893,7 @@ private:
// Pack ledger info and ranges
auto lpClosed = ledgerMaster.getValidatedLedger();
if (!lpClosed && !app_.config().reporting())
if (!lpClosed)
lpClosed = ledgerMaster.getClosedLedger();

if (lpClosed)
@@ -40,9 +40,7 @@
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/misc/detail/AccountTxPaging.h>
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/reporting/ReportingETL.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/ConsensusParms.h>
@@ -389,15 +387,6 @@ public:
void
pubValidation(std::shared_ptr<STValidation> const& val) override;

void
forwardValidation(Json::Value const& jvObj) override;
void
forwardManifest(Json::Value const& jvObj) override;
void
forwardProposedTransaction(Json::Value const& jvObj) override;
void
forwardProposedAccountTransaction(Json::Value const& jvObj) override;

//--------------------------------------------------------------------------
//
// InfoSub::Source.
@@ -2532,8 +2521,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
if (fp != 0)
info[jss::fetch_pack] = Json::UInt(fp);

if (!app_.config().reporting())
info[jss::peers] = Json::UInt(app_.overlay().size());
info[jss::peers] = Json::UInt(app_.overlay().size());

Json::Value lastClose = Json::objectValue;
lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
@@ -2556,85 +2544,80 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
if (admin)
info[jss::load] = m_job_queue.getJson();

if (!app_.config().reporting())
if (auto const netid = app_.overlay().networkID())
info[jss::network_id] = static_cast<Json::UInt>(*netid);

auto const escalationMetrics =
app_.getTxQ().getMetrics(*app_.openLedger().current());

auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
/* Scale the escalated fee level to unitless "load factor".
In practice, this just strips the units, but it will continue
to work correctly if either base value ever changes. */
auto const loadFactorFeeEscalation =
mulDiv(
escalationMetrics.openLedgerFeeLevel,
loadBaseServer,
escalationMetrics.referenceFeeLevel)
.value_or(ripple::muldiv_max);

auto const loadFactor = std::max(
safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);

if (!human)
{
if (auto const netid = app_.overlay().networkID())
info[jss::network_id] = static_cast<Json::UInt>(*netid);
info[jss::load_base] = loadBaseServer;
info[jss::load_factor] = trunc32(loadFactor);
info[jss::load_factor_server] = loadFactorServer;

auto const escalationMetrics =
app_.getTxQ().getMetrics(*app_.openLedger().current());
/* Json::Value doesn't support uint64, so clamp to max
uint32 value. This is mostly theoretical, since there
probably isn't enough extant XRP to drive the factor
that high.
*/
info[jss::load_factor_fee_escalation] =
escalationMetrics.openLedgerFeeLevel.jsonClipped();
info[jss::load_factor_fee_queue] =
escalationMetrics.minProcessingFeeLevel.jsonClipped();
info[jss::load_factor_fee_reference] =
escalationMetrics.referenceFeeLevel.jsonClipped();
}
else
{
info[jss::load_factor] =
static_cast<double>(loadFactor) / loadBaseServer;

auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
/* Scale the escalated fee level to unitless "load factor".
In practice, this just strips the units, but it will continue
to work correctly if either base value ever changes. */
auto const loadFactorFeeEscalation =
mulDiv(
escalationMetrics.openLedgerFeeLevel,
loadBaseServer,
escalationMetrics.referenceFeeLevel)
.value_or(ripple::muldiv_max);
if (loadFactorServer != loadFactor)
info[jss::load_factor_server] =
static_cast<double>(loadFactorServer) / loadBaseServer;

auto const loadFactor = std::max(
safe_cast<std::uint64_t>(loadFactorServer),
loadFactorFeeEscalation);

if (!human)
if (admin)
{
info[jss::load_base] = loadBaseServer;
info[jss::load_factor] = trunc32(loadFactor);
info[jss::load_factor_server] = loadFactorServer;

/* Json::Value doesn't support uint64, so clamp to max
uint32 value. This is mostly theoretical, since there
probably isn't enough extant XRP to drive the factor
that high.
*/
std::uint32_t fee = app_.getFeeTrack().getLocalFee();
if (fee != loadBaseServer)
info[jss::load_factor_local] =
static_cast<double>(fee) / loadBaseServer;
fee = app_.getFeeTrack().getRemoteFee();
if (fee != loadBaseServer)
info[jss::load_factor_net] =
static_cast<double>(fee) / loadBaseServer;
fee = app_.getFeeTrack().getClusterFee();
if (fee != loadBaseServer)
info[jss::load_factor_cluster] =
static_cast<double>(fee) / loadBaseServer;
}
if (escalationMetrics.openLedgerFeeLevel !=
escalationMetrics.referenceFeeLevel &&
(admin || loadFactorFeeEscalation != loadFactor))
info[jss::load_factor_fee_escalation] =
escalationMetrics.openLedgerFeeLevel.jsonClipped();
escalationMetrics.openLedgerFeeLevel.decimalFromReference(
escalationMetrics.referenceFeeLevel);
if (escalationMetrics.minProcessingFeeLevel !=
escalationMetrics.referenceFeeLevel)
info[jss::load_factor_fee_queue] =
escalationMetrics.minProcessingFeeLevel.jsonClipped();
info[jss::load_factor_fee_reference] =
escalationMetrics.referenceFeeLevel.jsonClipped();
}
else
{
info[jss::load_factor] =
static_cast<double>(loadFactor) / loadBaseServer;

if (loadFactorServer != loadFactor)
info[jss::load_factor_server] =
static_cast<double>(loadFactorServer) / loadBaseServer;

if (admin)
{
std::uint32_t fee = app_.getFeeTrack().getLocalFee();
if (fee != loadBaseServer)
info[jss::load_factor_local] =
static_cast<double>(fee) / loadBaseServer;
fee = app_.getFeeTrack().getRemoteFee();
if (fee != loadBaseServer)
info[jss::load_factor_net] =
static_cast<double>(fee) / loadBaseServer;
fee = app_.getFeeTrack().getClusterFee();
if (fee != loadBaseServer)
info[jss::load_factor_cluster] =
static_cast<double>(fee) / loadBaseServer;
}
if (escalationMetrics.openLedgerFeeLevel !=
escalationMetrics.referenceFeeLevel &&
(admin || loadFactorFeeEscalation != loadFactor))
info[jss::load_factor_fee_escalation] =
escalationMetrics.openLedgerFeeLevel.decimalFromReference(
escalationMetrics.referenceFeeLevel);
if (escalationMetrics.minProcessingFeeLevel !=
escalationMetrics.referenceFeeLevel)
info[jss::load_factor_fee_queue] =
escalationMetrics.minProcessingFeeLevel
.decimalFromReference(
||||
escalationMetrics.referenceFeeLevel);
|
||||
}
|
||||
escalationMetrics.minProcessingFeeLevel.decimalFromReference(
|
||||
escalationMetrics.referenceFeeLevel);
|
||||
}
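The hunk above reshuffles how getServerInfo scales the escalated open-ledger fee level into the unitless load factor it reports. A standalone sketch of that arithmetic, with plain integers standing in for rippled's FeeLevel types and the muldiv_max overflow guard omitted:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main()
{
    std::uint64_t const loadBaseServer = 256;      // FeeTrack base unit
    std::uint64_t const loadFactorServer = 256;    // server's own factor
    std::uint64_t const openLedgerFeeLevel = 768;  // escalated fee level
    std::uint64_t const referenceFeeLevel = 256;   // reference fee level

    // mulDiv(openLedgerFeeLevel, loadBaseServer, referenceFeeLevel):
    // rescale the escalated level into loadBase units.
    std::uint64_t const loadFactorFeeEscalation =
        openLedgerFeeLevel * loadBaseServer / referenceFeeLevel;

    // The reported factor is the larger of the two sources of load.
    std::uint64_t const loadFactor =
        std::max(loadFactorServer, loadFactorFeeEscalation);

    // The human-readable form divides the units back out: 3.0 here.
    std::cout << static_cast<double>(loadFactor) / loadBaseServer << '\n';
}
```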

bool valid = false;
@@ -2642,7 +2625,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)

if (lpClosed)
valid = true;
else if (!app_.config().reporting())
else
lpClosed = m_ledgerMaster.getClosedLedger();

if (lpClosed)
@@ -2679,11 +2662,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
l[jss::close_time_offset] =
static_cast<std::uint32_t>(closeOffset.count());

#if RIPPLED_REPORTING
std::int64_t const dbAge =
std::max(m_ledgerMaster.getValidatedLedgerAge().count(), 0L);
l[jss::age] = Json::UInt(dbAge);
#else
constexpr std::chrono::seconds highAgeThreshold{1000000};
if (m_ledgerMaster.haveValidated())
{
@@ -2703,7 +2681,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
Json::UInt(age < highAgeThreshold ? age.count() : 0);
}
}
#endif
}

if (valid)
@@ -2720,19 +2697,12 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)

accounting_.json(info);
info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
if (!app_.config().reporting())
{
info[jss::jq_trans_overflow] =
std::to_string(app_.overlay().getJqTransOverflow());
info[jss::peer_disconnects] =
std::to_string(app_.overlay().getPeerDisconnect());
info[jss::peer_disconnects_resources] =
std::to_string(app_.overlay().getPeerDisconnectCharges());
}
else
{
info["reporting"] = app_.getReportingETL().getInfo();
}
info[jss::jq_trans_overflow] =
std::to_string(app_.overlay().getJqTransOverflow());
info[jss::peer_disconnects] =
std::to_string(app_.overlay().getPeerDisconnect());
info[jss::peer_disconnects_resources] =
std::to_string(app_.overlay().getPeerDisconnectCharges());

// This array must be sorted in increasing order.
static constexpr std::array<std::string_view, 7> protocols{
@@ -2832,77 +2802,6 @@ NetworkOPsImp::pubProposedTransaction(
pubProposedAccountTransaction(ledger, transaction, result);
}

void
NetworkOPsImp::forwardProposedTransaction(Json::Value const& jvObj)
{
// reporting does not forward validated transactions
// validated transactions will be published to the proper streams when the
// etl process writes a validated ledger
if (jvObj[jss::validated].asBool())
return;
{
std::lock_guard sl(mSubLock);

auto it = mStreamMaps[sRTTransactions].begin();
while (it != mStreamMaps[sRTTransactions].end())
{
InfoSub::pointer p = it->second.lock();

if (p)
{
p->send(jvObj, true);
++it;
}
else
{
it = mStreamMaps[sRTTransactions].erase(it);
}
}
}

forwardProposedAccountTransaction(jvObj);
}

void
NetworkOPsImp::forwardValidation(Json::Value const& jvObj)
{
std::lock_guard sl(mSubLock);

for (auto i = mStreamMaps[sValidations].begin();
i != mStreamMaps[sValidations].end();)
{
if (auto p = i->second.lock())
{
p->send(jvObj, true);
++i;
}
else
{
i = mStreamMaps[sValidations].erase(i);
}
}
}

void
NetworkOPsImp::forwardManifest(Json::Value const& jvObj)
{
std::lock_guard sl(mSubLock);

for (auto i = mStreamMaps[sManifests].begin();
i != mStreamMaps[sManifests].end();)
{
if (auto p = i->second.lock())
{
p->send(jvObj, true);
++i;
}
else
{
i = mStreamMaps[sManifests].erase(i);
}
}
}

static void
getAccounts(Json::Value const& jvObj, std::vector<AccountID>& accounts)
{
@@ -2921,74 +2820,6 @@ getAccounts(Json::Value const& jvObj, std::vector<AccountID>& accounts)
}
}

void
NetworkOPsImp::forwardProposedAccountTransaction(Json::Value const& jvObj)
{
hash_set<InfoSub::pointer> notify;
int iProposed = 0;
// check if there are any subscribers before attempting to parse the JSON
{
std::lock_guard sl(mSubLock);

if (mSubRTAccount.empty())
return;
}

// parse the JSON outside of the lock
std::vector<AccountID> accounts;
if (jvObj.isMember(jss::transaction))
{
try
{
getAccounts(jvObj[jss::transaction], accounts);
}
catch (...)
{
JLOG(m_journal.debug())
<< __func__ << " : "
<< "error parsing json for accounts affected";
return;
}
}
{
std::lock_guard sl(mSubLock);

if (!mSubRTAccount.empty())
{
for (auto const& affectedAccount : accounts)
{
auto simiIt = mSubRTAccount.find(affectedAccount);
if (simiIt != mSubRTAccount.end())
{
auto it = simiIt->second.begin();

while (it != simiIt->second.end())
{
InfoSub::pointer p = it->second.lock();

if (p)
{
notify.insert(p);
++it;
++iProposed;
}
else
it = simiIt->second.erase(it);
}
}
}
}
}
JLOG(m_journal.trace()) << "forwardProposedAccountTransaction:"
<< " iProposed=" << iProposed;

if (!notify.empty())
{
for (InfoSub::ref isrListener : notify)
isrListener->send(jvObj, true);
}
}

void
NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
{
@@ -3105,8 +2936,6 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
void
NetworkOPsImp::reportFeeChange()
{
if (app_.config().reporting())
return;
ServerFeeSummary f{
app_.openLedger().current()->fees().base,
app_.getTxQ().getMetrics(*app_.openLedger().current()),
@@ -3605,30 +3434,8 @@ NetworkOPsImp::unsubAccountInternal(
void
NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
{
enum DatabaseType { Postgres, Sqlite, None };
enum DatabaseType { Sqlite, None };
static const auto databaseType = [&]() -> DatabaseType {
#ifdef RIPPLED_REPORTING
if (app_.config().reporting())
{
// Use a dynamic_cast to return DatabaseType::None
// on failure.
if (dynamic_cast<PostgresDatabase*>(&app_.getRelationalDatabase()))
{
return DatabaseType::Postgres;
}
return DatabaseType::None;
}
else
{
// Use a dynamic_cast to return DatabaseType::None
// on failure.
if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
{
return DatabaseType::Sqlite;
}
return DatabaseType::None;
}
#else
// Use a dynamic_cast to return DatabaseType::None
// on failure.
if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
@@ -3636,7 +3443,6 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
return DatabaseType::Sqlite;
}
return DatabaseType::None;
#endif
}();
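After this change the lambda above probes only for a SQLite backend, mapping a failed dynamic_cast to DatabaseType::None instead of special-casing reporting servers. A minimal self-contained sketch of that dispatch pattern, with stand-in types rather than the rippled classes:

```cpp
#include <iostream>

struct RelationalDatabase { virtual ~RelationalDatabase() = default; };
struct SQLiteDatabase : RelationalDatabase {};

enum class DatabaseType { Sqlite, None };

// Mirrors the probe above: a failed dynamic_cast yields nullptr,
// which maps to DatabaseType::None rather than throwing.
DatabaseType
detect(RelationalDatabase& db)
{
    if (dynamic_cast<SQLiteDatabase*>(&db))
        return DatabaseType::Sqlite;
    return DatabaseType::None;
}

int main()
{
    SQLiteDatabase sqlite;
    RelationalDatabase other;
    std::cout << (detect(sqlite) == DatabaseType::Sqlite) << '\n';  // 1
    std::cout << (detect(other) == DatabaseType::None) << '\n';     // 1
}
```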

if (databaseType == DatabaseType::None)
@@ -3739,40 +3545,6 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
std::optional<RelationalDatabase::AccountTxMarker>>> {
switch (dbType)
{
case Postgres: {
auto db = static_cast<PostgresDatabase*>(
&app_.getRelationalDatabase());
RelationalDatabase::AccountTxArgs args;
args.account = accountId;
LedgerRange range{minLedger, maxLedger};
args.ledger = range;
args.marker = marker;
auto [txResult, status] = db->getAccountTx(args);
if (status != rpcSUCCESS)
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " getAccountTx failed";
return {};
}

if (auto txns =
std::get_if<RelationalDatabase::AccountTxs>(
&txResult.transactions);
txns)
{
return std::make_pair(*txns, txResult.marker);
}
else
{
JLOG(m_journal.debug())
<< "AccountHistory job for account "
<< toBase58(accountId)
<< " getAccountTx wrong data";
return {};
}
}
case Sqlite: {
auto db = static_cast<SQLiteDatabase*>(
&app_.getRelationalDatabase());

@@ -244,15 +244,6 @@ public:
virtual void
pubValidation(std::shared_ptr<STValidation> const& val) = 0;

virtual void
forwardValidation(Json::Value const& jvObj) = 0;
virtual void
forwardManifest(Json::Value const& jvObj) = 0;
virtual void
forwardProposedTransaction(Json::Value const& jvObj) = 0;
virtual void
forwardProposedAccountTransaction(Json::Value const& jvObj) = 0;

virtual void
stateAccounting(Json::Value& obj) = 0;
};

@@ -24,7 +24,6 @@
#include <xrpld/app/rdb/State.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/core/ConfigSections.h>
#include <xrpld/core/Pg.h>
#include <xrpld/nodestore/Scheduler.h>
#include <xrpld/nodestore/detail/DatabaseRotatingImp.h>
#include <xrpld/shamap/SHAMapMissingNode.h>
@@ -120,13 +119,6 @@ SHAMapStoreImp::SHAMapStoreImp(

if (deleteInterval_)
{
if (app_.config().reporting())
{
Throw<std::runtime_error>(
"Reporting does not support online_delete. Remove "
"online_delete info from config");
}

// Configuration that affects the behavior of online delete
get_if_exists(section, "delete_batch", deleteBatch_);
std::uint32_t temp;
@@ -189,12 +181,6 @@ SHAMapStoreImp::makeNodeStore(int readThreads)

if (deleteInterval_)
{
if (app_.config().reporting())
{
Throw<std::runtime_error>(
"Reporting does not support online_delete. Remove "
"online_delete info from config");
}
SavedState state = state_db_.getState();

auto writableBackend = makeBackendRotating(state.writableDb);
@@ -282,13 +268,6 @@ SHAMapStoreImp::copyNode(std::uint64_t& nodeCount, SHAMapTreeNode const& node)
void
SHAMapStoreImp::run()
{
if (app_.config().reporting())
{
assert(false);
Throw<std::runtime_error>(
"Reporting does not support online_delete. Remove "
"online_delete info from config");
}
beast::setCurrentThreadName("SHAMapStore");
LedgerIndex lastRotated = state_db_.getState().lastRotated;
netOPs_ = &app_.getOPs();
@@ -600,13 +579,6 @@ SHAMapStoreImp::freshenCaches()
void
SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
{
if (app_.config().reporting())
{
assert(false);
Throw<std::runtime_error>(
"Reporting does not support online_delete. Remove "
"online_delete info from config");
}
// Do not allow ledgers to be acquired from the network
// that are about to be deleted.
minimumOnline_ = lastRotated + 1;

@@ -21,11 +21,9 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/core/DatabaseCon.h>
#include <xrpld/core/Pg.h>
#include <xrpld/rpc/CTID.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/safe_cast.h>
@@ -139,20 +137,6 @@ Transaction::load(
return load(id, app, op{range}, ec);
}

Transaction::Locator
Transaction::locate(uint256 const& id, Application& app)
{
auto const db =
dynamic_cast<PostgresDatabase*>(&app.getRelationalDatabase());

if (!db)
{
Throw<std::runtime_error>("Failed to get relational database");
}

return db->locateTransaction(id);
}

std::variant<
std::pair<std::shared_ptr<Transaction>, std::shared_ptr<TxMeta>>,
TxSearched>

@@ -28,9 +28,7 @@ src/xrpld/app/rdb/
│ ├── detail
│ │ ├── Node.cpp
│ │ ├── Node.h
│ │ ├── PostgresDatabase.cpp
│ │ └── SQLiteDatabase.cpp
│ ├── PostgresDatabase.h
│ └── SQLiteDatabase.h
├── detail
│ ├── PeerFinder.cpp
@@ -50,7 +48,6 @@ src/xrpld/app/rdb/
| File | Contents |
| ----------- | ----------- |
| `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases |
| <nobr>`PostgresDatabase.[h\|cpp]`</nobr> | Defines/Implements the class `PostgresDatabase`/`PostgresDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores |
| `SQLiteDatabase.[h\|cpp]` | Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores |
| `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database |
| `RelationalDatabase.cpp` | Implements the static method `RelationalDatabase::init` which is used to initialize an instance of `RelationalDatabase` |

@@ -111,29 +111,6 @@ public:
std::optional<AccountTxMarker> marker;
};

/// Struct used to keep track of what to write to transactions and
/// account_transactions tables in Postgres
struct AccountTransactionsData
{
boost::container::flat_set<AccountID> accounts;
uint32_t ledgerSequence;
uint32_t transactionIndex;
uint256 txHash;
uint256 nodestoreHash;

AccountTransactionsData(
TxMeta const& meta,
uint256 const& nodestoreHash,
beast::Journal j)
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(meta.getTxID())
, nodestoreHash(nodestoreHash)
{
}
};

/**
* @brief init Creates and returns an appropriate RelationalDatabase
* instance based on configuration.

@@ -1,113 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED
#define RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED

#include <xrpld/app/rdb/RelationalDatabase.h>

namespace ripple {

class PostgresDatabase : public RelationalDatabase
{
public:
virtual void
stop() = 0;

/**
* @brief sweep Sweeps the database.
*/
virtual void
sweep() = 0;

/**
* @brief getCompleteLedgers Returns a string which contains a list of
* completed ledgers.
* @return String with completed ledger sequences
*/
virtual std::string
getCompleteLedgers() = 0;

/**
* @brief getValidatedLedgerAge Returns the age of the last validated
* ledger.
* @return Age of the last validated ledger in seconds
*/
virtual std::chrono::seconds
getValidatedLedgerAge() = 0;

/**
* @brief writeLedgerAndTransactions Writes new ledger and transaction data
* into the database.
* @param info Ledger info to write.
* @param accountTxData Transaction data to write
* @return True on success, false on failure.
*/
virtual bool
writeLedgerAndTransactions(
LedgerInfo const& info,
std::vector<AccountTransactionsData> const& accountTxData) = 0;

/**
* @brief getTxHashes Returns a vector of the hashes of transactions
* belonging to the ledger with the provided sequence.
* @param seq Ledger sequence
* @return Vector of transaction hashes
*/
virtual std::vector<uint256>
getTxHashes(LedgerIndex seq) = 0;

/**
* @brief getAccountTx Get the last account transactions specified by the
* AccountTxArgs struct.
* @param args Arguments which specify the account and which transactions to
* return.
* @return Vector of account transactions and the RPC status response.
*/
virtual std::pair<AccountTxResult, RPC::Status>
getAccountTx(AccountTxArgs const& args) = 0;

/**
* @brief locateTransaction Returns information used to locate
* a transaction.
* @param id Hash of the transaction.
* @return Information used to locate a transaction. Contains a nodestore
* hash and a ledger sequence pair if the transaction was found.
* Otherwise, contains the range of ledgers present in the database
* at the time of search.
*/
virtual Transaction::Locator
locateTransaction(uint256 const& id) = 0;

/**
* @brief isCaughtUp returns whether the database is caught up with the
* network
* @param[out] reason if the database is not caught up, reason contains a
* helpful message describing why
* @return false if the most recently written ledger has a close time
* over 3 minutes ago, or if there are no ledgers in the
* database. true otherwise
*/
virtual bool
isCaughtUp(std::string& reason) = 0;
};

} // namespace ripple

#endif
File diff suppressed because it is too large
@@ -28,9 +28,6 @@ namespace ripple {
extern std::unique_ptr<RelationalDatabase>
getSQLiteDatabase(Application& app, Config const& config, JobQueue& jobQueue);

extern std::unique_ptr<RelationalDatabase>
getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue);

std::unique_ptr<RelationalDatabase>
RelationalDatabase::init(
Application& app,
@@ -38,17 +35,12 @@ RelationalDatabase::init(
JobQueue& jobQueue)
{
bool use_sqlite = false;
bool use_postgres = false;
bool use_rwdb = false;
bool use_flatmap = false;

if (config.reporting())
const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)};
if (!rdb_section.empty())
{
use_postgres = true;
}
else
{
const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)};
if (!rdb_section.empty())
{
if (boost::iequals(get(rdb_section, "backend"), "sqlite"))
@@ -72,18 +64,20 @@ RelationalDatabase::init(
}
else
{
use_sqlite = true;
Throw<std::runtime_error>(
"Invalid rdb_section backend value: " +
get(rdb_section, "backend"));
}
}
else
{
use_sqlite = true;
}

if (use_sqlite)
{
return getSQLiteDatabase(app, config, jobQueue);
}
else if (use_postgres)
{
return getPostgresDatabase(app, config, jobQueue);
}
else if (use_rwdb)
{
return getRWDBDatabase(app, config, jobQueue);
@@ -95,5 +89,4 @@ RelationalDatabase::init(

return std::unique_ptr<RelationalDatabase>();
}

} // namespace ripple
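With the reporting() branch gone, RelationalDatabase::init chooses a backend from the [relational_db] config section alone and defaults to SQLite when that section is absent. A rough standalone sketch of the selection rule; the real code reads Config sections and compares case-insensitively with boost::iequals, so the plain string comparison here is a simplification:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the backend selection above: an empty
// [relational_db] section defaults to SQLite; unknown values throw.
std::string
chooseBackend(std::string const& configured)
{
    if (configured.empty())
        return "sqlite";
    if (configured == "sqlite" || configured == "rwdb" ||
        configured == "flatmap")
        return configured;
    throw std::runtime_error(
        "Invalid rdb_section backend value: " + configured);
}

int main()
{
    std::cout << chooseBackend("") << '\n';      // sqlite (default)
    std::cout << chooseBackend("rwdb") << '\n';  // rwdb
}
```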

@@ -1,195 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED
#include <xrpld/app/main/Application.h>
#include <xrpld/ledger/ReadView.h>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>
#include <sstream>

namespace ripple {

/// This datastructure is used to keep track of the sequence of the most recent
/// ledger validated by the network. There are two methods that will wait until
/// certain conditions are met. This datastructure is able to be "stopped". When
/// the datastructure is stopped, any threads currently waiting are unblocked.
/// Any later calls to methods of this datastructure will not wait. Once the
/// datastructure is stopped, the datastructure remains stopped for the rest of
/// its lifetime.
class NetworkValidatedLedgers
{
// max sequence validated by network
std::optional<uint32_t> max_;

mutable std::mutex m_;

mutable std::condition_variable cv_;

bool stopping_ = false;

public:
/// Notify the datastructure that idx has been validated by the network
/// @param idx sequence validated by network
void
push(uint32_t idx)
{
std::lock_guard lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
}

/// Get most recently validated sequence. If no ledgers are known to have
/// been validated, this function waits until the next ledger is validated
/// @return sequence of most recently validated ledger. empty optional if
/// the datastructure has been stopped
std::optional<uint32_t>
getMostRecent() const
{
std::unique_lock lck(m_);
cv_.wait(lck, [this]() { return max_ || stopping_; });
return max_;
}

/// Get most recently validated sequence.
/// @return sequence of most recently validated ledger, or empty optional
/// if no ledgers are known to have been validated.
std::optional<uint32_t>
tryGetMostRecent() const
{
std::unique_lock lk(m_);
return max_;
}

/// Waits for the sequence to be validated by the network
/// @param sequence to wait for
/// @return true if sequence was validated, false otherwise
/// a return value of false means the datastructure has been stopped
bool
waitUntilValidatedByNetwork(uint32_t sequence)
{
std::unique_lock lck(m_);
cv_.wait(lck, [sequence, this]() {
return (max_ && sequence <= *max_) || stopping_;
});
return !stopping_;
}

/// Puts the datastructure in the stopped state
/// Future calls to this datastructure will not block
/// This operation cannot be reversed
void
stop()
{
std::lock_guard lck(m_);
stopping_ = true;
cv_.notify_all();
}
};
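NetworkValidatedLedgers is a classic condition-variable monitor: push() publishes a new maximum under the lock and wakes waiters, while stop() releases them permanently. A condensed, runnable sketch of the same pattern:

```cpp
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>

// Condensed version of the monitor above: a writer publishes ledger
// sequences, a reader blocks until the sequence it needs arrives.
int main()
{
    std::mutex m;
    std::condition_variable cv;
    std::optional<std::uint32_t> max;
    bool stopping = false;

    std::thread reader([&] {
        std::unique_lock lk(m);
        // Same predicate shape as waitUntilValidatedByNetwork(5).
        cv.wait(lk, [&] { return (max && *max >= 5) || stopping; });
        std::cout << "saw sequence " << (max ? *max : 0) << '\n';
    });

    for (std::uint32_t seq = 1; seq <= 5; ++seq)
    {
        std::lock_guard lk(m);
        if (!max || seq > *max)
            max = seq;  // push(): only ever raises the maximum
        cv.notify_all();
    }
    reader.join();
}
```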

/// Generic thread-safe queue with an optional maximum size
/// Note, we can't use a lockfree queue here, since we need the ability to wait
/// for an element to be added or removed from the queue. These waits are
/// blocking calls.
template <class T>
class ThreadSafeQueue
{
std::queue<T> queue_;

mutable std::mutex m_;
std::condition_variable cv_;
std::optional<uint32_t> maxSize_;

public:
/// @param maxSize maximum size of the queue. Calls that would cause the
/// queue to exceed this size will block until free space is available
explicit ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize)
{
}

/// Create a queue with no maximum size
ThreadSafeQueue() = default;

/// @param elt element to push onto queue
/// if maxSize is set, this method will block until free space is available
void
push(T const& elt)
{
std::unique_lock lck(m_);
// if queue has a max size, wait until not full
if (maxSize_)
cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; });
queue_.push(elt);
cv_.notify_all();
}

/// @param elt element to push onto queue. elt is moved from
/// if maxSize is set, this method will block until free space is available
void
push(T&& elt)
{
std::unique_lock lck(m_);
// if queue has a max size, wait until not full
if (maxSize_)
cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; });
queue_.push(std::move(elt));
cv_.notify_all();
}

/// @return element popped from queue. Will block until queue is non-empty
T
pop()
{
std::unique_lock lck(m_);
cv_.wait(lck, [this]() { return !queue_.empty(); });
T ret = std::move(queue_.front());
queue_.pop();
// if queue has a max size, unblock any possible pushers
if (maxSize_)
cv_.notify_all();
return ret;
}
};
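A runnable sketch of how the bounded form of this queue behaves: with a maximum size set, push() blocks until pop() frees space. Note this trimmed copy waits on size() < maxSize, so capacity is strict; the original's <= predicate admits one element beyond maxSize.

```cpp
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Trimmed copy of the queue above, kept just large enough to run:
// push() blocks while the queue is at capacity, pop() while empty.
template <class T>
class BoundedQueue
{
    std::queue<T> q_;
    mutable std::mutex m_;
    std::condition_variable cv_;
    std::uint32_t maxSize_;

public:
    explicit BoundedQueue(std::uint32_t maxSize) : maxSize_(maxSize) {}

    void push(T elt)
    {
        std::unique_lock lk(m_);
        cv_.wait(lk, [this] { return q_.size() < maxSize_; });
        q_.push(std::move(elt));
        cv_.notify_all();
    }

    T pop()
    {
        std::unique_lock lk(m_);
        cv_.wait(lk, [this] { return !q_.empty(); });
        T ret = std::move(q_.front());
        q_.pop();
        cv_.notify_all();  // unblock a waiting pusher
        return ret;
    }
};

int main()
{
    BoundedQueue<int> q(2);
    std::thread producer([&] {
        for (int i = 0; i < 5; ++i)
            q.push(i);  // blocks whenever two items are unconsumed
    });
    for (int i = 0; i < 5; ++i)
        std::cout << q.pop() << '\n';
    producer.join();
}
```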

/// Partitions the uint256 keyspace into numMarkers partitions, each of equal
/// size.
inline std::vector<uint256>
getMarkers(size_t numMarkers)
{
assert(numMarkers <= 256);

unsigned char incr = 256 / numMarkers;

std::vector<uint256> markers;
markers.reserve(numMarkers);
uint256 base{0};
for (size_t i = 0; i < numMarkers; ++i)
{
markers.push_back(base);
base.data()[0] += incr;
}
return markers;
}
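getMarkers splits the keyspace by the first byte only, which is why numMarkers is asserted to be at most 256; with numMarkers = 4 the marker prefixes are 0x00, 0x40, 0x80, 0xC0. A quick standalone check of the prefix arithmetic:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// First-byte prefixes produced by getMarkers(numMarkers) above.
int main()
{
    std::size_t const numMarkers = 4;  // divides 256 evenly
    unsigned char const incr = 256 / numMarkers;

    std::vector<unsigned char> prefixes;
    unsigned char base = 0;
    for (std::size_t i = 0; i < numMarkers; ++i)
    {
        prefixes.push_back(base);
        base += incr;
    }
    for (auto p : prefixes)
        std::printf("%02x\n", p);  // prints 00 40 80 c0
}
```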

} // namespace ripple
#endif
@@ -1,988 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpld/app/reporting/ETLSource.h>
#include <xrpld/app/reporting/ReportingETL.h>
#include <xrpl/beast/core/CurrentThreadName.h>
#include <xrpl/json/json_reader.h>
#include <xrpl/json/json_writer.h>

namespace ripple {

// Create ETL source without grpc endpoint
// Fetch ledger and load initial ledger will fail for this source
// Primarily used in read-only mode, to monitor when ledgers are validated
ETLSource::ETLSource(std::string ip, std::string wsPort, ReportingETL& etl)
: ip_(ip)
, wsPort_(wsPort)
, etl_(etl)
, ioc_(etl.getApplication().getIOService())
, ws_(std::make_unique<
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
boost::asio::make_strand(ioc_)))
, resolver_(boost::asio::make_strand(ioc_))
, networkValidatedLedgers_(etl_.getNetworkValidatedLedgers())
, journal_(etl_.getApplication().journal("ReportingETL::ETLSource"))
, app_(etl_.getApplication())
, timer_(ioc_)
{
}

ETLSource::ETLSource(
std::string ip,
std::string wsPort,
std::string grpcPort,
ReportingETL& etl)
: ip_(ip)
, wsPort_(wsPort)
, grpcPort_(grpcPort)
, etl_(etl)
, ioc_(etl.getApplication().getIOService())
, ws_(std::make_unique<
boost::beast::websocket::stream<boost::beast::tcp_stream>>(
boost::asio::make_strand(ioc_)))
, resolver_(boost::asio::make_strand(ioc_))
, networkValidatedLedgers_(etl_.getNetworkValidatedLedgers())
, journal_(etl_.getApplication().journal("ReportingETL::ETLSource"))
, app_(etl_.getApplication())
, timer_(ioc_)
{
std::string connectionString;
try
{
connectionString =
beast::IP::Endpoint(
boost::asio::ip::make_address(ip_), std::stoi(grpcPort_))
.to_string();

JLOG(journal_.info())
<< "Using IP to connect to ETL source: " << connectionString;
}
catch (std::exception const&)
{
connectionString = "dns:" + ip_ + ":" + grpcPort_;
JLOG(journal_.info())
<< "Using DNS to connect to ETL source: " << connectionString;
}
try
{
stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateChannel(
connectionString, grpc::InsecureChannelCredentials()));
JLOG(journal_.info()) << "Made stub for remote = " << toString();
}
catch (std::exception const& e)
{
JLOG(journal_.error()) << "Exception while creating stub = " << e.what()
<< " . Remote = " << toString();
}
}

void
ETLSource::reconnect(boost::beast::error_code ec)
{
connected_ = false;
// These are somewhat normal errors. operation_aborted occurs on shutdown,
// when the timer is cancelled. connection_refused will occur repeatedly
// if we cannot connect to the transaction processing process
if (ec != boost::asio::error::operation_aborted &&
ec != boost::asio::error::connection_refused)
{
JLOG(journal_.error()) << __func__ << " : "
<< "error code = " << ec << " - " << toString();
}
else
{
JLOG(journal_.warn()) << __func__ << " : "
<< "error code = " << ec << " - " << toString();
}

if (etl_.isStopping())
{
JLOG(journal_.debug()) << __func__ << " : " << toString()
<< " - etl is stopping. aborting reconnect";
return;
}

// exponentially increasing timeouts, with a max of 30 seconds
size_t waitTime = std::min(pow(2, numFailures_), 30.0);
numFailures_++;
timer_.expires_after(boost::asio::chrono::seconds(waitTime));
timer_.async_wait([this, fname = __func__](auto ec) {
bool startAgain = (ec != boost::asio::error::operation_aborted);
JLOG(journal_.trace()) << fname << " async_wait : ec = " << ec;
close(startAgain);
});
}
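reconnect() backs off exponentially and caps the delay at 30 seconds. A standalone print-out of the schedule that min(pow(2, numFailures), 30.0) produces:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>

// Backoff schedule used by ETLSource::reconnect above:
// 1, 2, 4, 8, 16, 30, 30, 30 seconds.
int main()
{
    for (int failures = 0; failures < 8; ++failures)
    {
        std::size_t const waitTime =
            std::min(std::pow(2, failures), 30.0);
        std::cout << "attempt " << failures << ": wait " << waitTime
                  << "s\n";
    }
}
```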

void
ETLSource::close(bool startAgain)
{
timer_.cancel();
ioc_.post([this, startAgain]() {
if (closing_)
return;

if (ws_->is_open())
{
// onStop() also calls close(). If the async_close is called twice,
// an assertion fails. Using closing_ makes sure async_close is only
// called once
closing_ = true;
ws_->async_close(
boost::beast::websocket::close_code::normal,
[this, startAgain, fname = __func__](auto ec) {
if (ec)
{
JLOG(journal_.error())
<< fname << " async_close : "
<< "error code = " << ec << " - " << toString();
}
closing_ = false;
if (startAgain)
start();
});
}
else if (startAgain)
{
start();
}
});
}

void
ETLSource::start()
{
JLOG(journal_.trace()) << __func__ << " : " << toString();

auto const host = ip_;
auto const port = wsPort_;

resolver_.async_resolve(
host, port, [this](auto ec, auto results) { onResolve(ec, results); });
}

void
ETLSource::onResolve(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type results)
{
JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - "
<< toString();
if (ec)
{
// try again
reconnect(ec);
}
else
{
boost::beast::get_lowest_layer(*ws_).expires_after(
std::chrono::seconds(30));

// Use async_connect with the entire results
boost::beast::get_lowest_layer(*ws_).async_connect(
results,
[this](
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type
ep) { onConnect(ec, ep); });
}
}

void
ETLSource::onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - "
<< toString();
if (ec)
{
// start over
reconnect(ec);
}
else
{
numFailures_ = 0;
// Turn off timeout on the tcp stream, because websocket stream has its
// own timeout system
boost::beast::get_lowest_layer(*ws_).expires_never();

// Set suggested timeout settings for the websocket
ws_->set_option(
boost::beast::websocket::stream_base::timeout::suggested(
boost::beast::role_type::client));

// Set a decorator to change the User-Agent of the handshake
ws_->set_option(boost::beast::websocket::stream_base::decorator(
[](boost::beast::websocket::request_type& req) {
req.set(
boost::beast::http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-client-async");
}));

// Update the host_ string. This will provide the value of the
// Host HTTP header during the WebSocket handshake.
// See https://tools.ietf.org/html/rfc7230#section-5.4
auto host = ip_ + ':' + std::to_string(endpoint.port());
// Perform the websocket handshake
ws_->async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
}
}

void
ETLSource::onHandshake(boost::beast::error_code ec)
{
JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - "
<< toString();
if (ec)
{
// start over
reconnect(ec);
}
else
{
Json::Value jv;
jv["command"] = "subscribe";

jv["streams"] = Json::arrayValue;
Json::Value ledgerStream("ledger");
jv["streams"].append(ledgerStream);
Json::Value txnStream("transactions_proposed");
jv["streams"].append(txnStream);
Json::Value validationStream("validations");
jv["streams"].append(validationStream);
Json::Value manifestStream("manifests");
jv["streams"].append(manifestStream);
Json::FastWriter fastWriter;

JLOG(journal_.trace()) << "Sending subscribe stream message";
// Send the message
ws_->async_write(
boost::asio::buffer(fastWriter.write(jv)),
[this](auto ec, size_t size) { onWrite(ec, size); });
}
}

void
ETLSource::onWrite(boost::beast::error_code ec, size_t bytesWritten)
{
JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - "
<< toString();
if (ec)
{
// start over
reconnect(ec);
}
else
{
ws_->async_read(
readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
}
}

void
ETLSource::onRead(boost::beast::error_code ec, size_t size)
{
JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - "
<< toString();
// if error or error reading message, start over
if (ec)
{
reconnect(ec);
}
else
{
handleMessage();
boost::beast::flat_buffer buffer;
swap(readBuffer_, buffer);

JLOG(journal_.trace())
<< __func__ << " : calling async_read - " << toString();
ws_->async_read(
readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
}
}

bool
ETLSource::handleMessage()
{
JLOG(journal_.trace()) << __func__ << " : " << toString();

setLastMsgTime();
connected_ = true;
try
{
Json::Value response;
Json::Reader reader;
if (!reader.parse(
static_cast<char const*>(readBuffer_.data().data()), response))
{
JLOG(journal_.error())
<< __func__ << " : "
<< "Error parsing stream message."
<< " Message = " << readBuffer_.data().data();
return false;
}

uint32_t ledgerIndex = 0;
if (response.isMember("result"))
{
if (response["result"].isMember(jss::ledger_index))
{
ledgerIndex = response["result"][jss::ledger_index].asUInt();
}
if (response[jss::result].isMember(jss::validated_ledgers))
{
setValidatedRange(
response[jss::result][jss::validated_ledgers].asString());
}
JLOG(journal_.debug())
<< __func__ << " : "
<< "Received a message on ledger "
<< " subscription stream. Message : "
<< response.toStyledString() << " - " << toString();
}
else
{
if (etl_.getETLLoadBalancer().shouldPropagateStream(this))
{
if (response.isMember(jss::transaction))
{
etl_.getApplication().getOPs().forwardProposedTransaction(
response);
}
else if (
response.isMember("type") &&
response["type"] == "validationReceived")
{
etl_.getApplication().getOPs().forwardValidation(response);
}
else if (
response.isMember("type") &&
response["type"] == "manifestReceived")
{
etl_.getApplication().getOPs().forwardManifest(response);
}
}

if (response.isMember("type") && response["type"] == "ledgerClosed")
{
JLOG(journal_.debug())
<< __func__ << " : "
<< "Received a message on ledger "
<< " subscription stream. Message : "
<< response.toStyledString() << " - " << toString();
if (response.isMember(jss::ledger_index))
{
ledgerIndex = response[jss::ledger_index].asUInt();
}
if (response.isMember(jss::validated_ledgers))
{
setValidatedRange(
response[jss::validated_ledgers].asString());
}
}
}

if (ledgerIndex != 0)
{
JLOG(journal_.trace())
<< __func__ << " : "
<< "Pushing ledger sequence = " << ledgerIndex << " - "
<< toString();
networkValidatedLedgers_.push(ledgerIndex);
}
return true;
}
catch (std::exception const& e)
{
JLOG(journal_.error()) << "Exception in handleMessage : " << e.what();
return false;
}
}

class AsyncCallData
{
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;

org::xrpl::rpc::v1::GetLedgerDataRequest request_;
std::unique_ptr<grpc::ClientContext> context_;

grpc::Status status_;

unsigned char nextPrefix_;

beast::Journal journal_;

public:
AsyncCallData(
uint256& marker,
std::optional<uint256> nextMarker,
uint32_t seq,
beast::Journal& j)
: journal_(j)
{
request_.mutable_ledger()->set_sequence(seq);
if (marker.isNonZero())
{
request_.set_marker(marker.data(), marker.size());
}
request_.set_user("ETL");
nextPrefix_ = 0x00;
if (nextMarker)
nextPrefix_ = nextMarker->data()[0];

unsigned char prefix = marker.data()[0];

JLOG(journal_.debug())
<< "Setting up AsyncCallData. marker = " << strHex(marker)
<< " . prefix = " << strHex(std::string(1, prefix))
<< " . nextPrefix_ = " << strHex(std::string(1, nextPrefix_));

assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);

cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();

next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();

context_ = std::make_unique<grpc::ClientContext>();
}

enum class CallStatus { MORE, DONE, ERRORED };
CallStatus
process(
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
grpc::CompletionQueue& cq,
ThreadSafeQueue<std::shared_ptr<SLE>>& queue,
bool abort = false)
{
JLOG(journal_.debug()) << "Processing calldata";
if (abort)
{
JLOG(journal_.error()) << "AsyncCallData aborted";
return CallStatus::ERRORED;
}
if (!status_.ok())
{
JLOG(journal_.debug()) << "AsyncCallData status_ not ok: "
<< " code = " << status_.error_code()
<< " message = " << status_.error_message();
return CallStatus::ERRORED;
}
if (!next_->is_unlimited())
{
JLOG(journal_.warn())
<< "AsyncCallData is_unlimited is false. Make sure "
"secure_gateway is set correctly at the ETL source";
assert(false);
}

std::swap(cur_, next_);

bool more = true;

// if no marker returned, we are done
if (cur_->marker().size() == 0)
more = false;

// if returned marker is greater than our end, we are done
unsigned char prefix = cur_->marker()[0];
if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
more = false;

// if we are not done, make the next async call
if (more)
{
request_.set_marker(std::move(cur_->marker()));
call(stub, cq);
}

for (auto& obj : cur_->ledger_objects().objects())
{
auto key = uint256::fromVoidChecked(obj.key());
if (!key)
throw std::runtime_error("Received malformed object ID");

auto& data = obj.data();

SerialIter it{data.data(), data.size()};
std::shared_ptr<SLE> sle = std::make_shared<SLE>(it, *key);

queue.push(sle);
}

return more ? CallStatus::MORE : CallStatus::DONE;
}

void
call(
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
grpc::CompletionQueue& cq)
{
context_ = std::make_unique<grpc::ClientContext>();

std::unique_ptr<grpc::ClientAsyncResponseReader<
org::xrpl::rpc::v1::GetLedgerDataResponse>>
rpc(stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));

rpc->StartCall();

rpc->Finish(next_.get(), &status_, this);
}

std::string
getMarkerPrefix()
{
if (next_->marker().size() == 0)
return "";
else
return strHex(std::string{next_->marker().data()[0]});
}
};

bool
ETLSource::loadInitialLedger(
uint32_t sequence,
ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue)
{
if (!stub_)
return false;

grpc::CompletionQueue cq;

void* tag;

bool ok = false;

std::vector<AsyncCallData> calls;
std::vector<uint256> markers{getMarkers(etl_.getNumMarkers())};

for (size_t i = 0; i < markers.size(); ++i)
{
std::optional<uint256> nextMarker;
if (i + 1 < markers.size())
nextMarker = markers[i + 1];
calls.emplace_back(markers[i], nextMarker, sequence, journal_);
}

JLOG(journal_.debug()) << "Starting data download for ledger " << sequence
<< ". Using source = " << toString();

for (auto& c : calls)
c.call(stub_, cq);

size_t numFinished = 0;
bool abort = false;
while (numFinished < calls.size() && !etl_.isStopping() &&
cq.Next(&tag, &ok))
{
assert(tag);

auto ptr = static_cast<AsyncCallData*>(tag);

if (!ok)
{
JLOG(journal_.error()) << "loadInitialLedger - ok is false";
return false;
// handle cancelled
}
else
{
JLOG(journal_.debug())
<< "Marker prefix = " << ptr->getMarkerPrefix();
auto result = ptr->process(stub_, cq, writeQueue, abort);
if (result != AsyncCallData::CallStatus::MORE)
{
numFinished++;
JLOG(journal_.debug())
<< "Finished a marker. "
<< "Current number of finished = " << numFinished;
}
if (result == AsyncCallData::CallStatus::ERRORED)
{
abort = true;
}
}
}
return !abort;
}

std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
ETLSource::fetchLedger(uint32_t ledgerSequence, bool getObjects)
{
org::xrpl::rpc::v1::GetLedgerResponse response;
if (!stub_)
return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

// ledger header with txns and metadata
org::xrpl::rpc::v1::GetLedgerRequest request;
grpc::ClientContext context;
request.mutable_ledger()->set_sequence(ledgerSequence);
request.set_transactions(true);
request.set_expand(true);
request.set_get_objects(getObjects);
request.set_user("ETL");
grpc::Status status = stub_->GetLedger(&context, request, &response);
if (status.ok() && !response.is_unlimited())
{
JLOG(journal_.warn()) << "ETLSource::fetchLedger - is_unlimited is "
"false. Make sure secure_gateway is set "
"correctly on the ETL source. source = "
<< toString();
assert(false);
}
return {status, std::move(response)};
}

ETLLoadBalancer::ETLLoadBalancer(ReportingETL& etl)
: etl_(etl)
, journal_(etl_.getApplication().journal("ReportingETL::LoadBalancer"))
{
}

void
ETLLoadBalancer::add(
std::string& host,
std::string& websocketPort,
std::string& grpcPort)
{
std::unique_ptr<ETLSource> ptr =
std::make_unique<ETLSource>(host, websocketPort, grpcPort, etl_);
sources_.push_back(std::move(ptr));
JLOG(journal_.info()) << __func__ << " : added etl source - "
<< sources_.back()->toString();
}

void
ETLLoadBalancer::add(std::string& host, std::string& websocketPort)
{
std::unique_ptr<ETLSource> ptr =
std::make_unique<ETLSource>(host, websocketPort, etl_);
sources_.push_back(std::move(ptr));
JLOG(journal_.info()) << __func__ << " : added etl source - "
<< sources_.back()->toString();
}

void
ETLLoadBalancer::loadInitialLedger(
uint32_t sequence,
ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue)
{
execute(
[this, &sequence, &writeQueue](auto& source) {
bool res = source->loadInitialLedger(sequence, writeQueue);
if (!res)
{
JLOG(journal_.error()) << "Failed to download initial ledger. "
<< " Sequence = " << sequence
<< " source = " << source->toString();
}
return res;
},
sequence);
}

std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
ETLLoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects)
{
org::xrpl::rpc::v1::GetLedgerResponse response;
bool success = execute(
[&response, ledgerSequence, getObjects, this](auto& source) {
auto [status, data] =
source->fetchLedger(ledgerSequence, getObjects);
response = std::move(data);
if (status.ok() && response.validated())
{
JLOG(journal_.info())
<< "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
return true;
}
else
{
JLOG(journal_.warn())
<< "Error getting ledger = " << ledgerSequence
<< " Reply : " << response.DebugString()
<< " error_code : " << status.error_code()
<< " error_msg : " << status.error_message()
<< " source = " << source->toString();
return false;
}
},
ledgerSequence);
if (success)
return response;
else
return {};
}

std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
ETLLoadBalancer::getP2pForwardingStub() const
{
if (sources_.size() == 0)
return nullptr;
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;
while (numAttempts < sources_.size())
{
auto stub = sources_[sourceIdx]->getP2pForwardingStub();
if (!stub)
{
sourceIdx = (sourceIdx + 1) % sources_.size();
++numAttempts;
continue;
}
return stub;
}
return nullptr;
}

Json::Value
ETLLoadBalancer::forwardToP2p(RPC::JsonContext& context) const
{
Json::Value res;
if (sources_.size() == 0)
return res;
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;

auto mostRecent = etl_.getNetworkValidatedLedgers().tryGetMostRecent();
while (numAttempts < sources_.size())
{
auto increment = [&]() {
sourceIdx = (sourceIdx + 1) % sources_.size();
++numAttempts;
};
auto& src = sources_[sourceIdx];
if (mostRecent && !src->hasLedger(*mostRecent))
{
increment();
continue;
}
res = src->forwardToP2p(context);
if (!res.isMember("forwarded") || res["forwarded"] != true)
{
increment();
continue;
}
return res;
}
RPC::Status err = {rpcFAILED_TO_FORWARD};
err.inject(res);
return res;
}
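forwardToP2p picks a random starting source and walks the list round-robin, skipping sources that lack the most recent validated ledger, until one reports forwarded = true; only after every source has been tried does it return rpcFAILED_TO_FORWARD. A standalone sketch of that failover loop, with sources reduced to booleans:

```cpp
#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <vector>

// Round-robin-with-random-start failover, as in
// ETLLoadBalancer::forwardToP2p above. true = source can serve us.
int main()
{
    std::vector<bool> healthy{false, false, true, false};

    std::srand(static_cast<unsigned>(std::time(nullptr)));
    std::size_t idx = std::rand() % healthy.size();

    for (std::size_t attempts = 0; attempts < healthy.size(); ++attempts)
    {
        if (healthy[idx])
        {
            std::cout << "forwarded via source " << idx << '\n';
            return 0;
        }
        idx = (idx + 1) % healthy.size();  // try the next source
    }
    std::cout << "rpcFAILED_TO_FORWARD\n";  // every source failed
    return 1;
}
```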

std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
ETLSource::getP2pForwardingStub() const
{
if (!connected_)
return nullptr;
try
{
return org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateChannel(
beast::IP::Endpoint(
boost::asio::ip::make_address(ip_), std::stoi(grpcPort_))
.to_string(),
grpc::InsecureChannelCredentials()));
}
catch (std::exception const&)
{
JLOG(journal_.error()) << "Failed to create grpc stub";
return nullptr;
}
}

Json::Value
ETLSource::forwardToP2p(RPC::JsonContext& context) const
{
JLOG(journal_.debug()) << "Attempting to forward request to tx. "
<< "request = " << context.params.toStyledString();

Json::Value response;
if (!connected_)
{
JLOG(journal_.error())
<< "Attempted to proxy but failed to connect to tx";
return response;
}
namespace beast = boost::beast;         // from <boost/beast.hpp>
namespace http = beast::http;           // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio;            // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp;       // from <boost/asio/ip/tcp.hpp>
Json::Value& request = context.params;
try
{
// The io_context is required for all I/O
net::io_context ioc;

// These objects perform our I/O
tcp::resolver resolver{ioc};

JLOG(journal_.debug()) << "Creating websocket";
auto ws = std::make_unique<websocket::stream<tcp::socket>>(ioc);

// Look up the domain name
auto const results = resolver.resolve(ip_, wsPort_);

JLOG(journal_.debug()) << "Connecting websocket";
// Make the connection on the IP address we get from a lookup
net::connect(ws->next_layer(), results.begin(), results.end());

// Set a decorator to change the User-Agent of the handshake
// and to tell rippled to charge the client IP for RPC
// resources. See "secure_gateway" in
// https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
ws->set_option(websocket::stream_base::decorator(
[&context](websocket::request_type& req) {
req.set(
http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-client-coro");
req.set(
http::field::forwarded,
"for=" + context.consumer.to_string());
}));
JLOG(journal_.debug()) << "client ip: " << context.consumer.to_string();

JLOG(journal_.debug()) << "Performing websocket handshake";
// Perform the websocket handshake
ws->handshake(ip_, "/");

Json::FastWriter fastWriter;

JLOG(journal_.debug()) << "Sending request";
// Send the message
ws->write(net::buffer(fastWriter.write(request)));

beast::flat_buffer buffer;
ws->read(buffer);

Json::Reader reader;
if (!reader.parse(
static_cast<char const*>(buffer.data().data()), response))
{
JLOG(journal_.error()) << "Error parsing response";
response[jss::error] = "Error parsing response from tx";
}
JLOG(journal_.debug()) << "Successfully forwarded request";

response["forwarded"] = true;
return response;
}
catch (std::exception const& e)
{
JLOG(journal_.error()) << "Encountered exception : " << e.what();
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
template <class Func>
|
||||
bool
|
||||
ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence)
|
||||
{
|
||||
srand((unsigned)time(0));
|
||||
auto sourceIdx = rand() % sources_.size();
|
||||
auto numAttempts = 0;
|
||||
|
||||
while (!etl_.isStopping())
|
||||
{
|
||||
auto& source = sources_[sourceIdx];
|
||||
|
||||
JLOG(journal_.debug())
|
||||
<< __func__ << " : "
|
||||
<< "Attempting to execute func. ledger sequence = "
|
||||
<< ledgerSequence << " - source = " << source->toString();
|
||||
if (source->hasLedger(ledgerSequence))
|
||||
{
|
||||
bool res = f(source);
|
||||
if (res)
|
||||
{
|
||||
JLOG(journal_.debug())
|
||||
<< __func__ << " : "
|
||||
<< "Successfully executed func at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(journal_.warn())
|
||||
<< __func__ << " : "
|
||||
<< "Failed to execute func at source = "
|
||||
<< source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(journal_.warn())
|
||||
<< __func__ << " : "
|
||||
<< "Ledger not present at source = " << source->toString()
|
||||
<< " - ledger sequence = " << ledgerSequence;
|
||||
}
|
||||
sourceIdx = (sourceIdx + 1) % sources_.size();
|
||||
numAttempts++;
|
||||
if (numAttempts % sources_.size() == 0)
|
||||
{
|
||||
// If another process loaded the ledger into the database, we can
|
||||
// abort trying to fetch the ledger from a transaction processing
|
||||
// process
|
||||
if (etl_.getApplication().getLedgerMaster().getLedgerBySeq(
|
||||
ledgerSequence))
|
||||
{
|
||||
JLOG(journal_.warn())
|
||||
<< __func__ << " : "
|
||||
<< "Error executing function. "
|
||||
<< " Tried all sources, but ledger was found in db."
|
||||
<< " Sequence = " << ledgerSequence;
|
||||
return false;
|
||||
}
|
||||
JLOG(journal_.error())
|
||||
<< __func__ << " : "
|
||||
<< "Error executing function "
|
||||
<< " - ledger sequence = " << ledgerSequence
|
||||
<< " - Tried all sources. Sleeping and trying again";
|
||||
std::this_thread::sleep_for(std::chrono::seconds(2));
|
||||
}
|
||||
}
|
||||
return !etl_.isStopping();
|
||||
}
|
||||
|
||||
void
|
||||
ETLLoadBalancer::start()
|
||||
{
|
||||
for (auto& source : sources_)
|
||||
source->start();
|
||||
}
|
||||
|
||||
void
|
||||
ETLLoadBalancer::stop()
|
||||
{
|
||||
for (auto& source : sources_)
|
||||
source->stop();
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
@@ -1,435 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2020 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED
#include <xrpld/app/main/Application.h>
#include <xrpld/app/reporting/ETLHelpers.h>
#include <xrpld/rpc/Context.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <xrpl/protocol/STLedgerEntry.h>

#include <boost/algorithm/string.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>

#include <grpcpp/grpcpp.h>

namespace ripple {

class ReportingETL;

/// This class manages a connection to a single ETL source. This is almost
/// always a p2p node, but really could be another reporting node. This class
/// subscribes to the ledgers and transactions_proposed streams of the
/// associated p2p node, and keeps track of which ledgers the p2p node has. This
/// class also has methods for extracting said ledgers. Lastly this class
/// forwards transactions received on the transactions_proposed streams to any
/// subscribers.
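///
/// Illustrative example (not part of the original source): a message on the
/// "ledgers" stream carries the source's validated range, e.g.
///   {"type": "ledgerClosed", "ledger_index": 6595042,
///    "validated_ledgers": "32570-6595042"}
/// which feeds setValidatedRange() below.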
class ETLSource
{
    std::string ip_;

    std::string wsPort_;

    std::string grpcPort_;

    ReportingETL& etl_;

    // a reference to the application's io_service
    boost::asio::io_context& ioc_;

    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;

    std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>>
        ws_;
    boost::asio::ip::tcp::resolver resolver_;

    boost::beast::flat_buffer readBuffer_;

    std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;

    std::string validatedLedgersRaw_;

    NetworkValidatedLedgers& networkValidatedLedgers_;

    beast::Journal journal_;

    Application& app_;

    mutable std::mutex mtx_;

    size_t numFailures_ = 0;

    std::atomic_bool closing_ = false;

    std::atomic_bool connected_ = false;

    // true if this ETL source is forwarding transactions received on the
    // transactions_proposed stream. There are usually multiple ETL sources,
    // so to avoid forwarding the same transaction multiple times, we only
    // forward from one particular ETL source at a time.
    std::atomic_bool forwardingStream_ = false;

    // The last time a message was received on the ledgers stream
    std::chrono::system_clock::time_point lastMsgTime_;
    mutable std::mutex lastMsgTimeMtx_;

    // used for retrying connections
    boost::asio::steady_timer timer_;

public:
    bool
    isConnected() const
    {
        return connected_;
    }

    std::chrono::system_clock::time_point
    getLastMsgTime() const
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        return lastMsgTime_;
    }

    void
    setLastMsgTime()
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        lastMsgTime_ = std::chrono::system_clock::now();
    }

    /// Create ETL source without gRPC endpoint.
    /// Fetch ledger and load initial ledger will fail for this source.
    /// Primarily used in read-only mode, to monitor when ledgers are validated
    ETLSource(std::string ip, std::string wsPort, ReportingETL& etl);

    /// Create ETL source with gRPC endpoint
    ETLSource(
        std::string ip,
        std::string wsPort,
        std::string grpcPort,
        ReportingETL& etl);

    /// @param sequence ledger sequence to check for
    /// @return true if this source has the desired ledger
    bool
    hasLedger(uint32_t sequence) const
    {
        std::lock_guard lck(mtx_);
        for (auto& pair : validatedLedgers_)
        {
            if (sequence >= pair.first && sequence <= pair.second)
            {
                return true;
            }
            else if (sequence < pair.first)
            {
                // validatedLedgers_ is a sorted list of disjoint ranges;
                // if the sequence comes before this range, the sequence will
                // come before all subsequent ranges
                return false;
            }
        }
        return false;
    }
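    // Illustrative example (not part of the original source): with
    // validatedLedgers_ = {{100, 200}, {300, 400}}, hasLedger(150) returns
    // true, while hasLedger(250) hits the second range and returns false.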

    /// Process the validated range received on the ledgers stream and set the
    /// appropriate member variable
    /// @param range validated range received on the ledgers stream
    void
    setValidatedRange(std::string const& range)
    {
        std::vector<std::pair<uint32_t, uint32_t>> pairs;
        std::vector<std::string> ranges;
        boost::split(ranges, range, boost::is_any_of(","));
        for (auto& pair : ranges)
        {
            std::vector<std::string> minAndMax;

            boost::split(minAndMax, pair, boost::is_any_of("-"));

            if (minAndMax.size() == 1)
            {
                uint32_t sequence = std::stoll(minAndMax[0]);
                pairs.push_back(std::make_pair(sequence, sequence));
            }
            else
            {
                assert(minAndMax.size() == 2);
                uint32_t min = std::stoll(minAndMax[0]);
                uint32_t max = std::stoll(minAndMax[1]);
                pairs.push_back(std::make_pair(min, max));
            }
        }
        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) {
            return left.first < right.first;
        });

        // we only hold the lock here, to avoid blocking during the string
        // processing above
        std::lock_guard lck(mtx_);
        validatedLedgers_ = std::move(pairs);
        validatedLedgersRaw_ = range;
    }
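    // Illustrative example (not part of the original source):
    // setValidatedRange("32570-100000,100002") stores
    // {{32570, 100000}, {100002, 100002}} in validatedLedgers_.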

    /// @return the validated range of this source
    /// @note this is only used by server_info
    std::string
    getValidatedRange() const
    {
        std::lock_guard lck(mtx_);

        return validatedLedgersRaw_;
    }

    /// Close the underlying websocket
    void
    stop()
    {
        JLOG(journal_.debug()) << __func__ << " : "
                               << "Closing websocket";

        assert(ws_);
        close(false);
    }

    /// Fetch the specified ledger
    /// @param ledgerSequence sequence of the ledger to fetch
    /// @param getObjects whether to get the account state diff between this
    /// ledger and the prior one
    /// @return the extracted data and the result status
    std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true);

    std::string
    toString() const
    {
        return "{ validated_ledger : " + getValidatedRange() +
            " , ip : " + ip_ + " , web socket port : " + wsPort_ +
            ", grpc port : " + grpcPort_ + " }";
    }

    Json::Value
    toJson() const
    {
        Json::Value result(Json::objectValue);
        result["connected"] = connected_.load();
        result["validated_ledgers_range"] = getValidatedRange();
        result["ip"] = ip_;
        result["websocket_port"] = wsPort_;
        result["grpc_port"] = grpcPort_;
        auto last = getLastMsgTime();
        if (last.time_since_epoch().count() != 0)
            result["last_message_arrival_time"] =
                to_string(std::chrono::floor<std::chrono::microseconds>(last));
        return result;
    }
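    // Illustrative output (not part of the original source):
    //   { "connected" : true, "validated_ledgers_range" : "32570-100000",
    //     "ip" : "127.0.0.1", "websocket_port" : "6006", "grpc_port" : "50051" }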

    /// Download a ledger in full
    /// @param ledgerSequence sequence of the ledger to download
    /// @param writeQueue queue to push downloaded ledger objects
    /// @return true if the download was successful
    bool
    loadInitialLedger(
        uint32_t ledgerSequence,
        ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue);

    /// Begin sequence of operations to connect to the ETL source and subscribe
    /// to ledgers and transactions_proposed
    void
    start();

    /// Attempt to reconnect to the ETL source
    void
    reconnect(boost::beast::error_code ec);

    /// Callback
    void
    onResolve(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type results);

    /// Callback
    void
    onConnect(
        boost::beast::error_code ec,
        boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint);

    /// Callback
    void
    onHandshake(boost::beast::error_code ec);

    /// Callback
    void
    onWrite(boost::beast::error_code ec, size_t size);

    /// Callback
    void
    onRead(boost::beast::error_code ec, size_t size);

    /// Handle the most recently received message
    /// @return true if the message was handled successfully, false on error
    bool
    handleMessage();

    /// Close the websocket
    /// @param startAgain whether to reconnect
    void
    close(bool startAgain);

    /// Get grpc stub to forward requests to p2p node
    /// @return stub to send requests to ETL source
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
    getP2pForwardingStub() const;

    /// Forward a JSON RPC request to a p2p node
    /// @param context context of RPC request
    /// @return response received from ETL source
    Json::Value
    forwardToP2p(RPC::JsonContext& context) const;
};

/// This class is used to manage connections to transaction processing
/// processes (p2p nodes). This class spawns a listener for each etl source,
/// which listens to messages on the ledgers stream (to keep track of which
/// ledgers have been validated by the network, and the range of ledgers each
/// etl source has). This class also allows requests for ledger data to be
/// load balanced across all possible etl sources.
class ETLLoadBalancer
{
private:
    ReportingETL& etl_;

    beast::Journal journal_;

    std::vector<std::unique_ptr<ETLSource>> sources_;

public:
    ETLLoadBalancer(ReportingETL& etl);

    /// Add an ETL source
    /// @param host host or ip of ETL source
    /// @param websocketPort port where ETL source accepts websocket connections
    /// @param grpcPort port where ETL source accepts gRPC requests
    void
    add(std::string& host, std::string& websocketPort, std::string& grpcPort);

    /// Add an ETL source without gRPC support. This source will send messages
    /// on the ledgers and transactions_proposed streams, but will not be able
    /// to handle the gRPC requests that are used for ETL
    /// @param host host or ip of ETL source
    /// @param websocketPort port where ETL source accepts websocket connections
    void
    add(std::string& host, std::string& websocketPort);

    /// Load the initial ledger, writing data to the queue
    /// @param sequence sequence of ledger to download
    /// @param writeQueue queue to push downloaded data to
    void
    loadInitialLedger(
        uint32_t sequence,
        ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue);

    /// Fetch data for a specific ledger. This function will continuously try
    /// to fetch data for the specified ledger until the fetch succeeds, the
    /// ledger is found in the database, or the server is shutting down.
    /// @param ledgerSequence sequence of ledger to fetch data for
    /// @param getObjects if true, fetch diff between specified ledger and
    /// previous
    /// @return the extracted data, if extraction was successful. If the ledger
    /// was found in the database or the server is shutting down, the optional
    /// will be empty
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedger(uint32_t ledgerSequence, bool getObjects);

    /// Set up all of the ETL sources and subscribe to the necessary streams
    void
    start();

    void
    stop();

    /// Determine whether messages received on the transactions_proposed stream
    /// should be forwarded to subscribing clients. The server subscribes to
    /// transactions_proposed, validations, and manifests on multiple
    /// ETLSources, yet only forwards messages from one source at any given
    /// time (to avoid sending duplicate messages to clients).
    /// @param in ETLSource in question
    /// @return true if messages should be forwarded
    bool
    shouldPropagateStream(ETLSource* in) const
    {
        for (auto& src : sources_)
        {
            assert(src);
            // We pick the first ETLSource encountered that is connected
            if (src->isConnected())
            {
                if (src.get() == in)
                    return true;
                else
                    return false;
            }
        }

        // If no sources are connected, then this stream has not been forwarded
        return true;
    }

    Json::Value
    toJson() const
    {
        Json::Value ret(Json::arrayValue);
        for (auto& src : sources_)
        {
            ret.append(src->toJson());
        }
        return ret;
    }

    /// Randomly select a p2p node to forward a gRPC request to
    /// @return gRPC stub to forward requests to p2p node
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
    getP2pForwardingStub() const;

    /// Forward a JSON RPC request to a randomly selected p2p node
    /// @param context context of the request
    /// @return response received from p2p node
    Json::Value
    forwardToP2p(RPC::JsonContext& context) const;

private:
    /// f is a function that takes an ETLSource as an argument and returns a
    /// bool. Attempt to execute f for one randomly chosen ETLSource that has
    /// the specified ledger. If f returns false, another randomly chosen
    /// ETLSource is used. The process repeats until f returns true.
    /// @param f function to execute. This function takes the ETL source as an
    /// argument, and returns a bool.
    /// @param ledgerSequence f is executed for each ETLSource that has this
    /// ledger
    /// @return true if f was eventually executed successfully. false if the
    /// ledger was found in the database or the server is shutting down
    template <class Func>
    bool
    execute(Func f, uint32_t ledgerSequence);
};

} // namespace ripple
#endif
@@ -1,84 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2020 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpld/app/reporting/P2pProxy.h>
#include <xrpld/app/reporting/ReportingETL.h>
#include <xrpl/json/json_reader.h>
#include <xrpl/json/json_writer.h>

namespace ripple {

Json::Value
forwardToP2p(RPC::JsonContext& context)
{
    return context.app.getReportingETL().getETLLoadBalancer().forwardToP2p(
        context);
}

std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
getP2pForwardingStub(RPC::Context& context)
{
    return context.app.getReportingETL()
        .getETLLoadBalancer()
        .getP2pForwardingStub();
}

// We forward requests that need the current or closed ledger, either because
// the handler requires it or because ledger_index is "current" or "closed";
// otherwise, attempt to handle the request here
bool
shouldForwardToP2p(RPC::JsonContext& context)
{
    if (!context.app.config().reporting())
        return false;

    Json::Value& params = context.params;
    std::string strCommand = params.isMember(jss::command)
        ? params[jss::command].asString()
        : params[jss::method].asString();

    JLOG(context.j.trace()) << "COMMAND:" << strCommand;
    JLOG(context.j.trace()) << "REQUEST:" << params;
    auto handler = RPC::getHandler(
        context.apiVersion, context.app.config().BETA_RPC_API, strCommand);
    if (!handler)
    {
        JLOG(context.j.error())
            << "Error getting handler. command = " << strCommand;
        return false;
    }

    if (handler->condition_ == RPC::NEEDS_CURRENT_LEDGER ||
        handler->condition_ == RPC::NEEDS_CLOSED_LEDGER)
    {
        return true;
    }

    if (params.isMember(jss::ledger_index))
    {
        auto indexValue = params[jss::ledger_index];
        if (indexValue.isString())
        {
            auto index = indexValue.asString();
            return index == "current" || index == "closed";
        }
    }
    return false;
}

} // namespace ripple
@@ -1,113 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2020 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_APP_REPORTING_P2PPROXY_H_INCLUDED
#define RIPPLE_APP_REPORTING_P2PPROXY_H_INCLUDED

#include <xrpld/app/main/Application.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/detail/Handler.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <boost/beast/websocket.hpp>
#include <grpcpp/grpcpp.h>

namespace ripple {
/// Forward a JSON request to a p2p node and return the response
/// @param context context of the request
/// @return response from p2p node
Json::Value
forwardToP2p(RPC::JsonContext& context);

/// Whether a request should be forwarded, based on request parameters
/// @param context context of the request
/// @return true if should be forwarded
bool
shouldForwardToP2p(RPC::JsonContext& context);

template <class Request>
bool
needCurrentOrClosed(Request& request)
{
    // These are the only gRPC requests that specify a ledger
    if constexpr (
        std::is_same<Request, org::xrpl::rpc::v1::GetLedgerRequest>::value ||
        std::is_same<Request, org::xrpl::rpc::v1::GetLedgerDataRequest>::
            value ||
        std::is_same<Request, org::xrpl::rpc::v1::GetLedgerEntryRequest>::value)
    {
        if (request.ledger().ledger_case() ==
            org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase::kShortcut)
        {
            if (request.ledger().shortcut() !=
                    org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED &&
                request.ledger().shortcut() !=
                    org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED)
                return true;
        }
    }
    // GetLedgerDiff specifies two ledgers
    else if constexpr (std::is_same<
                           Request,
                           org::xrpl::rpc::v1::GetLedgerDiffRequest>::value)
    {
        auto help = [](auto specifier) {
            if (specifier.ledger_case() ==
                org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase::kShortcut)
            {
                if (specifier.shortcut() !=
                        org::xrpl::rpc::v1::LedgerSpecifier::
                            SHORTCUT_VALIDATED &&
                    specifier.shortcut() !=
                        org::xrpl::rpc::v1::LedgerSpecifier::
                            SHORTCUT_UNSPECIFIED)
                    return true;
            }
            return false;
        };
        return help(request.base_ledger()) || help(request.desired_ledger());
    }
    return false;
}

/// Whether a request should be forwarded, based on request parameters
/// @param context context of the request
/// @param condition required condition for the request
/// @return true if should be forwarded
template <class Request>
bool
shouldForwardToP2p(RPC::GRPCContext<Request>& context, RPC::Condition condition)
{
    if (!context.app.config().reporting())
        return false;
    if (condition == RPC::NEEDS_CURRENT_LEDGER ||
        condition == RPC::NEEDS_CLOSED_LEDGER)
        return true;

    return needCurrentOrClosed(context.params);
}

/// Get stub used to forward gRPC requests to a p2p node
/// @param context context of the request
/// @return stub to forward requests
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>
getP2pForwardingStub(RPC::Context& context);

} // namespace ripple
#endif
@@ -1,108 +0,0 @@
Reporting mode is a special operating mode of rippled, designed to handle RPCs
for validated data. A server running in reporting mode does not connect to the
p2p network, but rather extracts validated data from a node that is connected
to the p2p network. To run rippled in reporting mode, you must also run a
separate rippled node in p2p mode, to use as an ETL source. Multiple reporting
nodes can share access to the same network-accessible databases (Postgres and
Cassandra); at any given time, only one reporting node will be performing ETL
and writing to the databases, while the others simply read from the databases.
A server running in reporting mode will forward any requests that require
access to the p2p network to a p2p node.

# Reporting ETL
A single reporting node has one or more ETL sources, specified in the config
file. A reporting node will subscribe to the "ledgers" stream of each of the
ETL sources. This stream sends a message whenever a new ledger is validated.
Upon receiving a message on the stream, reporting will then fetch the data
associated with the newly validated ledger from one of the ETL sources. The
fetch is performed via a gRPC request ("GetLedger"). This request returns the
ledger header, transactions+metadata blobs, and every ledger object
added/modified/deleted as part of this ledger. ETL then writes all of this data
to the databases, and moves on to the next ledger. ETL does not apply
transactions, but rather extracts the already computed results of those
transactions (all of the added/modified/deleted SHAMap leaf nodes of the state
tree). The new SHAMap inner nodes are computed by the ETL writer; this
computation mainly involves manipulating child pointers and recomputing
hashes, logic which is buried inside of SHAMap.
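
A minimal sketch of the per-ledger step described above (illustrative only;
the names follow the implementation in ReportingETL, but `parent_` and the
function itself are simplifications, not the verbatim code):

```
// Illustrative sketch of one ETL iteration (error handling omitted)
void etlStep(uint32_t seq)
{
    // block until the network has validated ledger `seq` (ledgers stream)
    networkValidatedLedgers_.waitUntilValidatedByNetwork(seq);
    // fetch header, transactions+metadata, and the state diff via GetLedger
    auto data = loadBalancer_.fetchLedger(seq, /*getObjects=*/true);
    // apply the diff to the prior ledger; SHAMap recomputes inner node hashes
    auto [ledger, txData] = buildNextLedger(parent_, *data);
    flushLedger(ledger);  // write nodes to the key-value store
    dynamic_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
        ->writeLedgerAndTransactions(ledger->info(), txData);  // RDBMS
    publishLedger(ledger);  // notify subscribed clients
    parent_ = ledger;       // parent_: previous ledger (illustrative member)
}
```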

If the database is entirely empty, ETL must download an entire ledger in full
(as opposed to just the diff, as described above). This download is done via
the "GetLedgerData" gRPC request. "GetLedgerData" allows clients to page
through an entire ledger over several RPC calls. ETL will page through an
entire ledger, and write each object to the database.

If the database is not empty, the reporting node will first come up in a "soft"
read-only mode. In read-only mode, the server does not perform ETL and simply
publishes new ledgers as they are written to the database. If the database is
not updated within a certain time period (currently hard-coded at 20 seconds),
the reporting node will begin the ETL process and start writing to the
database. Postgres will report an error when trying to write a record with a
key that already exists. ETL uses this error to determine that another process
is writing to the database, and subsequently falls back to a soft read-only
mode. Reporting nodes can also operate in strict read-only mode, in which case
they will never write to the database.
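
For illustration only (schematic SQL; the column list and the constraint name
"ledgers_pkey" are assumptions, not taken from the actual schema), the kind of
Postgres error that signals such a write conflict looks like:

```
reporting=# INSERT INTO ledgers (ledger_seq, ...) VALUES (100000, ...);
ERROR:  duplicate key value violates unique constraint "ledgers_pkey"
```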

# Database Nuances
The database schema for reporting mode does not allow any history gaps.
Attempting to write a ledger to a non-empty database where the previous ledger
does not exist will return an error.

The databases must be set up prior to running reporting mode. This requires
creating the Postgres database and setting up the Cassandra keyspace. Reporting
mode will create the objects table in Cassandra if the table does not yet
exist.

Creating the Postgres database:
```
$ psql -h [host] -U [user]
postgres=# create database [database];
```
Creating the keyspace:
```
$ cqlsh [host] [port]
> CREATE KEYSPACE rippled WITH REPLICATION =
  {'class' : 'SimpleStrategy', 'replication_factor' : 3 };
```
A replication factor of 3 is recommended. However, when running locally, only a
replication factor of 1 is supported.

Online delete is not supported by reporting mode and must be done manually. The
easiest way to do this is to set up a second Cassandra keyspace and Postgres
database, bring up a single reporting mode instance that uses those databases,
and start ETL at a ledger of your choosing (via --startReporting on the command
line). Once this node is caught up, the other databases can be deleted.

To delete:
```
$ psql -h [host] -U [user] -d [database]
reporting=# truncate table ledgers cascade;
```
```
$ cqlsh [host] [port]
> truncate table objects;
```

# Proxy
RPCs that require access to the p2p network and/or the open ledger are
forwarded from the reporting node to one of the ETL sources. The request is not
processed prior to forwarding, and the response is delivered as-is to the
client. Reporting will forward any requests that always require p2p/open
ledger access (fee and submit, for instance). In addition, any request that
explicitly requests data from the open or closed ledger (via setting
"ledger_index":"current" or "ledger_index":"closed") will be forwarded to a
p2p node.
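
For example (illustrative requests, not taken from the original document):

```
{"command": "submit", "tx_blob": "..."}                   <- always forwarded
{"command": "account_info", "ledger_index": "current"}    <- forwarded
{"command": "account_info", "ledger_index": "validated"}  <- handled locally
```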

For the stream "transactions_proposed" (AKA "rt_transactions"), reporting
subscribes to the "transactions_proposed" streams of each ETL source, and then
forwards those messages to any clients subscribed to the same stream on the
reporting node. A reporting node will subscribe to the stream on each ETL
source, but will only forward the messages from one of the streams at any
given time (to avoid sending the same message more than once to the same
client).

# API changes
A reporting node defaults to only returning validated data. If a ledger is not
specified, the most recently validated ledger is used. This is in contrast to
the normal rippled behavior, where the open ledger is used by default.

Reporting will reject all subscribe requests for streams "server", "manifests",
"validations", "peer_status" and "consensus".

@@ -1,958 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2020 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
|
||||
#include <xrpld/app/reporting/ReportingETL.h>
|
||||
|
||||
#include <xrpl/beast/core/CurrentThreadName.h>
|
||||
#include <xrpl/json/json_reader.h>
|
||||
#include <xrpl/json/json_writer.h>
|
||||
#include <boost/asio/connect.hpp>
|
||||
#include <boost/asio/ip/tcp.hpp>
|
||||
#include <boost/beast/core.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <cctype>
|
||||
#include <charconv>
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <variant>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
namespace detail {
|
||||
/// Convenience function for printing out basic ledger info
|
||||
std::string
|
||||
toString(LedgerInfo const& info)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << "LedgerInfo { Sequence : " << info.seq
|
||||
<< " Hash : " << strHex(info.hash) << " TxHash : " << strHex(info.txHash)
|
||||
<< " AccountHash : " << strHex(info.accountHash)
|
||||
<< " ParentHash : " << strHex(info.parentHash) << " }";
|
||||
return ss.str();
|
||||
}
|
||||
} // namespace detail
|
||||
|
||||
void
|
||||
ReportingETL::consumeLedgerData(
|
||||
std::shared_ptr<Ledger>& ledger,
|
||||
ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue)
|
||||
{
|
||||
std::shared_ptr<SLE> sle;
|
||||
size_t num = 0;
|
||||
while (!stopping_ && (sle = writeQueue.pop()))
|
||||
{
|
||||
assert(sle);
|
||||
if (!ledger->exists(sle->key()))
|
||||
ledger->rawInsert(sle);
|
||||
|
||||
if (flushInterval_ != 0 && (num % flushInterval_) == 0)
|
||||
{
|
||||
JLOG(journal_.debug()) << "Flushing! key = " << strHex(sle->key());
|
||||
ledger->stateMap().flushDirty(hotACCOUNT_NODE);
|
||||
}
|
||||
++num;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<AccountTransactionsData>
|
||||
ReportingETL::insertTransactions(
|
||||
std::shared_ptr<Ledger>& ledger,
|
||||
org::xrpl::rpc::v1::GetLedgerResponse& data)
|
||||
{
|
||||
std::vector<AccountTransactionsData> accountTxData;
|
||||
for (auto& txn : data.transactions_list().transactions())
|
||||
{
|
||||
auto& raw = txn.transaction_blob();
|
||||
|
||||
SerialIter it{raw.data(), raw.size()};
|
||||
STTx sttx{it};
|
||||
|
||||
auto txSerializer = std::make_shared<Serializer>(sttx.getSerializer());
|
||||
|
||||
TxMeta txMeta{
|
||||
sttx.getTransactionID(), ledger->info().seq, txn.metadata_blob()};
|
||||
|
||||
auto metaSerializer =
|
||||
std::make_shared<Serializer>(txMeta.getAsObject().getSerializer());
|
||||
|
||||
JLOG(journal_.trace())
|
||||
<< __func__ << " : "
|
||||
<< "Inserting transaction = " << sttx.getTransactionID();
|
||||
uint256 nodestoreHash = ledger->rawTxInsertWithHash(
|
||||
sttx.getTransactionID(), txSerializer, metaSerializer);
|
||||
accountTxData.emplace_back(txMeta, std::move(nodestoreHash), journal_);
|
||||
}
|
||||
return accountTxData;
|
||||
}
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
ReportingETL::loadInitialLedger(uint32_t startingSequence)
|
||||
{
|
||||
// check that database is actually empty
|
||||
auto ledger = std::const_pointer_cast<Ledger>(
|
||||
app_.getLedgerMaster().getValidatedLedger());
|
||||
if (ledger)
|
||||
{
|
||||
JLOG(journal_.fatal()) << __func__ << " : "
|
||||
<< "Database is not empty";
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
|
||||
// fetch the ledger from the network. This function will not return until
|
||||
// either the fetch is successful, or the server is being shutdown. This
|
||||
// only fetches the ledger header and the transactions+metadata
|
||||
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> ledgerData{
|
||||
fetchLedgerData(startingSequence)};
|
||||
if (!ledgerData)
|
||||
return {};
|
||||
|
||||
LedgerInfo lgrInfo =
|
||||
deserializeHeader(makeSlice(ledgerData->ledger_header()), true);
|
||||
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Deserialized ledger header. "
|
||||
<< detail::toString(lgrInfo);
|
||||
|
||||
ledger =
|
||||
std::make_shared<Ledger>(lgrInfo, app_.config(), app_.getNodeFamily());
|
||||
ledger->stateMap().clearSynching();
|
||||
ledger->txMap().clearSynching();
|
||||
|
||||
#ifdef RIPPLED_REPORTING
|
||||
std::vector<AccountTransactionsData> accountTxData =
|
||||
insertTransactions(ledger, *ledgerData);
|
||||
#endif
|
||||
|
||||
auto start = std::chrono::system_clock::now();
|
||||
|
||||
ThreadSafeQueue<std::shared_ptr<SLE>> writeQueue;
|
||||
std::thread asyncWriter{[this, &ledger, &writeQueue]() {
|
||||
consumeLedgerData(ledger, writeQueue);
|
||||
}};
|
||||
|
||||
// download the full account state map. This function downloads full ledger
|
||||
// data and pushes the downloaded data into the writeQueue. asyncWriter
|
||||
// consumes from the queue and inserts the data into the Ledger object.
|
||||
// Once the below call returns, all data has been pushed into the queue
|
||||
loadBalancer_.loadInitialLedger(startingSequence, writeQueue);
|
||||
|
||||
// null is used to represent the end of the queue
|
||||
std::shared_ptr<SLE> null;
|
||||
writeQueue.push(null);
|
||||
// wait for the writer to finish
|
||||
asyncWriter.join();
|
||||
|
||||
if (!stopping_)
|
||||
{
|
||||
flushLedger(ledger);
|
||||
if (app_.config().reporting())
|
||||
{
|
||||
#ifdef RIPPLED_REPORTING
|
||||
dynamic_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
|
||||
->writeLedgerAndTransactions(ledger->info(), accountTxData);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
auto end = std::chrono::system_clock::now();
|
||||
JLOG(journal_.debug()) << "Time to download and store ledger = "
|
||||
<< ((end - start).count()) / 1000000000.0;
|
||||
return ledger;
|
||||
}
|
||||
|
||||
void
|
||||
ReportingETL::flushLedger(std::shared_ptr<Ledger>& ledger)
|
||||
{
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Flushing ledger. "
|
||||
<< detail::toString(ledger->info());
|
||||
// These are recomputed in setImmutable
|
||||
auto& accountHash = ledger->info().accountHash;
|
||||
auto& txHash = ledger->info().txHash;
|
||||
auto& ledgerHash = ledger->info().hash;
|
||||
|
||||
assert(ledger->read(keylet::fees()));
|
||||
ledger->setImmutable(false);
|
||||
auto start = std::chrono::system_clock::now();
|
||||
|
||||
auto numFlushed = ledger->stateMap().flushDirty(hotACCOUNT_NODE);
|
||||
|
||||
auto numTxFlushed = ledger->txMap().flushDirty(hotTRANSACTION_NODE);
|
||||
|
||||
{
|
||||
Serializer s(128);
|
||||
s.add32(HashPrefix::ledgerMaster);
|
||||
addRaw(ledger->info(), s);
|
||||
app_.getNodeStore().store(
|
||||
hotLEDGER,
|
||||
std::move(s.modData()),
|
||||
ledger->info().hash,
|
||||
ledger->info().seq);
|
||||
}
|
||||
|
||||
app_.getNodeStore().sync();
|
||||
|
||||
auto end = std::chrono::system_clock::now();
|
||||
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Flushed " << numFlushed
|
||||
<< " nodes to nodestore from stateMap";
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Flushed " << numTxFlushed
|
||||
<< " nodes to nodestore from txMap";
|
||||
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Flush took "
|
||||
<< (end - start).count() / 1000000000.0
|
||||
<< " seconds";
|
||||
|
||||
if (numFlushed == 0)
|
||||
{
|
||||
JLOG(journal_.fatal()) << __func__ << " : "
|
||||
<< "Flushed 0 nodes from state map";
|
||||
assert(false);
|
||||
}
|
||||
if (numTxFlushed == 0)
|
||||
{
|
||||
JLOG(journal_.warn()) << __func__ << " : "
|
||||
<< "Flushed 0 nodes from tx map";
|
||||
}
|
||||
|
||||
// Make sure calculated hashes are correct
|
||||
if (ledger->stateMap().getHash().as_uint256() != accountHash)
|
||||
{
|
||||
JLOG(journal_.fatal())
|
||||
<< __func__ << " : "
|
||||
<< "State map hash does not match. "
|
||||
<< "Expected hash = " << strHex(accountHash) << "Actual hash = "
|
||||
<< strHex(ledger->stateMap().getHash().as_uint256());
|
||||
Throw<std::runtime_error>("state map hash mismatch");
|
||||
}
|
||||
|
||||
if (ledger->txMap().getHash().as_uint256() != txHash)
|
||||
{
|
||||
JLOG(journal_.fatal())
|
||||
<< __func__ << " : "
|
||||
<< "Tx map hash does not match. "
|
||||
<< "Expected hash = " << strHex(txHash) << "Actual hash = "
|
||||
<< strHex(ledger->txMap().getHash().as_uint256());
|
||||
Throw<std::runtime_error>("tx map hash mismatch");
|
||||
}
|
||||
|
||||
if (ledger->info().hash != ledgerHash)
|
||||
{
|
||||
JLOG(journal_.fatal())
|
||||
<< __func__ << " : "
|
||||
<< "Ledger hash does not match. "
|
||||
<< "Expected hash = " << strHex(ledgerHash)
|
||||
<< "Actual hash = " << strHex(ledger->info().hash);
|
||||
Throw<std::runtime_error>("ledger hash mismatch");
|
||||
}
|
||||
|
||||
JLOG(journal_.info()) << __func__ << " : "
|
||||
<< "Successfully flushed ledger! "
|
||||
<< detail::toString(ledger->info());
|
||||
}
|
||||
|
||||
void
|
||||
ReportingETL::publishLedger(std::shared_ptr<Ledger>& ledger)
|
||||
{
|
||||
app_.getOPs().pubLedger(ledger);
|
||||
|
||||
setLastPublish();
|
||||
}
|
||||
|
||||
bool
|
||||
ReportingETL::publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts)
|
||||
{
|
||||
JLOG(journal_.info()) << __func__ << " : "
|
||||
<< "Attempting to publish ledger = "
|
||||
<< ledgerSequence;
|
||||
size_t numAttempts = 0;
|
||||
while (!stopping_)
|
||||
{
|
||||
auto ledger = app_.getLedgerMaster().getLedgerBySeq(ledgerSequence);
|
||||
|
||||
if (!ledger)
|
||||
{
|
||||
JLOG(journal_.warn())
|
||||
<< __func__ << " : "
|
||||
<< "Trying to publish. Could not find ledger with sequence = "
|
||||
<< ledgerSequence;
|
||||
// We try maxAttempts times to publish the ledger, waiting one
|
||||
// second in between each attempt.
|
||||
// If the ledger is not present in the database after maxAttempts,
|
||||
// we attempt to take over as the writer. If the takeover fails,
|
||||
// doContinuousETL will return, and this node will go back to
|
||||
// publishing.
|
||||
// If the node is in strict read only mode, we simply
|
||||
// skip publishing this ledger and return false indicating the
|
||||
// publish failed
|
||||
if (numAttempts >= maxAttempts)
|
||||
{
|
||||
JLOG(journal_.error()) << __func__ << " : "
|
||||
<< "Failed to publish ledger after "
|
||||
<< numAttempts << " attempts.";
|
||||
if (!readOnly_)
|
||||
{
|
||||
JLOG(journal_.info()) << __func__ << " : "
|
||||
<< "Attempting to become ETL writer";
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(journal_.debug())
|
||||
<< __func__ << " : "
|
||||
<< "In strict read-only mode. "
|
||||
<< "Skipping publishing this ledger. "
|
||||
<< "Beginning fast forward.";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::seconds(1));
|
||||
++numAttempts;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
publishStrand_.post([this, ledger, fname = __func__]() {
|
||||
app_.getOPs().pubLedger(ledger);
|
||||
setLastPublish();
|
||||
JLOG(journal_.info())
|
||||
<< fname << " : "
|
||||
<< "Published ledger. " << detail::toString(ledger->info());
|
||||
});
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
|
||||
ReportingETL::fetchLedgerData(uint32_t idx)
|
||||
{
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Attempting to fetch ledger with sequence = "
|
||||
<< idx;
|
||||
|
||||
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
|
||||
loadBalancer_.fetchLedger(idx, false);
|
||||
JLOG(journal_.trace()) << __func__ << " : "
|
||||
<< "GetLedger reply = " << response->DebugString();
|
||||
return response;
|
||||
}
|
||||
|
||||
std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
|
||||
ReportingETL::fetchLedgerDataAndDiff(uint32_t idx)
|
||||
{
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Attempting to fetch ledger with sequence = "
|
||||
<< idx;
|
||||
|
||||
std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
|
||||
loadBalancer_.fetchLedger(idx, true);
|
||||
JLOG(journal_.trace()) << __func__ << " : "
|
||||
<< "GetLedger reply = " << response->DebugString();
|
||||
return response;
|
||||
}
|
||||
|
||||
std::pair<std::shared_ptr<Ledger>, std::vector<AccountTransactionsData>>
|
||||
ReportingETL::buildNextLedger(
|
||||
std::shared_ptr<Ledger>& next,
|
||||
org::xrpl::rpc::v1::GetLedgerResponse& rawData)
|
||||
{
|
||||
JLOG(journal_.info()) << __func__ << " : "
|
||||
<< "Beginning ledger update";
|
||||
|
||||
LedgerInfo lgrInfo =
|
||||
deserializeHeader(makeSlice(rawData.ledger_header()), true);
|
||||
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Deserialized ledger header. "
|
||||
<< detail::toString(lgrInfo);
|
||||
|
||||
next->setLedgerInfo(lgrInfo);
|
||||
|
||||
next->stateMap().clearSynching();
|
||||
next->txMap().clearSynching();
|
||||
|
||||
std::vector<AccountTransactionsData> accountTxData{
|
||||
insertTransactions(next, rawData)};
|
||||
|
||||
JLOG(journal_.debug())
|
||||
<< __func__ << " : "
|
||||
<< "Inserted all transactions. Number of transactions = "
|
||||
<< rawData.transactions_list().transactions_size();
|
||||
|
||||
for (auto& obj : rawData.ledger_objects().objects())
|
||||
{
|
||||
auto key = uint256::fromVoidChecked(obj.key());
|
||||
if (!key)
|
||||
throw std::runtime_error("Recevied malformed object ID");
|
||||
|
||||
auto& data = obj.data();
|
||||
|
||||
// indicates object was deleted
|
||||
if (data.size() == 0)
|
||||
{
|
||||
JLOG(journal_.trace()) << __func__ << " : "
|
||||
<< "Erasing object = " << *key;
|
||||
if (next->exists(*key))
|
||||
next->rawErase(*key);
|
||||
}
|
||||
else
|
||||
{
|
||||
SerialIter it{data.data(), data.size()};
|
||||
std::shared_ptr<SLE> sle = std::make_shared<SLE>(it, *key);
|
||||
|
||||
if (next->exists(*key))
|
||||
{
|
||||
JLOG(journal_.trace()) << __func__ << " : "
|
||||
<< "Replacing object = " << *key;
|
||||
next->rawReplace(sle);
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(journal_.trace()) << __func__ << " : "
|
||||
<< "Inserting object = " << *key;
|
||||
next->rawInsert(sle);
|
||||
}
|
||||
}
|
||||
}
|
||||
JLOG(journal_.debug())
|
||||
<< __func__ << " : "
|
||||
<< "Inserted/modified/deleted all objects. Number of objects = "
|
||||
<< rawData.ledger_objects().objects_size();
|
||||
|
||||
if (!rawData.skiplist_included())
|
||||
{
|
||||
next->updateSkipList();
|
||||
JLOG(journal_.warn())
|
||||
<< __func__ << " : "
|
||||
<< "tx process is not sending skiplist. This indicates that the tx "
|
||||
"process is parsing metadata instead of doing a SHAMap diff. "
|
||||
"Make sure tx process is running the same code as reporting to "
|
||||
"use SHAMap diff instead of parsing metadata";
|
||||
}
|
||||
|
||||
JLOG(journal_.debug()) << __func__ << " : "
|
||||
<< "Finished ledger update. "
|
||||
<< detail::toString(next->info());
|
||||
return {std::move(next), std::move(accountTxData)};
|
||||
}
|
||||
|
||||
// Database must be populated when this starts
|
||||
std::optional<uint32_t>
|
||||
ReportingETL::runETLPipeline(uint32_t startSequence)
|
||||
{
|
||||
/*
|
||||
* Behold, mortals! This function spawns three separate threads, which talk
|
||||
* to each other via 2 different thread safe queues and 1 atomic variable.
|
||||
* All threads and queues are function local. This function returns when all
|
||||
* of the threads exit. There are two termination conditions: the first is
|
||||
* if the load thread encounters a write conflict. In this case, the load
|
||||
* thread sets writeConflict, an atomic bool, to true, which signals the
|
||||
* other threads to stop. The second termination condition is when the
|
||||
* entire server is shutting down, which is detected in one of three ways:
|
||||
* 1. isStopping() returns true if the server is shutting down
|
||||
* 2. networkValidatedLedgers_.waitUntilValidatedByNetwork returns
|
||||
* false, signaling the wait was aborted.
|
||||
* 3. fetchLedgerDataAndDiff returns an empty optional, signaling the fetch
|
||||
* was aborted.
|
||||
* In all cases, the extract thread detects this condition,
|
||||
* and pushes an empty optional onto the transform queue. The transform
|
||||
* thread, upon popping an empty optional, pushes an empty optional onto the
|
||||
* load queue, and then returns. The load thread, upon popping an empty
|
||||
* optional, returns.
|
||||
*/
|
||||
|
||||
    JLOG(journal_.debug()) << __func__ << " : "
                           << "Starting etl pipeline";
    writing_ = true;

    std::shared_ptr<Ledger> parent = std::const_pointer_cast<Ledger>(
        app_.getLedgerMaster().getLedgerBySeq(startSequence - 1));
    if (!parent)
    {
        assert(false);
        Throw<std::runtime_error>("runETLPipeline: parent ledger is null");
    }

    std::atomic_bool writeConflict = false;
    std::optional<uint32_t> lastPublishedSequence;
    constexpr uint32_t maxQueueSize = 1000;

    ThreadSafeQueue<std::optional<org::xrpl::rpc::v1::GetLedgerResponse>>
        transformQueue{maxQueueSize};

    std::thread extracter{[this,
                           &startSequence,
                           &writeConflict,
                           &transformQueue]() {
        beast::setCurrentThreadName("rippled: ReportingETL extract");
        uint32_t currentSequence = startSequence;

        // There are two stopping conditions here.
        // First, if there is a write conflict in the load thread, the ETL
        // mechanism should stop.
        // The other stopping condition is if the entire server is shutting
        // down. This can be detected in a variety of ways. See the comment
        // at the top of the function.
        while (networkValidatedLedgers_.waitUntilValidatedByNetwork(
                   currentSequence) &&
               !writeConflict && !isStopping())
        {
            auto start = std::chrono::system_clock::now();
            std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{
                fetchLedgerDataAndDiff(currentSequence)};
            // if the fetch is unsuccessful, stop. fetchLedger only returns
            // false if the server is shutting down, or if the ledger was
            // found in the database (which means another process already
            // wrote the ledger that this process was trying to extract;
            // this is a form of a write conflict). Otherwise,
            // fetchLedgerDataAndDiff will keep trying to fetch the
            // specified ledger until successful.
            if (!fetchResponse)
            {
                break;
            }
            auto end = std::chrono::system_clock::now();

            auto time = ((end - start).count()) / 1000000000.0;
            auto tps =
                fetchResponse->transactions_list().transactions_size() / time;

            JLOG(journal_.debug()) << "Extract phase time = " << time
                                   << " . Extract phase tps = " << tps;

            transformQueue.push(std::move(fetchResponse));
            ++currentSequence;
        }
        // empty optional tells the transformer to shut down
        transformQueue.push({});
    }};

    ThreadSafeQueue<std::optional<std::pair<
        std::shared_ptr<Ledger>,
        std::vector<AccountTransactionsData>>>>
        loadQueue{maxQueueSize};
    std::thread transformer{[this,
                             &parent,
                             &writeConflict,
                             &loadQueue,
                             &transformQueue]() {
        beast::setCurrentThreadName("rippled: ReportingETL transform");

        assert(parent);
        parent = std::make_shared<Ledger>(*parent, NetClock::time_point{});
        while (!writeConflict)
        {
            std::optional<org::xrpl::rpc::v1::GetLedgerResponse> fetchResponse{
                transformQueue.pop()};
            // if fetchResponse is an empty optional, the extracter thread has
            // stopped and the transformer should stop as well
            if (!fetchResponse)
            {
                break;
            }
            if (isStopping())
                continue;

            auto start = std::chrono::system_clock::now();
            auto [next, accountTxData] =
                buildNextLedger(parent, *fetchResponse);
            auto end = std::chrono::system_clock::now();

            auto duration = ((end - start).count()) / 1000000000.0;
            JLOG(journal_.debug()) << "transform time = " << duration;
            // The below line needs to execute before pushing to the queue, in
            // order to prevent this thread and the loader thread from
            // accessing the same SHAMap concurrently
            parent = std::make_shared<Ledger>(*next, NetClock::time_point{});
            loadQueue.push(
                std::make_pair(std::move(next), std::move(accountTxData)));
        }
        // empty optional tells the loader to shut down
        loadQueue.push({});
    }};

    std::thread loader{[this,
                        &lastPublishedSequence,
                        &loadQueue,
                        &writeConflict]() {
        beast::setCurrentThreadName("rippled: ReportingETL load");
        size_t totalTransactions = 0;
        double totalTime = 0;
        while (!writeConflict)
        {
            std::optional<std::pair<
                std::shared_ptr<Ledger>,
                std::vector<AccountTransactionsData>>>
                result{loadQueue.pop()};
            // if result is an empty optional, the transformer thread has
            // stopped and the loader should stop as well
            if (!result)
                break;
            if (isStopping())
                continue;

            auto& ledger = result->first;
            auto& accountTxData = result->second;

            auto start = std::chrono::system_clock::now();
            // write to the key-value store
            flushLedger(ledger);

            auto mid = std::chrono::system_clock::now();
            // write to RDBMS
            // if there is a write conflict, some other process has already
            // written this ledger and has taken over as the ETL writer
#ifdef RIPPLED_REPORTING
            if (!dynamic_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
                     ->writeLedgerAndTransactions(
                         ledger->info(), accountTxData))
                writeConflict = true;
#endif
            auto end = std::chrono::system_clock::now();

            if (!writeConflict)
            {
                publishLedger(ledger);
                lastPublishedSequence = ledger->info().seq;
            }
            // print some performance numbers
            auto kvTime = ((mid - start).count()) / 1000000000.0;
            auto relationalTime = ((end - mid).count()) / 1000000000.0;

            size_t numTxns = accountTxData.size();
            totalTime += kvTime;
            totalTransactions += numTxns;
            JLOG(journal_.info())
                << "Load phase of etl : "
                << "Successfully published ledger! Ledger info: "
                << detail::toString(ledger->info())
                << ". txn count = " << numTxns
                << ". key-value write time = " << kvTime
                << ". relational write time = " << relationalTime
                << ". key-value tps = " << numTxns / kvTime
                << ". relational tps = " << numTxns / relationalTime
                << ". total key-value tps = " << totalTransactions / totalTime;
        }
    }};

    // wait for all of the threads to stop
    loader.join();
    extracter.join();
    transformer.join();
    writing_ = false;

    JLOG(journal_.debug()) << __func__ << " : "
                           << "Stopping etl pipeline";

    return lastPublishedSequence;
}
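
// The pipeline above coordinates its three threads through ThreadSafeQueue
// (from ETLHelpers.h, not shown in this diff), using an empty optional as a
// shutdown sentinel. A minimal sketch of the queue behavior the pipeline
// assumes (illustrative only; the real class also bounds its size via
// maxQueueSize):
//
//     template <class T>
//     class ThreadSafeQueue
//     {
//         std::mutex m_;
//         std::condition_variable cv_;
//         std::queue<T> q_;
//
//     public:
//         void
//         push(T v)
//         {
//             {
//                 std::lock_guard<std::mutex> lk(m_);
//                 q_.push(std::move(v));
//             }
//             cv_.notify_one();  // wake a waiting consumer
//         }
//
//         T
//         pop()
//         {
//             std::unique_lock<std::mutex> lk(m_);
//             cv_.wait(lk, [this] { return !q_.empty(); });
//             T v = std::move(q_.front());
//             q_.pop();
//             return v;  // an empty optional signals shutdown to the caller
//         }
//     };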

// Main loop. The software begins monitoring the ledgers that are validated
// by the network. The member networkValidatedLedgers_ keeps track of the
// sequences of ledgers validated by the network. Whenever a ledger is validated
// by the network, the software looks for that ledger in the database. Once the
// ledger is found in the database, the software publishes that ledger to the
// ledgers stream. If a network validated ledger is not found in the database
// after a certain amount of time, then the software attempts to take over
// responsibility of the ETL process, where it writes new ledgers to the
// database. The software will relinquish control of the ETL process if it
// detects that another process has taken over ETL.
void
ReportingETL::monitor()
{
    auto ledger = std::const_pointer_cast<Ledger>(
        app_.getLedgerMaster().getValidatedLedger());
    if (!ledger)
    {
        JLOG(journal_.info()) << __func__ << " : "
                              << "Database is empty. Will download a ledger "
                                 "from the network.";
        if (startSequence_)
        {
            JLOG(journal_.info())
                << __func__ << " : "
                << "ledger sequence specified in config. "
                << "Will begin ETL process starting with ledger "
                << *startSequence_;
            ledger = loadInitialLedger(*startSequence_);
        }
        else
        {
            JLOG(journal_.info())
                << __func__ << " : "
                << "Waiting for next ledger to be validated by network...";
            std::optional<uint32_t> mostRecentValidated =
                networkValidatedLedgers_.getMostRecent();
            if (mostRecentValidated)
            {
                JLOG(journal_.info()) << __func__ << " : "
                                      << "Ledger " << *mostRecentValidated
                                      << " has been validated. "
                                      << "Downloading...";
                ledger = loadInitialLedger(*mostRecentValidated);
            }
            else
            {
                JLOG(journal_.info()) << __func__ << " : "
                                      << "The wait for the next validated "
                                      << "ledger has been aborted. "
                                      << "Exiting monitor loop";
                return;
            }
        }
    }
    else
    {
        if (startSequence_)
        {
            Throw<std::runtime_error>(
                "start sequence specified but db is already populated");
        }
        JLOG(journal_.info())
            << __func__ << " : "
            << "Database already populated. Picking up from the tip of history";
    }
    if (!ledger)
    {
        JLOG(journal_.error())
            << __func__ << " : "
            << "Failed to load initial ledger. Exiting monitor loop";
        return;
    }
    else
    {
        publishLedger(ledger);
    }
    uint32_t nextSequence = ledger->info().seq + 1;

    JLOG(journal_.debug()) << __func__ << " : "
                           << "Database is populated. "
                           << "Starting monitor loop. sequence = "
                           << nextSequence;
    while (!stopping_ &&
           networkValidatedLedgers_.waitUntilValidatedByNetwork(nextSequence))
    {
        JLOG(journal_.info()) << __func__ << " : "
                              << "Ledger with sequence = " << nextSequence
                              << " has been validated by the network. "
                              << "Attempting to find in database and publish";
        // Attempt to take over responsibility of ETL writer after 10 failed
        // attempts to publish the ledger. publishLedger() fails if the
        // ledger that has been validated by the network is not found in the
        // database after the specified number of attempts. publishLedger()
        // waits one second between each attempt to read the ledger from the
        // database
        //
        // In strict read-only mode, when the software fails to find a
        // ledger in the database that has been validated by the network,
        // the software will only try to publish subsequent ledgers once,
        // until one of those ledgers is found in the database. Once the
        // software successfully publishes a ledger, the software will fall
        // back to the normal behavior of trying several times to publish
        // the ledger that has been validated by the network. In this
        // manner, a reporting process running in read-only mode does not
        // need to restart if the database is wiped.
        constexpr size_t timeoutSeconds = 10;
        bool success = publishLedger(nextSequence, timeoutSeconds);
        if (!success)
        {
            JLOG(journal_.warn())
                << __func__ << " : "
                << "Failed to publish ledger with sequence = " << nextSequence
                << " . Beginning ETL";
            // runETLPipeline returns the most recent sequence published, or
            // an empty optional if no sequence was published
            std::optional<uint32_t> lastPublished =
                runETLPipeline(nextSequence);
            JLOG(journal_.info()) << __func__ << " : "
                                  << "Aborting ETL. Falling back to publishing";
            // if no ledger was published, don't increment nextSequence
            if (lastPublished)
                nextSequence = *lastPublished + 1;
        }
        else
        {
            ++nextSequence;
        }
    }
}

void
ReportingETL::monitorReadOnly()
{
    JLOG(journal_.debug()) << "Starting reporting in strict read only mode";
    std::optional<uint32_t> mostRecent =
        networkValidatedLedgers_.getMostRecent();
    if (!mostRecent)
        return;
    uint32_t sequence = *mostRecent;
    bool success = true;
    while (!stopping_ &&
           networkValidatedLedgers_.waitUntilValidatedByNetwork(sequence))
    {
        success = publishLedger(sequence, success ? 30 : 1);
        ++sequence;
    }
}

void
ReportingETL::doWork()
{
    worker_ = std::thread([this]() {
        beast::setCurrentThreadName("rippled: ReportingETL worker");
        if (readOnly_)
            monitorReadOnly();
        else
            monitor();
    });
}

ReportingETL::ReportingETL(Application& app)
    : app_(app)
    , journal_(app.journal("ReportingETL"))
    , publishStrand_(app_.getIOService())
    , loadBalancer_(*this)
{
    // if present, get endpoint from config
    if (app_.config().exists("reporting"))
    {
#ifndef RIPPLED_REPORTING
        Throw<std::runtime_error>(
            "Config file specifies reporting, but software was not built with "
            "-Dreporting=1. To use reporting, configure CMake with "
            "-Dreporting=1");
#endif
        if (!app_.config().useTxTables())
            Throw<std::runtime_error>(
                "Reporting requires tx tables. Set use_tx_tables=1 in config "
                "file, under [ledger_tx_tables] section");
        Section section = app_.config().section("reporting");

        JLOG(journal_.debug()) << "Parsing config info";

        auto& vals = section.values();
        for (auto& v : vals)
        {
            JLOG(journal_.debug()) << "val is " << v;
            Section source = app_.config().section(v);

            auto optIp = source.get("source_ip");
            if (!optIp)
                continue;

            auto optWsPort = source.get("source_ws_port");
            if (!optWsPort)
                continue;

            auto optGrpcPort = source.get("source_grpc_port");
            if (!optGrpcPort)
            {
                // add source without grpc port
                // used in read-only mode to detect when new ledgers have
                // been validated. Used for publishing
                if (app_.config().reportingReadOnly())
                    loadBalancer_.add(*optIp, *optWsPort);
                continue;
            }

            loadBalancer_.add(*optIp, *optWsPort, *optGrpcPort);
        }

        // this is true iff --reportingReadOnly was passed via command line
        readOnly_ = app_.config().reportingReadOnly();

        // if --reportingReadOnly was not passed via command line, check config
        // file. Command line takes precedence
        if (!readOnly_)
        {
            auto const optRO = section.get("read_only");
            if (optRO)
            {
                readOnly_ = (*optRO == "true" || *optRO == "1");
                app_.config().setReportingReadOnly(readOnly_);
            }
        }

        // lambda throws a useful message if string to integer conversion fails
        auto asciiToIntThrows =
            [](auto& dest, std::string const& src, char const* onError) {
                char const* const srcEnd = src.data() + src.size();
                auto [ptr, err] = std::from_chars(src.data(), srcEnd, dest);

                if (err == std::errc())
                    // skip whitespace at end of string
                    while (ptr != srcEnd &&
                           std::isspace(static_cast<unsigned char>(*ptr)))
                        ++ptr;

                // throw if
                //  o conversion error or
                //  o entire string is not consumed
                if (err != std::errc() || ptr != srcEnd)
                    Throw<std::runtime_error>(onError + src);
            };

        // handle command line arguments
        if (app_.config().START_UP == Config::StartUpType::FRESH && !readOnly_)
        {
            // set a value so we can dereference
            startSequence_ = 0;
            asciiToIntThrows(
                *startSequence_,
                app_.config().START_LEDGER,
                "Expected integral START_LEDGER command line argument. Got: ");
        }
        // if not passed via command line, check config for start sequence
        if (!startSequence_)
        {
            auto const optStartSeq = section.get("start_sequence");
            if (optStartSeq)
            {
                // set a value so we can dereference
                startSequence_ = 0;
                asciiToIntThrows(
                    *startSequence_,
                    *optStartSeq,
                    "Expected integral start_sequence config entry. Got: ");
            }
        }

        auto const optFlushInterval = section.get("flush_interval");
        if (optFlushInterval)
            asciiToIntThrows(
                flushInterval_,
                *optFlushInterval,
                "Expected integral flush_interval config entry. Got: ");

        auto const optNumMarkers = section.get("num_markers");
        if (optNumMarkers)
            asciiToIntThrows(
                numMarkers_,
                *optNumMarkers,
                "Expected integral num_markers config entry. Got: ");
    }
}
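
// For reference, a config stanza matching what the constructor above parses
// might look like the following (section name, address, and port values are
// illustrative assumptions, not defaults):
//
//     [reporting]
//     etl_source
//     read_only=0
//
//     [etl_source]
//     source_ip=127.0.0.1
//     source_ws_port=6005
//     source_grpc_port=50051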

} // namespace ripple
@@ -1,367 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2020 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
#define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED

#include <xrpld/app/main/Application.h>
#include <xrpld/app/rdb/RelationalDatabase.h>
#include <xrpld/app/reporting/ETLHelpers.h>
#include <xrpld/app/reporting/ETLSource.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/GRPCHandlers.h>
#include <xrpld/rpc/Role.h>
#include <xrpld/rpc/detail/Handler.h>
#include <xrpld/rpc/detail/RPCHelpers.h>
#include <xrpld/rpc/detail/Tuning.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/resource/Charge.h>

#include <boost/algorithm/string.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/websocket.hpp>

#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <grpcpp/grpcpp.h>

#include <condition_variable>
#include <mutex>
#include <queue>

#include <chrono>
namespace ripple {

using AccountTransactionsData = RelationalDatabase::AccountTransactionsData;

/**
 * This class is responsible for continuously extracting data from a
 * p2p node, and writing that data to the databases. Usually, multiple different
 * processes share access to the same network accessible databases, in which
 * case only one such process is performing ETL and writing to the database. The
 * other processes simply monitor the database for new ledgers, and publish
 * those ledgers to the various subscription streams. If a monitoring process
 * determines that the ETL writer has failed (no new ledgers written for some
 * time), the process will attempt to become the ETL writer. If there are
 * multiple monitoring processes that try to become the ETL writer at the same
 * time, one will win out, and the others will fall back to
 * monitoring/publishing. In this sense, this class dynamically transitions from
 * monitoring to writing and from writing to monitoring, based on the activity
 * of other processes running on different machines.
 */
class ReportingETL
{
private:
    Application& app_;

    beast::Journal journal_;

    std::thread worker_;

    /// Strand to ensure that ledgers are published in order.
    /// If ETL is started far behind the network, ledgers will be written and
    /// published very rapidly. Monitoring processes will publish ledgers as
    /// they are written. However, to publish a ledger, the monitoring process
    /// needs to read all of the transactions for that ledger from the database.
    /// Reading the transactions from the database requires network calls, which
    /// can be slow. It is imperative however that the monitoring processes keep
    /// up with the writer, else the monitoring processes will not be able to
    /// detect if the writer failed. Therefore, publishing each ledger (which
    /// includes reading all of the transactions from the database) is done from
    /// the application wide asio io_service, and a strand is used to ensure
    /// ledgers are published in order
    boost::asio::io_context::strand publishStrand_;
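
    /// A sketch of the ordering pattern this strand enables (illustrative
    /// only, assuming the publish path dispatches through this strand; not
    /// part of the original header):
    ///
    ///     boost::asio::post(publishStrand_, [this, ledger]() {
    ///         // work posted to a strand runs serialized, in submission
    ///         // order, so subscribers see ledgers in sequence order
    ///         publishLedger(ledger);
    ///     });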

    /// Mechanism for communicating with ETL sources. ETLLoadBalancer wraps an
    /// arbitrary number of ETL sources and load balances ETL requests across
    /// those sources.
    ETLLoadBalancer loadBalancer_;

    /// Mechanism for detecting when the network has validated a new ledger.
    /// This class provides a way to wait for a specific ledger to be validated
    NetworkValidatedLedgers networkValidatedLedgers_;
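
    /// Typical wait pattern (sketch; the call returns false when the wait
    /// is aborted at shutdown):
    ///
    ///     if (networkValidatedLedgers_.waitUntilValidatedByNetwork(seq))
    ///     {
    ///         // seq has been validated by the network; it is now safe to
    ///         // look for it in the database and publish it
    ///     }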

    /// Whether the software is stopping
    std::atomic_bool stopping_ = false;

    /// Used to determine when to write to the database during the initial
    /// ledger download. By default, the software downloads an entire ledger and
    /// then writes to the database. If flushInterval_ is non-zero, the software
    /// will write to the database as new ledger data (SHAMap leaf nodes)
    /// arrives. It is not necessarily more efficient to write the data as it
    /// arrives, as different SHAMap leaf nodes share the same SHAMap inner
    /// nodes; flushing prematurely can result in the same SHAMap inner node
    /// being written to the database more than once. It is recommended to use
    /// the default value of 0 for this variable; however, different values can
    /// be experimented with if better performance is desired.
    size_t flushInterval_ = 0;

    /// This variable controls the number of GetLedgerData calls that will be
    /// executed in parallel during the initial ledger download. GetLedgerData
    /// allows clients to page through a ledger over many RPC calls.
    /// GetLedgerData returns a marker that is used as an offset in a subsequent
    /// call. If numMarkers_ is greater than 1, there will be multiple chains of
    /// GetLedgerData calls iterating over different parts of the same ledger in
    /// parallel. This can dramatically speed up the time to download the
    /// initial ledger. However, a higher value for this member variable puts
    /// more load on the ETL source.
    size_t numMarkers_ = 2;

    /// Whether the process is in strict read-only mode. In strict read-only
    /// mode, the process will never attempt to become the ETL writer, and will
    /// only publish ledgers as they are written to the database.
    bool readOnly_ = false;

    /// Whether the process is writing to the database. Used by server_info
    std::atomic_bool writing_ = false;

    /// Ledger sequence to start ETL from. If this is empty, ETL will start from
    /// the next ledger validated by the network. If this is set, and the
    /// database is already populated, an error is thrown.
    std::optional<uint32_t> startSequence_;

    /// The time that the most recently published ledger was published. Used by
    /// server_info
    std::chrono::time_point<std::chrono::system_clock> lastPublish_;

    std::mutex publishTimeMtx_;

    std::chrono::time_point<std::chrono::system_clock>
    getLastPublish()
    {
        std::unique_lock<std::mutex> lck(publishTimeMtx_);
        return lastPublish_;
    }

    void
    setLastPublish()
    {
        std::unique_lock<std::mutex> lck(publishTimeMtx_);
        lastPublish_ = std::chrono::system_clock::now();
    }

    /// Download a ledger with specified sequence in full, via GetLedgerData,
    /// and write the data to the databases. This takes several minutes or
    /// longer.
    /// @param sequence the sequence of the ledger to download
    /// @return The ledger downloaded, with a full transaction and account state
    /// map
    std::shared_ptr<Ledger>
    loadInitialLedger(uint32_t sequence);

    /// Run ETL. Extracts ledgers and writes them to the database, until a write
    /// conflict occurs (or the server shuts down).
    /// @note database must already be populated when this function is called
    /// @param startSequence the first ledger to extract
    /// @return the last ledger written to the database, if any
    std::optional<uint32_t>
    runETLPipeline(uint32_t startSequence);

    /// Monitor the network for newly validated ledgers. Also monitor the
    /// database to see if any process is writing those ledgers. This function
    /// is called when the application starts, and will only return when the
    /// application is shutting down. If the software detects the database is
    /// empty, this function will call loadInitialLedger(). If the software
    /// detects ledgers are not being written, this function calls
    /// runETLPipeline(). Otherwise, this function publishes ledgers as they are
    /// written to the database.
    void
    monitor();

    /// Monitor the database for newly written ledgers.
    /// Similar to monitor(), except this function will never call
    /// runETLPipeline() or loadInitialLedger(). This function only publishes
    /// ledgers as they are written to the database.
    void
    monitorReadOnly();

    /// Extract data for a particular ledger from an ETL source. This function
    /// continuously tries to extract the specified ledger (using all available
    /// ETL sources) until the extraction succeeds, or the server shuts down.
    /// @param sequence sequence of the ledger to extract
    /// @return ledger header and transaction+metadata blobs. Empty optional
    /// if the server is shutting down
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedgerData(uint32_t sequence);

    /// Extract data for a particular ledger from an ETL source. This function
    /// continuously tries to extract the specified ledger (using all available
    /// ETL sources) until the extraction succeeds, or the server shuts down.
    /// @param sequence sequence of the ledger to extract
    /// @return ledger header, transaction+metadata blobs, and all ledger
    /// objects created, modified or deleted between this ledger and the parent.
    /// Empty optional if the server is shutting down
    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
    fetchLedgerDataAndDiff(uint32_t sequence);

    /// Insert all of the extracted transactions into the ledger
    /// @param ledger ledger to insert transactions into
    /// @param data data extracted from an ETL source
    /// @return struct that contains the necessary info to write to the
    /// transactions and account_transactions tables in Postgres (mostly
    /// transaction hashes, corresponding nodestore hashes and affected
    /// accounts)
    std::vector<AccountTransactionsData>
    insertTransactions(
        std::shared_ptr<Ledger>& ledger,
        org::xrpl::rpc::v1::GetLedgerResponse& data);

    /// Build the next ledger using the previous ledger and the extracted data.
    /// This function calls insertTransactions()
    /// @note rawData should be data that corresponds to the ledger immediately
    /// following parent
    /// @param parent the previous ledger
    /// @param rawData data extracted from an ETL source
    /// @return the newly built ledger and data to write to Postgres
    std::pair<std::shared_ptr<Ledger>, std::vector<AccountTransactionsData>>
    buildNextLedger(
        std::shared_ptr<Ledger>& parent,
        org::xrpl::rpc::v1::GetLedgerResponse& rawData);

    /// Write all new data to the key-value store
    /// @param ledger ledger with new data to write
    void
    flushLedger(std::shared_ptr<Ledger>& ledger);

    /// Attempt to read the specified ledger from the database, and then publish
    /// that ledger to the ledgers stream.
    /// @param ledgerSequence the sequence of the ledger to publish
    /// @param maxAttempts the number of times to attempt to read the ledger
    /// from the database. 1 attempt per second
    /// @return whether the ledger was found in the database and published
    bool
    publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts = 10);

    /// Publish the passed in ledger
    /// @param ledger the ledger to publish
    void
    publishLedger(std::shared_ptr<Ledger>& ledger);

    /// Consume data from a queue and insert that data into the ledger.
    /// This function will continue to pull from the queue until the queue
    /// returns nullptr. This is used during the initial ledger download
    /// @param ledger the ledger to insert data into
    /// @param writeQueue the queue with extracted data
    void
    consumeLedgerData(
        std::shared_ptr<Ledger>& ledger,
        ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue);

public:
    explicit ReportingETL(Application& app);

    ~ReportingETL()
    {
    }

    NetworkValidatedLedgers&
    getNetworkValidatedLedgers()
    {
        return networkValidatedLedgers_;
    }

    bool
    isStopping() const
    {
        return stopping_;
    }

    /// Get the number of markers to use during the initial ledger download.
    /// This is equivalent to the degree of parallelism during the initial
    /// ledger download
    /// @return the number of markers
    uint32_t
    getNumMarkers()
    {
        return numMarkers_;
    }

    Application&
    getApplication()
    {
        return app_;
    }

    beast::Journal&
    getJournal()
    {
        return journal_;
    }

    Json::Value
    getInfo()
    {
        Json::Value result(Json::objectValue);

        result["etl_sources"] = loadBalancer_.toJson();
        result["is_writer"] = writing_.load();
        auto last = getLastPublish();
        if (last.time_since_epoch().count() != 0)
            result["last_publish_time"] =
                to_string(std::chrono::floor<std::chrono::microseconds>(last));
        return result;
    }

    /// start all of the necessary components and begin ETL
    void
    start()
    {
        JLOG(journal_.info()) << "Starting reporting etl";
        assert(app_.config().reporting());
        assert(app_.config().standalone());
        assert(app_.config().reportingReadOnly() == readOnly_);

        stopping_ = false;

        loadBalancer_.start();
        doWork();
    }

    void
    stop()
    {
        JLOG(journal_.info()) << "onStop called";
        JLOG(journal_.debug()) << "Stopping Reporting ETL";
        stopping_ = true;
        networkValidatedLedgers_.stop();
        loadBalancer_.stop();

        JLOG(journal_.debug()) << "Stopped loadBalancer";
        if (worker_.joinable())
            worker_.join();

        JLOG(journal_.debug()) << "Joined worker thread";
    }

    ETLLoadBalancer&
    getETLLoadBalancer()
    {
        return loadBalancer_;
    }

private:
    void
    doWork();
};

} // namespace ripple
#endif
@@ -129,10 +129,6 @@ private:
    */
    bool RUN_STANDALONE = false;

    bool RUN_REPORTING = false;

    bool REPORTING_READ_ONLY = false;

    bool USE_TX_TABLES = true;

    /** Determines if the server will sign a tx, given an account's secret seed.
@@ -362,11 +358,7 @@ public:
    {
        return RUN_STANDALONE;
    }
    bool
    reporting() const
    {
        return RUN_REPORTING;
    }

    bool
    mem_backend() const
    {
@@ -389,18 +381,6 @@ public:
        return USE_TX_TABLES;
    }

    bool
    reportingReadOnly() const
    {
        return REPORTING_READ_ONLY;
    }

    void
    setReportingReadOnly(bool b)
    {
        REPORTING_READ_ONLY = b;
    }

    bool
    canSign() const
    {

@@ -88,7 +88,6 @@ public:

    Config::StartUpType startUp = Config::NORMAL;
    bool standAlone = false;
    bool reporting = false;
    boost::filesystem::path dataDir;
    // Indicates whether or not to return the `globalPragma`
    // from commonPragma()
@@ -120,8 +119,7 @@ public:
        beast::Journal journal)
        // Use temporary files or regular DB files?
        : DatabaseCon(
              setup.standAlone && !setup.reporting &&
                      setup.startUp != Config::LOAD &&
              setup.standAlone && setup.startUp != Config::LOAD &&
                  setup.startUp != Config::LOAD_FILE &&
                  setup.startUp != Config::REPLAY
                  ? ""

File diff suppressed because it is too large
@@ -1,520 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2020 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifdef RIPPLED_REPORTING
#ifndef RIPPLE_CORE_PG_H_INCLUDED
#define RIPPLE_CORE_PG_H_INCLUDED

#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/Log.h>
#include <xrpl/protocol/Protocol.h>
#include <boost/lexical_cast.hpp>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <libpq-fe.h>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace ripple {

// These postgres structs must be freed only by the postgres API.
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;

/** first: command
 * second: parameter values
 *
 * The 2nd member takes an optional string to
 * distinguish between NULL parameters and empty strings. An empty
 * item corresponds to a NULL parameter.
 *
 * Postgres reads each parameter as a c-string, regardless of actual type.
 * Binary types (bytea) need to be converted to hex and prepended with
 * \x ("\\x").
 */
using pg_params =
    std::pair<char const*, std::vector<std::optional<std::string>>>;
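
// A sketch of building pg_params per the comment above (SQL and values are
// illustrative): the second parameter is NULL, and the third is a bytea
// value hex-encoded and prefixed with "\x".
//
//     pg_params params{
//         "INSERT INTO t (a, b, c) VALUES ($1, $2, $3)",
//         {std::string{"42"}, std::nullopt, std::string{"\\xDEADBEEF"}}};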

/** Parameter values for pg API. */
using pg_formatted_params = std::vector<char const*>;

/** Parameters for managing postgres connections. */
struct PgConfig
{
    /** Maximum connections allowed to db. */
    std::size_t max_connections{std::numeric_limits<std::size_t>::max()};
    /** Close idle connections past this duration. */
    std::chrono::seconds timeout{600};

    /** Index of DB connection parameter names. */
    std::vector<char const*> keywordsIdx;
    /** DB connection parameter names. */
    std::vector<std::string> keywords;
    /** Index of DB connection parameter values. */
    std::vector<char const*> valuesIdx;
    /** DB connection parameter values. */
    std::vector<std::string> values;
};

//-----------------------------------------------------------------------------

/** Class that operates on postgres query results.
 *
 * The functions that return results do not check first whether the
 * expected results are actually there. Therefore, the caller first needs
 * to check whether or not a valid response was returned using the operator
 * bool() overload. If the number of tuples or fields is unknown, then check
 * those. Each result field should be checked for null before attempting
 * to return results. Finally, the caller must know the type of the field
 * before calling the corresponding function to return a field. Postgres
 * internally stores each result field as a null-terminated string.
 */
class PgResult
{
    // The result object must be freed using the libpq API PQclear() call.
    pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }};
    std::optional<std::pair<ExecStatusType, std::string>> error_;

public:
    /** Constructor for when the process is stopping. */
    PgResult()
    {
    }

    /** Constructor for successful query results.
     *
     * @param result Query result.
     */
    explicit PgResult(pg_result_type&& result) : result_(std::move(result))
    {
    }

    /** Constructor for failed query results.
     *
     * @param result Query result that contains error information.
     * @param conn Postgres connection that contains error information.
     */
    PgResult(PGresult* result, PGconn* conn)
        : error_({PQresultStatus(result), PQerrorMessage(conn)})
    {
    }

    /** Return field as a null-terminated string pointer.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    char const*
    c_str(int ntuple = 0, int nfield = 0) const
    {
        return PQgetvalue(result_.get(), ntuple, nfield);
    }

    /** Return field as equivalent to Postgres' INT type (32 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int32_t
    asInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int32_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }

    /** Return field as equivalent to Postgres' BIGINT type (64 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int64_t
    asBigInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int64_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }

    /** Returns whether the field is NULL or not.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Whether field is NULL.
     */
    bool
    isNull(int ntuple = 0, int nfield = 0) const
    {
        return PQgetisnull(result_.get(), ntuple, nfield);
    }

    /** Check whether a valid response occurred.
     *
     * @return Whether or not the query returned a valid response.
     */
    operator bool() const
    {
        return result_ != nullptr;
    }

    /** Message describing the query results suitable for diagnostics.
     *
     * If error, then the postgres error type and message are returned.
     * Otherwise, "ok".
     *
     * @return Query result message.
     */
    std::string
    msg() const;

    /** Get number of rows in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result rows.
     */
    int
    ntuples() const
    {
        return PQntuples(result_.get());
    }

    /** Get number of fields in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result fields.
     */
    int
    nfields() const
    {
        return PQnfields(result_.get());
    }

    /** Return result status of the command.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Result status of the command.
     */
    ExecStatusType
    status() const
    {
        return PQresultStatus(result_.get());
    }
};
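
// A typical checked read of a PgResult, following the rules in the class
// comment above (query text is illustrative; pgQuery is assumed to be a
// PgQuery instance, declared further below):
//
//     PgResult res = pgQuery("SELECT max(ledger_seq) FROM ledgers");
//     if (!res)
//         return;  // stopping, or the query failed; see res.msg()
//     if (res.ntuples() == 1 && res.nfields() == 1 && !res.isNull(0, 0))
//     {
//         std::int64_t seq = res.asBigInt(0, 0);
//         // use seq ...
//     }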

/* Class that contains and operates upon a postgres connection. */
class Pg
{
    friend class PgPool;
    friend class PgQuery;

    PgConfig const& config_;
    beast::Journal const j_;
    bool& stop_;
    std::mutex& mutex_;

    // The connection object must be freed using the libpq API PQfinish() call.
    pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }};

    /** Clear results from the connection.
     *
     * Results from previous commands must be cleared before new commands
     * can be processed. This function should be called on connections
     * that weren't processed completely before being reused, such as
     * when being checked in.
     *
     * @return whether or not the connection still exists.
     */
    bool
    clear();

    /** Connect to postgres.
     *
     * Idempotently connects to postgres by first checking whether an
     * existing connection is already present. If a connection is not present
     * or is in an errored state, reconnects to the database.
     */
    void
    connect();

    /** Disconnect from postgres. */
    void
    disconnect()
    {
        conn_.reset();
    }

    /** Execute postgres query.
     *
     * If parameters are included, then the command should contain only a
     * single SQL statement. If no parameters, then multiple SQL statements
     * delimited by semi-colons can be processed. The response is from
     * the last command executed.
     *
     * @param command postgres API command string.
     * @param nParams postgres API number of parameters.
     * @param values postgres API array of parameters.
     * @return Query result object.
     */
    PgResult
    query(char const* command, std::size_t nParams, char const* const* values);

    /** Execute postgres query with no parameters.
     *
     * @param command Query string.
     * @return Query result object.
     */
    PgResult
    query(char const* command)
    {
        return query(command, 0, nullptr);
    }

    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command and parameter values.
     * @return Query result object.
     */
    PgResult
    query(pg_params const& dbParams);

    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
     */
    void
    bulkInsert(char const* table, std::string const& records);
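
    // The records argument uses Postgres' COPY text format: tab-separated
    // fields with one newline-terminated row per record, for example
    // (illustrative values): "1\t\\xDEADBEEF\n2\t\\xFEEDFACE\n".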

public:
    /** Constructor for Pg class.
     *
     * @param config Config parameters.
     * @param j Logger object.
     * @param stop Reference to connection pool's stop flag.
     * @param mutex Reference to connection pool's mutex.
     */
    Pg(PgConfig const& config,
       beast::Journal const j,
       bool& stop,
       std::mutex& mutex)
        : config_(config), j_(j), stop_(stop), mutex_(mutex)
    {
    }
};

//-----------------------------------------------------------------------------

/** Database connection pool.
 *
 * Allows re-use of postgres connections. Postgres connections are created
 * as needed until a configurable limit is reached. After use, each connection
 * is placed in a container ordered by time of use. Each request for
 * a connection grabs the most recently used connection from the container.
 * If none are available, a new connection is used (up to the configured
 * limit). Idle connections are destroyed periodically after a configurable
 * timeout duration.
 *
 * This should be stored as a shared pointer so PgQuery objects can safely
 * outlive it.
 */
class PgPool
{
    friend class PgQuery;

    using clock_type = std::chrono::steady_clock;

    PgConfig config_;
    beast::Journal const j_;
    std::mutex mutex_;
    std::condition_variable cond_;
    std::size_t connections_{};
    bool stop_{false};

    /** Idle database connections ordered by timestamp to allow timing out. */
    std::multimap<std::chrono::time_point<clock_type>, std::unique_ptr<Pg>>
        idle_;

    /** Get a postgres connection object.
     *
     * Returns the most recent idle connection in the pool, if available.
     * Otherwise, returns a new connection unless we're at the threshold.
     * If so, then waits until a connection becomes available.
     *
     * @return Postgres object.
     */
    std::unique_ptr<Pg>
    checkout();

    /** Return a postgres object to the pool for reuse.
     *
     * If the connection is healthy, place it in the pool for reuse. After
     * calling this, the caller no longer has a connection unless checkout()
     * is called.
     *
     * @param pg Pg object.
     */
    void
    checkin(std::unique_ptr<Pg>& pg);

public:
    /** Connection pool constructor.
     *
     * @param pgConfig Postgres config.
     * @param j Logger object.
     */
    PgPool(Section const& pgConfig, beast::Journal j);

    /** Initiate idle connection timer.
     *
     * The PgPool object needs to be fully constructed to support asynchronous
     * operations.
     */
    void
    setup();

    /** Prepare for process shutdown. */
    void
    stop();

    /** Disconnect idle postgres connections. */
    void
    idleSweeper();
};

//-----------------------------------------------------------------------------

/** Class to query postgres.
 *
 * This class should be used by functions outside of this
 * compilation unit for querying postgres. It automatically acquires and
 * relinquishes a database connection to handle each query.
 */
class PgQuery
{
private:
    std::shared_ptr<PgPool> pool_;
    std::unique_ptr<Pg> pg_;

public:
    PgQuery() = delete;

    PgQuery(std::shared_ptr<PgPool> const& pool)
        : pool_(pool), pg_(pool->checkout())
    {
    }

    ~PgQuery()
    {
        pool_->checkin(pg_);
    }

    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command with parameters.
     * @return Result of query, including errors.
     */
    PgResult
    operator()(pg_params const& dbParams)
    {
        if (!pg_)  // It means we're stopping. Return empty result.
            return PgResult();
        return pg_->query(dbParams);
    }

    /** Execute postgres query with only a command statement.
     *
     * @param command Command statement.
     * @return Result of query, including errors.
     */
    PgResult
    operator()(char const* command)
    {
        return operator()(pg_params{command, {}});
    }

    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
     */
    void
    bulkInsert(char const* table, std::string const& records)
    {
        pg_->bulkInsert(table, records);
    }
};

//-----------------------------------------------------------------------------

/** Create Postgres connection pool manager.
 *
 * @param pgConfig Configuration for Postgres.
 * @param j Logger object.
 * @return Postgres connection pool manager.
 */
std::shared_ptr<PgPool>
make_PgPool(Section const& pgConfig, beast::Journal j);

/** Initialize the Postgres schema.
 *
 * This function ensures that the database is running the latest version
 * of the schema.
 *
 * @param pool Postgres connection pool manager.
 */
void
initSchema(std::shared_ptr<PgPool> const& pool);
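
// An end-to-end usage sketch of the declarations above (config section name
// and SQL are illustrative assumptions):
//
//     auto pool = make_PgPool(config.section("postgres"), journal);
//     pool->setup();        // start the idle-connection timer
//     initSchema(pool);     // bring the schema up to date
//     {
//         PgQuery q(pool);  // checks a connection out of the pool
//         PgResult res = q("SELECT count(*) FROM ledgers");
//         if (res && !res.isNull())
//             JLOG(journal.info()) << "ledgers: " << res.asBigInt();
//     }                     // ~PgQuery checks the connection back in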

} // namespace ripple

#endif // RIPPLE_CORE_PG_H_INCLUDED
#endif // RIPPLED_REPORTING

@@ -385,11 +385,6 @@ Config::setup(

    // Update default values
    load();
    if (exists("reporting"))
    {
        RUN_REPORTING = true;
        RUN_STANDALONE = true;
    }
    {
        // load() may have set a new value for the dataDir
        std::string const dbPath(legacy("database_path"));

@@ -109,7 +109,6 @@ setup_DatabaseCon(Config const& c, std::optional<beast::Journal> j)

    setup.startUp = c.START_UP;
    setup.standAlone = c.standalone();
    setup.reporting = c.reporting();
    setup.dataDir = c.legacy("database_path");
    if (!setup.standAlone && setup.dataDir.empty())
    {

@@ -39,29 +39,6 @@ namespace NodeStore {

class Backend
{
public:
    template <typename T>
    struct Counters
    {
        Counters() = default;
        Counters(Counters const&) = default;

        template <typename U>
        Counters(Counters<U> const& other)
            : writeDurationUs(other.writeDurationUs)
            , writeRetries(other.writeRetries)
            , writesDelayed(other.writesDelayed)
            , readRetries(other.readRetries)
            , readErrors(other.readErrors)
        {
        }

        T writeDurationUs = {};
        T writeRetries = {};
        T writesDelayed = {};
        T readRetries = {};
        T readErrors = {};
    };

    /** Destroy the backend.

        All open files are closed and flushed. If there are batched writes
@@ -174,17 +151,6 @@ public:
    /** Returns the number of file descriptors the backend expects to need. */
    virtual int
    fdRequired() const = 0;

    /** Returns read and write stats.

        @note The Counters struct is specific to and only used
        by CassandraBackend.
    */
    virtual std::optional<Counters<std::uint64_t>>
    counters() const
    {
        return std::nullopt;
    }
};

} // namespace NodeStore

@@ -315,17 +315,6 @@ private:
    virtual void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) = 0;

    /** Retrieve backend read and write stats.

        @note The Counters struct is specific to and only used
        by CassandraBackend.
    */
    virtual std::optional<Backend::Counters<std::uint64_t>>
    getCounters() const
    {
        return std::nullopt;
    }

    void
    threadEntry();
};

@@ -1,983 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2020 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifdef RIPPLED_REPORTING
|
||||
|
||||
#include <cassandra.h>
|
||||
#include <libpq-fe.h>
|
||||
|
||||
#include <xrpld/nodestore/Backend.h>
|
||||
#include <xrpld/nodestore/Factory.h>
|
||||
#include <xrpld/nodestore/Manager.h>
|
||||
#include <xrpld/nodestore/detail/DecodedBlob.h>
|
||||
#include <xrpld/nodestore/detail/EncodedBlob.h>
|
||||
#include <xrpld/nodestore/detail/codec.h>
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/basics/StringUtilities.h>
|
||||
#include <xrpl/basics/contract.h>
|
||||
#include <xrpl/basics/strHex.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
#include <boost/asio/steady_timer.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <atomic>
|
||||
#include <cassert>
|
||||
#include <chrono>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <exception>
|
||||
#include <fstream>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <nudb/nudb.hpp>
|
||||
#include <queue>
|
||||
#include <sstream>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace ripple {
|
||||
namespace NodeStore {
|
||||
|
||||
void
|
||||
writeCallback(CassFuture* fut, void* cbData);
|
||||
void
|
||||
readCallback(CassFuture* fut, void* cbData);
|
||||
|
||||
class CassandraBackend : public Backend
|
||||
{
|
||||
private:
|
||||
// convenience function for one-off queries. For normal reads and writes,
|
||||
// use the prepared statements insert_ and select_
|
||||
CassStatement*
|
||||
makeStatement(char const* query, std::size_t params)
|
||||
{
|
||||
CassStatement* ret = cass_statement_new(query, params);
|
||||
CassError rc =
|
||||
cass_statement_set_consistency(ret, CASS_CONSISTENCY_QUORUM);
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << "nodestore: Error setting query consistency: " << query
|
||||
<< ", result: " << rc << ", " << cass_error_desc(rc);
|
||||
Throw<std::runtime_error>(ss.str());
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
beast::Journal const j_;
|
||||
// size of a key
|
||||
size_t const keyBytes_;
|
||||
|
||||
Section const config_;
|
||||
|
||||
std::atomic<bool> open_{false};
|
||||
|
||||
// mutex used for open() and close()
|
||||
std::mutex mutex_;
|
||||
|
||||
std::unique_ptr<CassSession, void (*)(CassSession*)> session_{
|
||||
nullptr,
|
||||
[](CassSession* session) {
|
||||
// Try to disconnect gracefully.
|
||||
CassFuture* fut = cass_session_close(session);
|
||||
cass_future_wait(fut);
|
||||
cass_future_free(fut);
|
||||
cass_session_free(session);
|
||||
}};
|
||||
|
||||
// Database statements cached server side. Using these is more efficient
|
||||
// than making a new statement
|
||||
const CassPrepared* insert_ = nullptr;
|
||||
const CassPrepared* select_ = nullptr;
|
||||
|
||||
// io_context used for exponential backoff for write retries
|
||||
boost::asio::io_context ioContext_;
|
||||
std::optional<boost::asio::io_context::work> work_;
|
||||
std::thread ioThread_;
|
||||
|
||||
// maximum number of concurrent in flight requests. New requests will wait
|
||||
// for earlier requests to finish if this limit is exceeded
|
||||
uint32_t maxRequestsOutstanding = 10000000;
|
||||
std::atomic_uint32_t numRequestsOutstanding_ = 0;
|
||||
|
||||
// mutex and condition_variable to limit the number of concurrent in flight
|
||||
// requests
|
||||
std::mutex throttleMutex_;
|
||||
std::condition_variable throttleCv_;
|
||||
|
||||
// writes are asynchronous. This mutex and condition_variable is used to
|
||||
// wait for all writes to finish
|
||||
std::mutex syncMutex_;
|
||||
std::condition_variable syncCv_;
|
||||
|
||||
Counters<std::atomic<std::uint64_t>> counters_;
|
||||
|
||||
public:
|
||||
CassandraBackend(
|
||||
size_t keyBytes,
|
||||
Section const& keyValues,
|
||||
beast::Journal journal)
|
||||
: j_(journal), keyBytes_(keyBytes), config_(keyValues)
|
||||
{
|
||||
}
|
||||
|
||||
~CassandraBackend() override
|
||||
{
|
||||
close();
|
||||
}
|
||||
|
||||
std::string
|
||||
getName() override
|
||||
{
|
||||
return "cassandra";
|
||||
}
|
||||
|
||||
bool
|
||||
isOpen() override
|
||||
{
|
||||
return open_;
|
||||
}

    // Set up all of the necessary components for talking to the database.
    // Create the table if it doesn't exist already.
    // @param createIfMissing ignored
    void
    open(bool createIfMissing) override
    {
        if (open_)
        {
            assert(false);
            JLOG(j_.error()) << "database is already open";
            return;
        }

        std::lock_guard<std::mutex> lock(mutex_);
        CassCluster* cluster = cass_cluster_new();
        if (!cluster)
            Throw<std::runtime_error>(
                "nodestore: Failed to create CassCluster");

        std::string secureConnectBundle = get(config_, "secure_connect_bundle");

        if (!secureConnectBundle.empty())
        {
            /* Set up the driver to connect to the cloud using the secure
             * connection bundle */
            if (cass_cluster_set_cloud_secure_connection_bundle(
                    cluster, secureConnectBundle.c_str()) != CASS_OK)
            {
                JLOG(j_.error()) << "Unable to configure cloud using the "
                                    "secure connection bundle: "
                                 << secureConnectBundle;
                Throw<std::runtime_error>(
                    "nodestore: Failed to connect using secure connection "
                    "bundle");
            }
        }
        else
        {
            std::string contact_points = get(config_, "contact_points");
            if (contact_points.empty())
            {
                Throw<std::runtime_error>(
                    "nodestore: Missing contact_points in Cassandra config");
            }
            CassError rc = cass_cluster_set_contact_points(
                cluster, contact_points.c_str());
            if (rc != CASS_OK)
            {
                std::stringstream ss;
                ss << "nodestore: Error setting Cassandra contact_points: "
                   << contact_points << ", result: " << rc << ", "
                   << cass_error_desc(rc);

                Throw<std::runtime_error>(ss.str());
            }

            int port = get<int>(config_, "port");
            if (port)
            {
                rc = cass_cluster_set_port(cluster, port);
                if (rc != CASS_OK)
                {
                    std::stringstream ss;
                    ss << "nodestore: Error setting Cassandra port: " << port
                       << ", result: " << rc << ", " << cass_error_desc(rc);

                    Throw<std::runtime_error>(ss.str());
                }
            }
        }
        cass_cluster_set_token_aware_routing(cluster, cass_true);
        CassError rc = cass_cluster_set_protocol_version(
            cluster, CASS_PROTOCOL_VERSION_V4);
        if (rc != CASS_OK)
        {
            std::stringstream ss;
            ss << "nodestore: Error setting Cassandra protocol version, "
               << "result: " << rc << ", " << cass_error_desc(rc);

            Throw<std::runtime_error>(ss.str());
        }

        std::string username = get(config_, "username");
        if (username.size())
        {
            // Log the username only; never write the password to stdout.
            JLOG(j_.info()) << "Cassandra user: " << username;
            cass_cluster_set_credentials(
                cluster, username.c_str(), get(config_, "password").c_str());
        }

        unsigned int const ioThreads = get<int>(config_, "io_threads", 4);
        maxRequestsOutstanding =
            get<int>(config_, "max_requests_outstanding", 10000000);
        JLOG(j_.info()) << "Configuring Cassandra driver to use " << ioThreads
                        << " IO threads. Capping maximum pending requests at "
                        << maxRequestsOutstanding;
        rc = cass_cluster_set_num_threads_io(cluster, ioThreads);
        if (rc != CASS_OK)
        {
            std::stringstream ss;
            ss << "nodestore: Error setting Cassandra io threads to "
               << ioThreads << ", result: " << rc << ", "
               << cass_error_desc(rc);
            Throw<std::runtime_error>(ss.str());
        }

        rc = cass_cluster_set_queue_size_io(
            cluster,
            maxRequestsOutstanding);  // This number needs to scale with the
                                      // number of requests per second.
        if (rc != CASS_OK)
        {
            std::stringstream ss;
            ss << "nodestore: Error setting Cassandra io queue size"
               << ", result: " << rc << ", " << cass_error_desc(rc);
            JLOG(j_.error()) << ss.str();
            return;
        }
        cass_cluster_set_request_timeout(cluster, 2000);

        std::string certfile = get(config_, "certfile");
        if (certfile.size())
        {
            std::ifstream fileStream(
                boost::filesystem::path(certfile).string(), std::ios::in);
            if (!fileStream)
            {
                std::stringstream ss;
                ss << "opening cert file " << certfile;
                Throw<std::system_error>(
                    errno, std::generic_category(), ss.str());
            }
            std::string cert(
                std::istreambuf_iterator<char>{fileStream},
                std::istreambuf_iterator<char>{});
            if (fileStream.bad())
            {
                std::stringstream ss;
                ss << "reading cert file " << certfile;
                Throw<std::system_error>(
                    errno, std::generic_category(), ss.str());
            }

            CassSsl* context = cass_ssl_new();
            cass_ssl_set_verify_flags(context, CASS_SSL_VERIFY_NONE);
            rc = cass_ssl_add_trusted_cert(context, cert.c_str());
            if (rc != CASS_OK)
            {
                std::stringstream ss;
                ss << "nodestore: Error setting Cassandra ssl context: " << rc
                   << ", " << cass_error_desc(rc);
                Throw<std::runtime_error>(ss.str());
            }

            cass_cluster_set_ssl(cluster, context);
            cass_ssl_free(context);
        }

        std::string keyspace = get(config_, "keyspace");
        if (keyspace.empty())
        {
            Throw<std::runtime_error>(
                "nodestore: Missing keyspace in Cassandra config");
        }

        std::string tableName = get(config_, "table_name");
        if (tableName.empty())
        {
            Throw<std::runtime_error>(
                "nodestore: Missing table name in Cassandra config");
        }

        cass_cluster_set_connect_timeout(cluster, 10000);

        CassStatement* statement;
        CassFuture* fut;
        bool setupSessionAndTable = false;
        while (!setupSessionAndTable)
        {
            std::this_thread::sleep_for(std::chrono::seconds(1));
            session_.reset(cass_session_new());
            assert(session_);

            fut = cass_session_connect_keyspace(
                session_.get(), cluster, keyspace.c_str());
            rc = cass_future_error_code(fut);
            cass_future_free(fut);
            if (rc != CASS_OK)
            {
                std::stringstream ss;
                ss << "nodestore: Error connecting Cassandra session keyspace: "
                   << rc << ", " << cass_error_desc(rc);
                JLOG(j_.error()) << ss.str();
                continue;
            }

            std::stringstream query;
            query << "CREATE TABLE IF NOT EXISTS " << tableName
                  << " ( hash blob PRIMARY KEY, object blob)";

            statement = makeStatement(query.str().c_str(), 0);
            fut = cass_session_execute(session_.get(), statement);
            rc = cass_future_error_code(fut);
            cass_future_free(fut);
            cass_statement_free(statement);
            if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY)
            {
                std::stringstream ss;
                ss << "nodestore: Error creating Cassandra table: " << rc
                   << ", " << cass_error_desc(rc);
                JLOG(j_.error()) << ss.str();
                continue;
            }

            query.str("");
            query << "SELECT * FROM " << tableName << " LIMIT 1";
            statement = makeStatement(query.str().c_str(), 0);
            fut = cass_session_execute(session_.get(), statement);
            rc = cass_future_error_code(fut);
            cass_future_free(fut);
            cass_statement_free(statement);
            if (rc != CASS_OK)
            {
                if (rc == CASS_ERROR_SERVER_INVALID_QUERY)
                {
                    JLOG(j_.warn()) << "table not here yet, sleeping 1s to "
                                       "see if table creation propagates";
                    continue;
                }
                else
                {
                    std::stringstream ss;
                    ss << "nodestore: Error checking for table: " << rc << ", "
                       << cass_error_desc(rc);
                    JLOG(j_.error()) << ss.str();
                    continue;
                }
            }

            setupSessionAndTable = true;
        }

        cass_cluster_free(cluster);

        bool setupPreparedStatements = false;
        while (!setupPreparedStatements)
        {
            std::this_thread::sleep_for(std::chrono::seconds(1));
            std::stringstream query;
            query << "INSERT INTO " << tableName
                  << " (hash, object) VALUES (?, ?)";
            CassFuture* prepare_future =
                cass_session_prepare(session_.get(), query.str().c_str());

            /* Wait for the statement to prepare and get the result */
            rc = cass_future_error_code(prepare_future);

            if (rc != CASS_OK)
            {
                /* Handle error */
                cass_future_free(prepare_future);

                std::stringstream ss;
                ss << "nodestore: Error preparing insert : " << rc << ", "
                   << cass_error_desc(rc);
                JLOG(j_.error()) << ss.str();
                continue;
            }

            /* Get the prepared object from the future */
            insert_ = cass_future_get_prepared(prepare_future);

            /* The future can be freed immediately after getting the prepared
             * object
             */
            cass_future_free(prepare_future);

            query.str("");
            query << "SELECT object FROM " << tableName << " WHERE hash = ?";
            prepare_future =
                cass_session_prepare(session_.get(), query.str().c_str());

            /* Wait for the statement to prepare and get the result */
            rc = cass_future_error_code(prepare_future);

            if (rc != CASS_OK)
            {
                /* Handle error */
                cass_future_free(prepare_future);

                std::stringstream ss;
                ss << "nodestore: Error preparing select : " << rc << ", "
                   << cass_error_desc(rc);
                JLOG(j_.error()) << ss.str();
                continue;
            }

            /* Get the prepared object from the future */
            select_ = cass_future_get_prepared(prepare_future);

            /* The future can be freed immediately after getting the prepared
             * object
             */
            cass_future_free(prepare_future);
            setupPreparedStatements = true;
        }

        work_.emplace(ioContext_);
        ioThread_ = std::thread{[this]() { ioContext_.run(); }};
        open_ = true;
    }
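
    // For reference, open() pulls all of its settings from the backend's
    // config section. A minimal sketch of such a stanza (values are
    // illustrative, not recommendations):
    //
    //     [node_db]
    //     type=cassandra
    //     contact_points=127.0.0.1
    //     port=9042
    //     keyspace=xrpl
    //     table_name=objects
    //     io_threads=4
    //     max_requests_outstanding=10000000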

    // Close the connection to the database
    void
    close() override
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            if (insert_)
            {
                cass_prepared_free(insert_);
                insert_ = nullptr;
            }
            if (select_)
            {
                cass_prepared_free(select_);
                select_ = nullptr;
            }
            work_.reset();
            // Guard against a second close() (the destructor also calls
            // close()): joining an already-joined thread is undefined.
            if (ioThread_.joinable())
                ioThread_.join();
        }
        open_ = false;
    }

    // Synchronously fetch the object with key `key` and store the result in
    // `pno`.
    // @param key the key of the object
    // @param pno object in which to store the result
    // @return result status of the query
    Status
    fetch(void const* key, std::shared_ptr<NodeObject>* pno) override
    {
        JLOG(j_.trace()) << "Fetching from cassandra";
        CassStatement* statement = cass_prepared_bind(select_);
        cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM);
        CassError rc = cass_statement_bind_bytes(
            statement, 0, static_cast<cass_byte_t const*>(key), keyBytes_);
        if (rc != CASS_OK)
        {
            cass_statement_free(statement);
            JLOG(j_.error()) << "Binding Cassandra fetch query: " << rc << ", "
                             << cass_error_desc(rc);
            pno->reset();
            return backendError;
        }
        CassFuture* fut;
        do
        {
            fut = cass_session_execute(session_.get(), statement);
            rc = cass_future_error_code(fut);
            if (rc != CASS_OK)
            {
                std::stringstream ss;
                ss << "Cassandra fetch error";
                ss << ", retrying";
                ++counters_.readRetries;
                ss << ": " << cass_error_desc(rc);
                JLOG(j_.warn()) << ss.str();
                // Free the failed future before retrying so it isn't leaked.
                cass_future_free(fut);
            }
        } while (rc != CASS_OK);

        CassResult const* res = cass_future_get_result(fut);
        cass_statement_free(statement);
        cass_future_free(fut);

        CassRow const* row = cass_result_first_row(res);
        if (!row)
        {
            cass_result_free(res);
            pno->reset();
            return notFound;
        }
        cass_byte_t const* buf;
        std::size_t bufSize;
        rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize);
        if (rc != CASS_OK)
        {
            cass_result_free(res);
            pno->reset();
            JLOG(j_.error()) << "Cassandra fetch result error: " << rc << ", "
                             << cass_error_desc(rc);
            ++counters_.readErrors;
            return backendError;
        }

        nudb::detail::buffer bf;
        std::pair<void const*, std::size_t> uncompressed =
            nodeobject_decompress(buf, bufSize, bf);
        DecodedBlob decoded(key, uncompressed.first, uncompressed.second);
        cass_result_free(res);

        if (!decoded.wasOk())
        {
            pno->reset();
            JLOG(j_.error()) << "Cassandra error decoding result: " << rc
                             << ", " << cass_error_desc(rc);
            ++counters_.readErrors;
            return dataCorrupt;
        }
        *pno = decoded.createObject();
        return ok;
    }
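
    // Typical call pattern, as a sketch (`hash` and `process` are
    // illustrative, not part of this interface):
    //
    //     std::shared_ptr<NodeObject> obj;
    //     if (backend.fetch(hash.data(), &obj) == ok)
    //         process(obj);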

    struct ReadCallbackData
    {
        CassandraBackend& backend;
        const void* const key;
        std::shared_ptr<NodeObject>& result;
        std::condition_variable& cv;

        std::atomic_uint32_t& numFinished;
        size_t batchSize;

        ReadCallbackData(
            CassandraBackend& backend,
            const void* const key,
            std::shared_ptr<NodeObject>& result,
            std::condition_variable& cv,
            std::atomic_uint32_t& numFinished,
            size_t batchSize)
            : backend(backend)
            , key(key)
            , result(result)
            , cv(cv)
            , numFinished(numFinished)
            , batchSize(batchSize)
        {
        }

        ReadCallbackData(ReadCallbackData const& other) = default;
    };

    std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
    fetchBatch(std::vector<uint256 const*> const& hashes) override
    {
        std::size_t const numHashes = hashes.size();
        JLOG(j_.trace()) << "Fetching " << numHashes
                         << " records from Cassandra";
        std::atomic_uint32_t numFinished = 0;
        std::condition_variable cv;
        std::mutex mtx;
        std::vector<std::shared_ptr<NodeObject>> results{numHashes};
        std::vector<std::shared_ptr<ReadCallbackData>> cbs;
        cbs.reserve(numHashes);
        for (std::size_t i = 0; i < hashes.size(); ++i)
        {
            cbs.push_back(std::make_shared<ReadCallbackData>(
                *this,
                static_cast<void const*>(hashes[i]),
                results[i],
                cv,
                numFinished,
                numHashes));
            read(*cbs[i]);
        }
        assert(results.size() == cbs.size());

        std::unique_lock<std::mutex> lck(mtx);
        cv.wait(lck, [&numFinished, &numHashes]() {
            return numFinished == numHashes;
        });

        JLOG(j_.trace()) << "Fetched " << numHashes
                         << " records from Cassandra";
        return {results, ok};
    }
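
    // The shared_ptr vector above is what keeps each ReadCallbackData alive
    // until the condition variable reports that every asynchronous read has
    // called back; the driver itself only ever sees a raw pointer into it.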

    void
    read(ReadCallbackData& data)
    {
        CassStatement* statement = cass_prepared_bind(select_);
        cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM);
        CassError rc = cass_statement_bind_bytes(
            statement, 0, static_cast<cass_byte_t const*>(data.key), keyBytes_);
        if (rc != CASS_OK)
        {
            size_t batchSize = data.batchSize;
            if (++(data.numFinished) == batchSize)
                data.cv.notify_all();
            cass_statement_free(statement);
            JLOG(j_.error()) << "Binding Cassandra fetch query: " << rc << ", "
                             << cass_error_desc(rc);
            return;
        }

        CassFuture* fut = cass_session_execute(session_.get(), statement);

        cass_statement_free(statement);

        cass_future_set_callback(fut, readCallback, static_cast<void*>(&data));
        cass_future_free(fut);
    }
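
    // Freeing the future immediately after cass_future_set_callback() follows
    // the usage shown in the DataStax cpp-driver examples: the driver holds
    // its own reference and still invokes the callback. Noted here as an
    // observation about the driver, not something enforced locally.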

    struct WriteCallbackData
    {
        CassandraBackend* backend;
        // The shared pointer to the node object must exist until the write is
        // confirmed persisted. Otherwise the object could be deleted
        // prematurely if other copies are removed from caches.
        std::shared_ptr<NodeObject> no;
        std::optional<NodeStore::EncodedBlob> e;
        std::pair<void const*, std::size_t> compressed;
        std::chrono::steady_clock::time_point begin;
        // The data is stored in this buffer. The void* in the member above
        // points into this buffer.
        nudb::detail::buffer bf;
        std::atomic<std::uint64_t>& totalWriteRetries;

        uint32_t currentRetries = 0;

        WriteCallbackData(
            CassandraBackend* f,
            std::shared_ptr<NodeObject> const& nobj,
            std::atomic<std::uint64_t>& retries)
            : backend(f), no(nobj), totalWriteRetries(retries)
        {
            e.emplace(no);

            compressed =
                NodeStore::nodeobject_compress(e->getData(), e->getSize(), bf);
        }
    };

    void
    write(WriteCallbackData& data, bool isRetry)
    {
        {
            // We limit the total number of concurrent in-flight writes. This
            // is client-side throttling to prevent overloading the database.
            // It is mostly useful when the very first ledger is being written
            // in full, which is several million records. On sufficiently
            // large Cassandra clusters, this throttling is not needed; the
            // default value of maxRequestsOutstanding is 10 million, which is
            // more records than are present in any single ledger.
            std::unique_lock<std::mutex> lck(throttleMutex_);
            if (!isRetry && numRequestsOutstanding_ > maxRequestsOutstanding)
            {
                JLOG(j_.trace()) << __func__ << " : "
                                 << "Max outstanding requests reached. "
                                 << "Waiting for other requests to finish";
                ++counters_.writesDelayed;
                throttleCv_.wait(lck, [this]() {
                    return numRequestsOutstanding_ < maxRequestsOutstanding;
                });
            }
        }

        CassStatement* statement = cass_prepared_bind(insert_);
        cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM);
        CassError rc = cass_statement_bind_bytes(
            statement,
            0,
            static_cast<cass_byte_t const*>(data.e->getKey()),
            keyBytes_);
        if (rc != CASS_OK)
        {
            cass_statement_free(statement);
            std::stringstream ss;
            ss << "Binding cassandra insert hash: " << rc << ", "
               << cass_error_desc(rc);
            JLOG(j_.error()) << __func__ << " : " << ss.str();
            Throw<std::runtime_error>(ss.str());
        }
        rc = cass_statement_bind_bytes(
            statement,
            1,
            static_cast<cass_byte_t const*>(data.compressed.first),
            data.compressed.second);
        if (rc != CASS_OK)
        {
            cass_statement_free(statement);
            std::stringstream ss;
            ss << "Binding cassandra insert object: " << rc << ", "
               << cass_error_desc(rc);
            JLOG(j_.error()) << __func__ << " : " << ss.str();
            Throw<std::runtime_error>(ss.str());
        }
        data.begin = std::chrono::steady_clock::now();
        CassFuture* fut = cass_session_execute(session_.get(), statement);
        cass_statement_free(statement);

        cass_future_set_callback(fut, writeCallback, static_cast<void*>(&data));
        cass_future_free(fut);
    }
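
    // The throttle above is advisory: numRequestsOutstanding_ is read under
    // throttleMutex_ but incremented by the caller (store()), so the cap can
    // be overshot slightly. Retries bypass it on purpose, so a retried write
    // can never deadlock against a limit it already counts toward.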

    void
    store(std::shared_ptr<NodeObject> const& no) override
    {
        JLOG(j_.trace()) << "Writing to cassandra";
        WriteCallbackData* data =
            new WriteCallbackData(this, no, counters_.writeRetries);

        ++numRequestsOutstanding_;
        write(*data, false);
    }
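
    // Ownership note: the WriteCallbackData allocated above is deliberately
    // handed off to the callback chain; writeCallback() deletes it once the
    // write finally succeeds, and until then each retry re-posts the same
    // object.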

    void
    storeBatch(Batch const& batch) override
    {
        for (auto const& no : batch)
        {
            store(no);
        }
    }

    void
    sync() override
    {
        std::unique_lock<std::mutex> lck(syncMutex_);

        syncCv_.wait(lck, [this]() { return numRequestsOutstanding_ == 0; });
    }

    // Iterate through the entire table and execute f(). Intended for import
    // only, while the database is not being written to. Not implemented for
    // this backend: calling it asserts and throws.
    void
    for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
    {
        assert(false);
        Throw<std::runtime_error>("not implemented");
    }

    int
    getWriteLoad() override
    {
        return 0;
    }

    void
    setDeletePath() override
    {
    }

    int
    fdRequired() const override
    {
        return 0;
    }

    std::optional<Counters<std::uint64_t>>
    counters() const override
    {
        return counters_;
    }

    friend void
    writeCallback(CassFuture* fut, void* cbData);

    friend void
    readCallback(CassFuture* fut, void* cbData);
};

// Process the result of an asynchronous read. Retry on error.
// @param fut cassandra future associated with the read
// @param cbData struct that holds the request parameters
void
readCallback(CassFuture* fut, void* cbData)
{
    CassandraBackend::ReadCallbackData& requestParams =
        *static_cast<CassandraBackend::ReadCallbackData*>(cbData);

    CassError rc = cass_future_error_code(fut);

    if (rc != CASS_OK)
    {
        ++(requestParams.backend.counters_.readRetries);
        JLOG(requestParams.backend.j_.warn())
            << "Cassandra fetch error : " << rc << " : " << cass_error_desc(rc)
            << " - retrying";
        // Retry right away. The only time the cluster should ever be
        // overloaded is when the very first ledger is being written in full
        // (millions of writes at once), during which no reads should be
        // occurring. If reads are timing out, the code/architecture should be
        // modified to handle greater read load, as opposed to just
        // exponential backoff.
        requestParams.backend.read(requestParams);
    }
    else
    {
        auto finish = [&requestParams]() {
            size_t batchSize = requestParams.batchSize;
            if (++(requestParams.numFinished) == batchSize)
                requestParams.cv.notify_all();
        };
        CassResult const* res = cass_future_get_result(fut);

        CassRow const* row = cass_result_first_row(res);
        if (!row)
        {
            cass_result_free(res);
            JLOG(requestParams.backend.j_.error())
                << "Cassandra fetch get row error : " << rc << ", "
                << cass_error_desc(rc);
            finish();
            return;
        }
        cass_byte_t const* buf;
        std::size_t bufSize;
        rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize);
        if (rc != CASS_OK)
        {
            cass_result_free(res);
            JLOG(requestParams.backend.j_.error())
                << "Cassandra fetch get bytes error : " << rc << ", "
                << cass_error_desc(rc);
            ++requestParams.backend.counters_.readErrors;
            finish();
            return;
        }
        nudb::detail::buffer bf;
        std::pair<void const*, std::size_t> uncompressed =
            nodeobject_decompress(buf, bufSize, bf);
        DecodedBlob decoded(
            requestParams.key, uncompressed.first, uncompressed.second);
        cass_result_free(res);

        if (!decoded.wasOk())
        {
            JLOG(requestParams.backend.j_.fatal())
                << "Cassandra fetch error - data corruption : " << rc << ", "
                << cass_error_desc(rc);
            ++requestParams.backend.counters_.readErrors;
            finish();
            return;
        }
        requestParams.result = decoded.createObject();
        finish();
    }
}

// Process the result of an asynchronous write. Retry on error.
// @param fut cassandra future associated with the write
// @param cbData struct that holds the request parameters
void
writeCallback(CassFuture* fut, void* cbData)
{
    CassandraBackend::WriteCallbackData& requestParams =
        *static_cast<CassandraBackend::WriteCallbackData*>(cbData);
    CassandraBackend& backend = *requestParams.backend;
    auto rc = cass_future_error_code(fut);
    if (rc != CASS_OK)
    {
        JLOG(backend.j_.error())
            << "Cassandra insert error: " << rc << ", "
            << cass_error_desc(rc) << ", retrying";
        ++requestParams.totalWriteRetries;
        // Exponential backoff with a max wait of 2^10 ms (about 1 second).
        auto wait = std::chrono::milliseconds(
            lround(std::pow(2, std::min(10u, requestParams.currentRetries))));
        ++requestParams.currentRetries;
        std::shared_ptr<boost::asio::steady_timer> timer =
            std::make_shared<boost::asio::steady_timer>(
                backend.ioContext_, std::chrono::steady_clock::now() + wait);
        timer->async_wait([timer, &requestParams, &backend](
                              const boost::system::error_code& error) {
            backend.write(requestParams, true);
        });
    }
    else
    {
        backend.counters_.writeDurationUs +=
            std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - requestParams.begin)
                .count();
        --(backend.numRequestsOutstanding_);

        backend.throttleCv_.notify_all();
        if (backend.numRequestsOutstanding_ == 0)
            backend.syncCv_.notify_all();
        delete &requestParams;
    }
}
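
// Backoff arithmetic, worked through: retry n sleeps 2^min(n, 10) ms, so the
// waits run 1, 2, 4, ... 512 ms and then stay at 1024 ms (about one second)
// from the tenth retry onward.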

//------------------------------------------------------------------------------

class CassandraFactory : public Factory
{
public:
    CassandraFactory()
    {
        Manager::instance().insert(*this);
    }

    ~CassandraFactory() override
    {
        Manager::instance().erase(*this);
    }

    std::string
    getName() const override
    {
        return "cassandra";
    }

    std::unique_ptr<Backend>
    createInstance(
        size_t keyBytes,
        Section const& keyValues,
        std::size_t burstSize,
        Scheduler& scheduler,
        beast::Journal journal) override
    {
        return std::make_unique<CassandraBackend>(keyBytes, keyValues, journal);
    }
};

static CassandraFactory cassandraFactory;
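
// The static instance above self-registers with Manager in its constructor,
// which is how `type=cassandra` in a node database stanza resolves to this
// factory at runtime; the registration is undone in the destructor.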

} // namespace NodeStore
} // namespace ripple
#endif

@@ -373,15 +373,6 @@ Database::getCountsJson(Json::Value& obj)
    obj[jss::node_written_bytes] = std::to_string(storeSz_);
    obj[jss::node_read_bytes] = std::to_string(fetchSz_);
    obj[jss::node_reads_duration_us] = std::to_string(fetchDurationUs_);

    if (auto c = getCounters())
    {
        obj[jss::node_read_errors] = std::to_string(c->readErrors);
        obj[jss::node_read_retries] = std::to_string(c->readRetries);
        obj[jss::node_write_retries] = std::to_string(c->writeRetries);
        obj[jss::node_writes_delayed] = std::to_string(c->writesDelayed);
        obj[jss::node_writes_duration_us] = std::to_string(c->writeDurationUs);
    }
}

} // namespace NodeStore

@@ -156,12 +156,6 @@ private:
    {
        backend_->for_each(f);
    }

    std::optional<Backend::Counters<std::uint64_t>>
    getCounters() const override
    {
        return backend_->counters();
    }
};

} // namespace NodeStore

@@ -55,12 +55,6 @@ ManagerImp::make_Backend(
    auto factory{find(type)};
    if (!factory)
    {
#ifndef RIPPLED_REPORTING
        if (boost::iequals(type, "cassandra"))
            Throw<std::runtime_error>(
                "To use Cassandra as a nodestore, build rippled with "
                "-Dreporting=ON");
#endif
        missing_backend();
    }

@@ -105,20 +105,10 @@ canHaveDeliveredAmount(
{
    // These lambdas are used to compute the values lazily
    auto const getFix1623Enabled = [&context]() -> bool {
        if (context.app.config().reporting())
        {
            auto const view = context.ledgerMaster.getValidatedLedger();
            if (!view)
                return false;
            return view->rules().enabled(fix1623);
        }
        else
        {
            auto const view = context.app.openLedger().current();
            if (!view)
                return false;
            return view->rules().enabled(fix1623);
        }
        auto const view = context.app.openLedger().current();
        if (!view)
            return false;
        return view->rules().enabled(fix1623);
    };

    return canHaveDeliveredAmountHelp(

@@ -107,11 +107,7 @@ Handler const handlerArray[]{
    {"feature", byRef(&doFeature), Role::USER, NO_CONDITION},
    {"fee", byRef(&doFee), Role::USER, NEEDS_CURRENT_LEDGER},
    {"fetch_info", byRef(&doFetchInfo), Role::ADMIN, NO_CONDITION},
#ifdef RIPPLED_REPORTING
    {"gateway_balances", byRef(&doGatewayBalances), Role::ADMIN, NO_CONDITION},
#else
    {"gateway_balances", byRef(&doGatewayBalances), Role::USER, NO_CONDITION},
#endif
    {"get_counts", byRef(&doGetCounts), Role::ADMIN, NO_CONDITION},
    {"get_aggregate_price",
     byRef(&doGetAggregatePrice),

@@ -81,22 +81,6 @@ template <class T>
error_code_i
conditionMet(Condition condition_required, T& context)
{
    if (context.app.config().reporting())
    {
        if (condition_required == NEEDS_CURRENT_LEDGER)
        {
            return rpcNO_CURRENT;
        }
        else if (condition_required == NEEDS_CLOSED_LEDGER)
        {
            return rpcNO_CLOSED;
        }
        else
        {
            return rpcSUCCESS;
        }
    }

    if (context.app.getOPs().isAmendmentBlocked() &&
        (condition_required != NO_CONDITION))
    {

@@ -22,7 +22,6 @@
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/reporting/P2pProxy.h>
#include <xrpld/core/Config.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/net/InfoSub.h>

@@ -206,11 +205,6 @@ callMethod(
        perfLog.rpcFinish(name, curId);
        return ret;
    }
    catch (ReportingShouldProxy&)
    {
        result = forwardToP2p(context);
        return rpcSUCCESS;
    }
    catch (std::exception& e)
    {
        perfLog.rpcError(name, curId);

@@ -226,36 +220,9 @@ callMethod(

} // namespace

void
injectReportingWarning(RPC::JsonContext& context, Json::Value& result)
{
    if (context.app.config().reporting())
    {
        Json::Value warnings{Json::arrayValue};
        Json::Value& w = warnings.append(Json::objectValue);
        w[jss::id] = warnRPC_REPORTING;
        w[jss::message] =
            "This is a reporting server. "
            " The default behavior of a reporting server is to only"
            " return validated data. If you are looking for not yet"
            " validated data, include \"ledger_index : current\""
            " in your request, which will cause this server to forward"
            " the request to a p2p node. If the forward is successful"
            " the response will include \"forwarded\" : \"true\"";
        result[jss::warnings] = std::move(warnings);
    }
}

Status
doCommand(RPC::JsonContext& context, Json::Value& result)
{
    if (shouldForwardToP2p(context))
    {
        result = forwardToP2p(context);
        injectReportingWarning(context, result);
        // this return value is ignored
        return rpcSUCCESS;
    }
    Handler const* handler = nullptr;
    if (auto error = fillHandler(context, handler))
    {

@@ -285,7 +252,6 @@ doCommand(RPC::JsonContext& context, Json::Value& result)
    else
    {
        auto ret = callMethod(context, method, handler->name_, result);
        injectReportingWarning(context, result);
        return ret;
    }
}

@@ -415,9 +415,9 @@ getAccountNamespace(
namespace {

bool
isValidatedOld(LedgerMaster& ledgerMaster, bool standaloneOrReporting)
isValidatedOld(LedgerMaster& ledgerMaster, bool standalone)
{
    if (standaloneOrReporting)
    if (standalone)
        return false;

    return ledgerMaster.getValidatedLedgerAge() > Tuning::maxValidatedLedgerAge;

@@ -457,12 +457,10 @@ ledgerFromRequest(T& ledger, JsonContext& context)

    auto const index = indexValue.asString();

    if (index == "current" ||
        (index.empty() && !context.app.config().reporting()))
    if (index == "current" || index.empty())
        return getLedger(ledger, LedgerShortcut::CURRENT, context);

    if (index == "validated" ||
        (index.empty() && context.app.config().reporting()))
    if (index == "validated")
        return getLedger(ledger, LedgerShortcut::VALIDATED, context);

    if (index == "closed")

@@ -528,13 +526,8 @@ ledgerFromSpecifier(
            [[fallthrough]];
        case LedgerCase::LEDGER_NOT_SET: {
            auto const shortcut = specifier.shortcut();
            // note, unspecified defaults to validated in reporting mode
            if (shortcut ==
                    org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED ||
                (shortcut ==
                     org::xrpl::rpc::v1::LedgerSpecifier::
                         SHORTCUT_UNSPECIFIED &&
                 context.app.config().reporting()))
                org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED)
            {
                return getLedger(ledger, LedgerShortcut::VALIDATED, context);
            }

@@ -591,9 +584,6 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context)
    ledger = context.ledgerMaster.getLedgerBySeq(ledgerIndex);
    if (ledger == nullptr)
    {
        if (context.app.config().reporting())
            return {rpcLGR_NOT_FOUND, "ledgerNotFound"};

        auto cur = context.ledgerMaster.getCurrentLedger();
        if (cur->info().seq == ledgerIndex)
        {

@@ -628,10 +618,7 @@ template <class T>
Status
getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
{
    if (isValidatedOld(
            context.ledgerMaster,
            context.app.config().standalone() ||
                context.app.config().reporting()))
    if (isValidatedOld(context.ledgerMaster, context.app.config().standalone()))
    {
        if (context.apiVersion == 1)
            return {rpcNO_NETWORK, "InsufficientNetworkMode"};

@@ -654,10 +641,6 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
    {
        if (shortcut == LedgerShortcut::CURRENT)
        {
            if (context.app.config().reporting())
                return {
                    rpcLGR_NOT_FOUND,
                    "Reporting does not track current ledger"};
            auto cur = context.ledgerMaster.getCurrentLedger();

            if constexpr (is_assignable_shared_ptr<

@@ -671,9 +654,6 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context)
    }
    else if (shortcut == LedgerShortcut::CLOSED)
    {
        if (context.app.config().reporting())
            return {
                rpcLGR_NOT_FOUND, "Reporting does not track closed ledger"};
        ledger = context.ledgerMaster.getClosedLedger();
        assert(!ledger->open());
    }

@@ -1162,9 +1142,6 @@ getAPIVersionNumber(Json::Value const& jv, bool betaEnabled)
std::variant<std::shared_ptr<Ledger const>, Json::Value>
getLedgerByContext(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    auto const hasHash = context.params.isMember(jss::ledger_hash);
    auto const hasIndex = context.params.isMember(jss::ledger_index);
    std::uint32_t ledgerIndex = 0;

@@ -827,11 +827,7 @@ transactionSign(
    if (!preprocResult.second)
        return preprocResult.first;

    std::shared_ptr<const ReadView> ledger;
    if (app.config().reporting())
        ledger = app.getLedgerMaster().getValidatedLedger();
    else
        ledger = app.openLedger().current();
    std::shared_ptr<const ReadView> ledger = app.openLedger().current();
    // Make sure the STTx makes a legitimate Transaction.
    std::pair<Json::Value, Transaction::pointer> txn =
        transactionConstructImpl(preprocResult.second, ledger->rules(), app);

@@ -22,9 +22,7 @@
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/rdb/backend/PostgresDatabase.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/core/Pg.h>
#include <xrpld/ledger/ReadView.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/DeliveredAmount.h>

@@ -218,16 +216,6 @@ std::pair<AccountTxResult, RPC::Status>
doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args)
{
    context.loadType = Resource::feeMediumBurdenRPC;
    if (context.app.config().reporting())
    {
        auto const db = dynamic_cast<PostgresDatabase*>(
            &context.app.getRelationalDatabase());

        if (!db)
            Throw<std::runtime_error>("Failed to get relational database");

        return db->getAccountTx(args);
    }

    AccountTxResult result;

@@ -391,8 +379,6 @@ populateJsonResponse(
            response[jss::marker][jss::ledger] = result.marker->ledgerSeq;
            response[jss::marker][jss::seq] = result.marker->txnSeq;
        }
        if (context.app.config().reporting())
            response["used_postgres"] = true;
    }

    JLOG(context.j.debug()) << __func__ << " : finished";

@@ -34,9 +34,6 @@ namespace ripple {
Json::Value
doCanDelete(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return RPC::make_error(rpcREPORTING_UNSUPPORTED);

    if (!context.app.getSHAMapStore().advisoryDelete())
        return RPC::make_error(rpcNOT_ENABLED);

@@ -37,9 +37,6 @@ namespace ripple {
Json::Value
doConnect(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    if (context.app.config().standalone())
        return "cannot connect in standalone mode";

@@ -30,9 +30,6 @@ namespace ripple {
Json::Value
doConsensusInfo(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    Json::Value ret(Json::objectValue);

    ret[jss::info] = context.netOps.getConsensusInfo();

@@ -35,9 +35,6 @@ namespace ripple {
Json::Value
doFeature(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    if (context.params.isMember(jss::feature))
    {
        // ensure that the `feature` param is a string

@@ -30,9 +30,6 @@ namespace ripple {
Json::Value
doFetchInfo(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    Json::Value ret(Json::objectValue);

    if (context.params.isMember(jss::clear) &&

@@ -71,7 +71,7 @@ getCountsJson(Application& app, int minObjectCount)
        ret[k] = v;
    }

    if (!app.config().reporting() && app.config().useTxTables())
    if (app.config().useTxTables())
    {
        auto const db =
            dynamic_cast<SQLiteDatabase*>(&app.getRelationalDatabase());

@@ -36,7 +36,7 @@ doLedgerAccept(RPC::JsonContext& context)
{
    Json::Value jvResult;

    if (!context.app.config().standalone() || context.app.config().reporting())
    if (!context.app.config().standalone())
    {
        jvResult[jss::error] = "notStandAlone";
    }

@@ -40,8 +40,7 @@ LedgerHandler::check()
{
    auto const& params = context_.params;
    bool needsLedger = params.isMember(jss::ledger) ||
        params.isMember(jss::ledger_hash) ||
        params.isMember(jss::ledger_index) || context_.app.config().reporting();
        params.isMember(jss::ledger_hash) || params.isMember(jss::ledger_index);
    if (!needsLedger)
        return Status::OK;

@@ -29,9 +29,6 @@ namespace ripple {
Json::Value
doManifest(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    auto& params = context.params;

    if (!params.isMember(jss::public_key))

@@ -31,9 +31,6 @@ namespace ripple {
Json::Value
doPeers(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    Json::Value jvResult(Json::objectValue);

    jvResult[jss::peers] = context.app.overlay().json();

@@ -34,9 +34,6 @@ namespace ripple {
Json::Value
doPeerReservationsAdd(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    auto const& params = context.params;

    if (!params.isMember(jss::public_key))

@@ -90,9 +87,6 @@ doPeerReservationsAdd(RPC::JsonContext& context)
Json::Value
doPeerReservationsDel(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    auto const& params = context.params;

    // We repeat much of the parameter parsing from `doPeerReservationsAdd`.

@@ -120,9 +114,6 @@ doPeerReservationsDel(RPC::JsonContext& context)
Json::Value
doPeerReservationsList(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    auto const& reservations = context.app.peerReservations().list();
    // Enumerate the reservations in context.app.peerReservations()
    // as a Json::Value.

@@ -22,7 +22,6 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/reporting/P2pProxy.h>
#include <xrpld/rpc/detail/TransactionSign.h>
#include <xrpl/json/json_value.h>
#include <xrpl/json/json_writer.h>

@@ -19,7 +19,6 @@

#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/reporting/P2pProxy.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/Role.h>
#include <xrpl/json/json_value.h>

@@ -49,15 +48,6 @@ doServerInfo(RPC::JsonContext& context)
        context.params.isMember(jss::counters) &&
        context.params[jss::counters].asBool());

    if (context.app.config().reporting())
    {
        Json::Value const proxied = forwardToP2p(context);
        auto const lf = proxied[jss::result][jss::info][jss::load_factor];
        auto const vq = proxied[jss::result][jss::info][jss::validation_quorum];
        ret[jss::info][jss::validation_quorum] = vq.isNull() ? 1 : vq;
        ret[jss::info][jss::load_factor] = lf.isNull() ? 1 : lf;
    }

    ret[jss::native_currency_code] = systemCurrencyCode();

    return ret;

@@ -129,8 +129,6 @@ doSubscribe(RPC::JsonContext& context)
            std::string streamName = it.asString();
            if (streamName == "server")
            {
                if (context.app.config().reporting())
                    return rpcError(rpcREPORTING_UNSUPPORTED);
                context.netOps.subServer(
                    ispSub, jvResult, context.role == Role::ADMIN);
            }

@@ -162,16 +160,12 @@ doSubscribe(RPC::JsonContext& context)
            }
            else if (streamName == "peer_status")
            {
                if (context.app.config().reporting())
                    return rpcError(rpcREPORTING_UNSUPPORTED);
                if (context.role != Role::ADMIN)
                    return rpcError(rpcNO_PERMISSION);
                context.netOps.subPeerStatus(ispSub);
            }
            else if (streamName == "consensus")
            {
                if (context.app.config().reporting())
                    return rpcError(rpcREPORTING_UNSUPPORTED);
                context.netOps.subConsensus(ispSub);
            }
            else

@@ -70,128 +70,9 @@ struct TxArgs
    std::optional<std::pair<uint32_t, uint32_t>> ledgerRange;
};

std::pair<TxResult, RPC::Status>
doTxPostgres(RPC::Context& context, TxArgs const& args)
{
    if (!context.app.config().reporting())
    {
        assert(false);
        Throw<std::runtime_error>(
            "Called doTxPostgres yet not in reporting mode");
    }

    TxResult res;
    res.searchedAll = TxSearched::unknown;

    if (!args.hash)
        return {
            res,
            {rpcNOT_IMPL,
             "Use of CTIDs on reporting mode is not currently supported."}};

    JLOG(context.j.debug()) << "Fetching from postgres";
    Transaction::Locator locator =
        Transaction::locate(*(args.hash), context.app);

    std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>
        pair;
    // database returned the nodestore hash. Fetch the txn directly from the
    // nodestore. Don't traverse the transaction SHAMap
    if (locator.isFound())
    {
        auto start = std::chrono::system_clock::now();
        if (auto obj = context.app.getNodeFamily().db().fetchNodeObject(
                locator.getNodestoreHash(), locator.getLedgerSequence()))
        {
            auto node = SHAMapTreeNode::makeFromPrefix(
                makeSlice(obj->getData()),
                SHAMapHash{locator.getNodestoreHash()});
            if (!node)
            {
                assert(false);
                return {res, {rpcINTERNAL, "Error making SHAMap node"}};
            }
            auto item = (static_cast<SHAMapLeafNode*>(node.get()))->peekItem();
            if (!item)
            {
                assert(false);
                return {res, {rpcINTERNAL, "Error reading SHAMap node"}};
            }

            auto [sttx, meta] = deserializeTxPlusMeta(*item);
            JLOG(context.j.debug()) << "Successfully fetched from db";

            if (!sttx || !meta)
            {
                assert(false);
                return {res, {rpcINTERNAL, "Error deserializing SHAMap node"}};
            }
            std::string reason;
            res.txn = std::make_shared<Transaction>(sttx, reason, context.app);
            res.txn->setLedger(locator.getLedgerSequence());
            res.txn->setStatus(COMMITTED);
            if (args.binary)
            {
                SerialIter it(item->slice());
                it.skip(it.getVLDataLength());  // skip transaction
                Blob blob = it.getVL();
                res.meta = std::move(blob);
            }
            else
            {
                res.meta = std::make_shared<TxMeta>(
                    *(args.hash), res.txn->getLedger(), *meta);
            }
            res.validated = true;

            auto const ledgerInfo =
                context.app.getRelationalDatabase().getLedgerInfoByIndex(
                    locator.getLedgerSequence());
            res.closeTime = ledgerInfo->closeTime;
            res.ledgerHash = ledgerInfo->hash;

            return {res, rpcSUCCESS};
        }
        else
        {
            JLOG(context.j.error()) << "Failed to fetch from db";
            assert(false);
            return {res, {rpcINTERNAL, "Containing SHAMap node not found"}};
        }
        auto end = std::chrono::system_clock::now();
        JLOG(context.j.debug()) << "tx flat fetch time : "
                                << ((end - start).count() / 1000000000.0);
    }
    // database did not find the transaction, and returned the ledger range
    // that was searched
    else
    {
        if (args.ledgerRange)
        {
            auto range = locator.getLedgerRangeSearched();
            auto min = args.ledgerRange->first;
            auto max = args.ledgerRange->second;
            if (min >= range.lower() && max <= range.upper())
            {
                res.searchedAll = TxSearched::all;
            }
            else
            {
                res.searchedAll = TxSearched::some;
            }
        }
        return {res, rpcTXN_NOT_FOUND};
    }
    // database didn't return anything. This shouldn't happen
    assert(false);
    return {res, {rpcINTERNAL, "unexpected Postgres response"}};
}

std::pair<TxResult, RPC::Status>
doTxHelp(RPC::Context& context, TxArgs args)
{
    if (context.app.config().reporting())
        return doTxPostgres(context, args);
    TxResult result;

    ClosedInterval<uint32_t> range;

@@ -351,7 +232,7 @@ populateJsonResponse(
    }

    // Note, result.ledgerHash is only set in a closed or validated
    // ledger - as seen in `doTxHelp` and `doTxPostgres`
    // ledger - as seen in `doTxHelp`
    if (result.ledgerHash)
        response[jss::ledger_hash] = to_string(*result.ledgerHash);

@@ -23,7 +23,6 @@
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/rdb/RelationalDatabase.h>
#include <xrpld/core/DatabaseCon.h>
#include <xrpld/core/Pg.h>
#include <xrpld/core/SociDB.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/Role.h>

@@ -60,8 +59,6 @@ doTxHistory(RPC::JsonContext& context)
    Json::Value obj;
    Json::Value& txs = obj[jss::txs];
    obj[jss::index] = startIndex;
    if (context.app.config().reporting())
        obj["used_postgres"] = true;

    for (auto const& t : trans)
    {

@@ -29,8 +29,6 @@ namespace ripple {
Json::Value
doUnlList(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);
    Json::Value obj(Json::objectValue);

    context.app.validators().for_each_listed(

@@ -28,9 +28,6 @@ namespace ripple {
Json::Value
doValidatorListSites(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    return context.app.validatorSites().getJson();
}

@@ -28,9 +28,6 @@ namespace ripple {
Json::Value
doValidators(RPC::JsonContext& context)
{
    if (context.app.config().reporting())
        return rpcError(rpcREPORTING_UNSUPPORTED);

    return context.app.validators().getJson();
}

@@ -65,8 +65,6 @@ public:
    sweep() = 0;

    /** Acquire ledger that has a missing node by ledger sequence
     *
     * Throw if in reporting mode.
     *
     * @param refNum Sequence of ledger to acquire.
     * @param nodeHash Hash of missing node to report in throw.

@@ -69,14 +69,6 @@ void
NodeFamily::missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash)
{
    JLOG(j_.error()) << "Missing node in " << seq;
    if (app_.config().reporting())
    {
        std::stringstream ss;
        ss << "Node not read, likely a Cassandra error in ledger seq " << seq
           << " object hash " << nodeHash;
        Throw<std::runtime_error>(ss.str());
    }

    std::unique_lock<std::mutex> lock(maxSeqMutex_);
    if (maxSeq_ == 0)
    {