Compare commits

10 Commits

Author SHA1 Message Date
Denis Angell
8e4d75ec39 fix local building 2025-03-28 14:21:53 +01:00
Niq Dudfield
0b675465b4 Fix ServerDefinitions_test regression intro in #475 (#477) 2025-03-19 12:32:27 +10:00
Niq Dudfield
d088ad61a9 Prevent dangling reference in getHash() (#475)
Replace temporary uint256 with static variable when returning fallback hash
to avoid returning a const reference to a local temporary object.
2025-03-18 18:37:18 +10:00
Niq Dudfield
ef77b02d7f CI Release Builder (#455) 2025-03-11 13:19:28 +01:00
RichardAH
7385828983 Touch Amendment (#294) 2025-03-06 08:25:42 +01:00
Niq Dudfield
88b01514c1 fix: remove negative rate test failing on MacOS (#452) 2025-03-03 13:12:13 +01:00
Denis Angell
aeece15096 [fix] github runner (#451)
Co-authored-by: Niq Dudfield <ndudfield@gmail.com>
2025-03-03 09:55:51 +01:00
tequ
89cacb1258 Enhance shell script error handling and debugging on GHA (#447) 2025-02-24 10:33:21 +01:00
tequ
8ccff44e8c Fix Error handling on build action (#412) 2025-02-24 18:16:21 +10:00
tequ
420240a2ab Fixed not to use a large fixed range in the magic_enum. (#436) 2025-02-24 17:46:42 +10:00
29 changed files with 1833 additions and 2850 deletions

View File

@@ -2,37 +2,104 @@ name: Build using Docker
on:
push:
branches: [ "dev", "candidate", "release", "jshooks" ]
branches: ["dev", "candidate", "release", "jshooks"]
pull_request:
branches: [ "dev", "candidate", "release", "jshooks" ]
branches: ["dev", "candidate", "release", "jshooks"]
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: false
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP: 1
jobs:
checkout:
runs-on: [self-hosted, vanity]
outputs:
checkout_path: ${{ steps.vars.outputs.checkout_path }}
steps:
- uses: actions/checkout@v4
with:
clean: false
- name: Prepare checkout path
id: vars
run: |
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | sed -e 's/[^a-zA-Z0-9._-]/-/g')
CHECKOUT_PATH="${SAFE_BRANCH}-${{ github.sha }}"
echo "checkout_path=${CHECKOUT_PATH}" >> "$GITHUB_OUTPUT"
- uses: actions/checkout@v4
with:
path: ${{ steps.vars.outputs.checkout_path }}
clean: true
fetch-depth: 2 # Only get the last 2 commits, to avoid fetching all history
checkpatterns:
runs-on: [self-hosted, vanity]
needs: checkout
defaults:
run:
working-directory: ${{ needs.checkout.outputs.checkout_path }}
steps:
- name: Check for suspicious patterns
run: /bin/bash suspicious_patterns.sh
- name: Check for suspicious patterns
run: /bin/bash suspicious_patterns.sh
build:
runs-on: [self-hosted, vanity]
needs: checkpatterns
needs: [checkpatterns, checkout]
defaults:
run:
working-directory: ${{ needs.checkout.outputs.checkout_path }}
steps:
- name: Build using Docker
run: /bin/bash release-builder.sh
- name: Set Cleanup Script Path
run: |
echo "JOB_CLEANUP_SCRIPT=$(mktemp)" >> $GITHUB_ENV
- name: Build using Docker
run: /bin/bash release-builder.sh
- name: Stop Container (Cleanup)
if: always()
run: |
echo "Running cleanup script: $JOB_CLEANUP_SCRIPT"
/bin/bash -e -x "$JOB_CLEANUP_SCRIPT"
CLEANUP_EXIT_CODE=$?
if [[ "$CLEANUP_EXIT_CODE" -eq 0 ]]; then
echo "Cleanup script succeeded."
rm -f "$JOB_CLEANUP_SCRIPT"
echo "Cleanup script removed."
else
echo "⚠️ Cleanup script failed! Keeping for debugging: $JOB_CLEANUP_SCRIPT"
fi
if [[ "${DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP}" == "1" ]]; then
echo "🔍 Checking for leftover containers..."
BUILD_CONTAINERS=$(docker ps --format '{{.Names}}' | grep '^xahaud_cached_builder' || echo "")
if [[ -n "$BUILD_CONTAINERS" ]]; then
echo "⚠️ WARNING: Some build containers are still running"
echo "$BUILD_CONTAINERS"
else
echo "✅ No build containers found"
fi
fi
tests:
runs-on: [self-hosted, vanity]
needs: build
needs: [build, checkout]
defaults:
run:
working-directory: ${{ needs.checkout.outputs.checkout_path }}
steps:
- name: Unit tests
run: /bin/bash docker-unit-tests.sh
- name: Unit tests
run: /bin/bash docker-unit-tests.sh
cleanup:
runs-on: [self-hosted, vanity]
needs: [tests, checkout]
if: always()
steps:
- name: Cleanup workspace
run: |
CHECKOUT_PATH="${{ needs.checkout.outputs.checkout_path }}"
echo "Cleaning workspace for ${CHECKOUT_PATH}"
rm -rf "${{ github.workspace }}/${CHECKOUT_PATH}"

View File

@@ -1,48 +0,0 @@
# - Find MySQL
find_path(MYSQL_INCLUDE_DIR
NAMES mysql.h
PATHS
/usr/include/mysql
/usr/local/include/mysql
/opt/mysql/mysql/include
DOC "MySQL include directory"
)
find_library(MYSQL_LIBRARY
NAMES mysqlclient
PATHS
/usr/lib
/usr/lib/x86_64-linux-gnu
/usr/lib/mysql
/usr/local/lib/mysql
/opt/mysql/mysql/lib
DOC "MySQL client library"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MySQL
REQUIRED_VARS
MYSQL_LIBRARY
MYSQL_INCLUDE_DIR
)
if(MYSQL_FOUND)
set(MYSQL_INCLUDE_DIRS ${MYSQL_INCLUDE_DIR})
set(MYSQL_LIBRARIES ${MYSQL_LIBRARY})
# Create an imported target
if(NOT TARGET MySQL::MySQL)
add_library(MySQL::MySQL UNKNOWN IMPORTED)
set_target_properties(MySQL::MySQL PROPERTIES
IMPORTED_LOCATION "${MYSQL_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${MYSQL_INCLUDE_DIR}"
)
endif()
mark_as_advanced(MYSQL_INCLUDE_DIR MYSQL_LIBRARY)
else()
message(FATAL_ERROR "Could not find MySQL development files")
endif()
message(STATUS "Using MySQL include dir: ${MYSQL_INCLUDE_DIR}")
message(STATUS "Using MySQL library: ${MYSQL_LIBRARY}")

View File

@@ -540,7 +540,6 @@ target_sources (rippled PRIVATE
#]===============================]
src/ripple/nodestore/backend/CassandraFactory.cpp
src/ripple/nodestore/backend/RWDBFactory.cpp
src/ripple/nodestore/backend/MySQLFactory.cpp
src/ripple/nodestore/backend/MemoryFactory.cpp
src/ripple/nodestore/backend/FlatmapFactory.cpp
src/ripple/nodestore/backend/NuDBFactory.cpp
@@ -756,6 +755,7 @@ if (tests)
src/test/app/Taker_test.cpp
src/test/app/TheoreticalQuality_test.cpp
src/test/app/Ticket_test.cpp
src/test/app/Touch_test.cpp
src/test/app/Transaction_ordering_test.cpp
src/test/app/TrustAndBalance_test.cpp
src/test/app/TxQ_test.cpp

View File

@@ -1,56 +0,0 @@
#[===================================================================[
dep: MySQL
MySQL client library integration for rippled (static linking)
#]===================================================================]
# Create an IMPORTED target for MySQL
add_library(mysql_client UNKNOWN IMPORTED)
# Find MySQL client library and headers
find_path(MYSQL_INCLUDE_DIR
NAMES mysql.h
PATHS
/usr/include/mysql
/usr/local/include/mysql
/opt/mysql/mysql/include
DOC "MySQL include directory"
)
# Modified to specifically look for static library
find_library(MYSQL_LIBRARY
NAMES libmysqlclient.a mysqlclient.a # Look for static libraries first
PATHS
/usr/lib
/usr/lib/x86_64-linux-gnu
/usr/lib/mysql
/usr/local/lib/mysql
/opt/mysql/mysql/lib
DOC "MySQL client static library"
NO_DEFAULT_PATH # Prevents finding dynamic library first
)
# Set properties on the imported target
if(MYSQL_INCLUDE_DIR AND MYSQL_LIBRARY)
set_target_properties(mysql_client PROPERTIES
IMPORTED_LOCATION "${MYSQL_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${MYSQL_INCLUDE_DIR}"
IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" # Added for static linking
IMPORTED_LINK_INTERFACE_MULTIPLICITY "1" # Added for static linking
)
message(STATUS "Found MySQL include dir: ${MYSQL_INCLUDE_DIR}")
message(STATUS "Found MySQL library: ${MYSQL_LIBRARY}")
else()
message(FATAL_ERROR "Could not find MySQL static development files. Please install libmysqlclient-dev")
endif()
# Add MySQL backend source to rippled sources
list(APPEND rippled_src
src/ripple/nodestore/backend/MySQLBackend.cpp)
# Link MySQL to rippled
target_link_libraries(ripple_libs
INTERFACE
mysql_client
)
# Create an alias target for consistency with other deps
add_library(deps::mysql ALIAS mysql_client)

View File

@@ -75,7 +75,6 @@ include(deps/gRPC)
include(deps/cassandra)
include(deps/Postgres)
include(deps/WasmEdge)
include(deps/MySQL)
###

View File

@@ -1,4 +1,11 @@
#!/bin/bash
#!/bin/bash -u
# We use set -e and bash with -u to bail on first non zero exit code of any
# processes launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.
set -ex
set -e
echo "START INSIDE CONTAINER - CORE"
@@ -23,7 +30,7 @@ fi
perl -i -pe "s/^(\\s*)-DBUILD_SHARED_LIBS=OFF/\\1-DBUILD_SHARED_LIBS=OFF\\n\\1-DROCKSDB_BUILD_SHARED=OFF/g" Builds/CMake/deps/Rocksdb.cmake &&
mv Builds/CMake/deps/WasmEdge.cmake Builds/CMake/deps/WasmEdge.old &&
echo "find_package(LLVM REQUIRED CONFIG)
message(STATUS \"Found LLVM ${LLVM_PACKAGE_VERSION}\")
message(STATUS \"Found LLVM \${LLVM_PACKAGE_VERSION}\")
message(STATUS \"Using LLVMConfig.cmake in: \${LLVM_DIR}\")
add_library (wasmedge STATIC IMPORTED GLOBAL)
set_target_properties(wasmedge PROPERTIES IMPORTED_LOCATION \${WasmEdge_LIB})

View File

@@ -1,4 +1,11 @@
#!/bin/bash
#!/bin/bash -u
# We use set -e and bash with -u to bail on first non zero exit code of any
# processes launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.
set -ex
set -e
echo "START INSIDE CONTAINER - FULL"
@@ -19,7 +26,7 @@ yum-config-manager --disable centos-sclo-sclo
####
cd /io;
mkdir src/certs;
mkdir -p src/certs;
curl --silent -k https://raw.githubusercontent.com/RichardAH/rippled-release-builder/main/ca-bundle/certbundle.h -o src/certs/certbundle.h;
if [ "`grep certbundle.h src/ripple/net/impl/RegisterSSLCerts.cpp | wc -l`" -eq "0" ]
then
@@ -66,17 +73,10 @@ then
#endif/g" src/ripple/net/impl/RegisterSSLCerts.cpp &&
sed -i "s/#include <ripple\/net\/RegisterSSLCerts.h>/\0\n#include <certs\/certbundle.h>/g" src/ripple/net/impl/RegisterSSLCerts.cpp
fi
mkdir .nih_c;
mkdir .nih_toolchain;
mkdir -p .nih_c;
mkdir -p .nih_toolchain;
cd .nih_toolchain &&
(cat > /etc/yum.repos.d/MariaDB.repo << EOF
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.5/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOF ) &&
yum install -y wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-10-binutils zlib-static ncurses-static MariaDB-devel MariaDB-shared -y \
yum install -y wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-10-binutils zlib-static ncurses-static -y \
devtoolset-7-gcc-c++ \
devtoolset-9-gcc-c++ \
devtoolset-10-gcc-c++ \
@@ -122,7 +122,7 @@ tar -xf libunwind-13.0.1.src.tar.xz &&
cp -r libunwind-13.0.1.src/include libunwind-13.0.1.src/src lld-13.0.1.src/ &&
cd lld-13.0.1.src &&
rm -rf build CMakeCache.txt &&
mkdir build &&
mkdir -p build &&
cd build &&
cmake .. -DLLVM_LIBRARY_DIR=/usr/lib64/llvm13/lib/ -DCMAKE_INSTALL_PREFIX=/usr/lib64/llvm13/ -DCMAKE_BUILD_TYPE=Release &&
make -j$3 install &&
@@ -132,7 +132,7 @@ cd ../../ &&
echo "-- Build WasmEdge --" &&
( wget -nc -q https://github.com/WasmEdge/WasmEdge/archive/refs/tags/0.11.2.zip; unzip -o 0.11.2.zip; ) &&
cd WasmEdge-0.11.2 &&
( mkdir build; echo "" ) &&
( mkdir -p build; echo "" ) &&
cd build &&
export BOOST_ROOT="/usr/local/src/boost_1_86_0" &&
export Boost_LIBRARY_DIRS="/usr/local/lib" &&

View File

@@ -1,4 +1,4 @@
#!/bin/bash
echo "Mounting $(pwd)/io in ubuntu and running unit tests"
docker run --rm -i -v $(pwd):/io ubuntu sh -c '/io/release-build/xahaud -u'

View File

@@ -1,4 +1,9 @@
#!/bin/bash
#!/bin/bash -u
# We use set -e and bash with -u to bail on first non zero exit code of any
# processes launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.
set -ex
echo "START BUILDING (HOST)"
@@ -6,13 +11,37 @@ echo "Cleaning previously built binary"
rm -f release-build/xahaud
BUILD_CORES=$(echo "scale=0 ; `nproc` / 1.337" | bc)
GITHUB_REPOSITORY=${GITHUB_REPOSITORY:-""}
GITHUB_SHA=${GITHUB_SHA:-"local"}
GITHUB_RUN_NUMBER=${GITHUB_RUN_NUMBER:-"0"}
GITHUB_WORKFLOW=${GITHUB_WORKFLOW:-"local"}
GITHUB_REF=${GITHUB_REF:-"local"}
if [[ "$GITHUB_REPOSITORY" == "" ]]; then
#Default
BUILD_CORES=8
fi
CONTAINER_NAME=xahaud_cached_builder_$(echo "$GITHUB_ACTOR" | awk '{print tolower($0)}')
EXIT_IF_CONTAINER_RUNNING=${EXIT_IF_CONTAINER_RUNNING:-1}
# Ensure still works outside of GH Actions by setting these to /dev/null
# GA will run this script and then delete it at the end of the job
JOB_CLEANUP_SCRIPT=${JOB_CLEANUP_SCRIPT:-/dev/null}
NORMALIZED_WORKFLOW=$(echo "$GITHUB_WORKFLOW" | tr -c 'a-zA-Z0-9' '-')
NORMALIZED_REF=$(echo "$GITHUB_REF" | tr -c 'a-zA-Z0-9' '-')
CONTAINER_NAME="xahaud_cached_builder_${NORMALIZED_WORKFLOW}-${NORMALIZED_REF}"
# Check if the container is already running
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
echo "⚠️ A running container (${CONTAINER_NAME}) was detected."
if [[ "$EXIT_IF_CONTAINER_RUNNING" -eq 1 ]]; then
echo "❌ EXIT_IF_CONTAINER_RUNNING is set. Exiting."
exit 1
else
echo "🛑 Stopping the running container: ${CONTAINER_NAME}"
docker stop "${CONTAINER_NAME}"
fi
fi
echo "-- BUILD CORES: $BUILD_CORES"
echo "-- GITHUB_REPOSITORY: $GITHUB_REPOSITORY"
@@ -55,6 +84,8 @@ else
# GH Action, runner
echo "GH Action, runner, clean & re-create create persistent container"
docker rm -f $CONTAINER_NAME
echo "echo 'Stopping container: $CONTAINER_NAME'" >> "$JOB_CLEANUP_SCRIPT"
echo "docker stop --time=15 \"$CONTAINER_NAME\" || echo 'Failed to stop container or container not running'" >> "$JOB_CLEANUP_SCRIPT"
docker run -di --user 0:$(id -g) --name $CONTAINER_NAME -v /data/builds:/data/builds -v `pwd`:/io --network host ghcr.io/foobarwidget/holy-build-box-x64 /hbb_exe/activate-exec bash
docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -x /io/build-full.sh "$GITHUB_REPOSITORY" "$GITHUB_SHA" "$BUILD_CORES" "$GITHUB_RUN_NUMBER"
docker stop $CONTAINER_NAME

File diff suppressed because it is too large.

View File

@@ -20,7 +20,6 @@
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/FlatmapDatabase.h>
#include <ripple/app/rdb/backend/MySQLDatabase.h>
#include <ripple/app/rdb/backend/RWDBDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DatabaseShard.h>
@@ -43,7 +42,6 @@ RelationalDatabase::init(
bool use_postgres = false;
bool use_rwdb = false;
bool use_flatmap = false;
bool use_mysql = false;
if (config.reporting())
{
@@ -66,10 +64,6 @@ RelationalDatabase::init(
{
use_flatmap = true;
}
else if (boost::iequals(get(rdb_section, "backend"), "mysql"))
{
use_mysql = true;
}
else
{
Throw<std::runtime_error>(
@@ -99,10 +93,6 @@ RelationalDatabase::init(
{
return getFlatmapDatabase(app, config, jobQueue);
}
else if (use_mysql)
{
return getMySQLDatabase(app, config, jobQueue);
}
return std::unique_ptr<RelationalDatabase>();
}

View File

@@ -1079,6 +1079,24 @@ Transactor::checkMultiSign(PreclaimContext const& ctx)
//------------------------------------------------------------------------------
// increment the touch counter on an account
static void
touchAccount(ApplyView& view, AccountID const& id)
{
if (!view.rules().enabled(featureTouch))
return;
std::shared_ptr<SLE> sle = view.peek(keylet::account(id));
if (!sle)
return;
uint64_t tc =
sle->isFieldPresent(sfTouchCount) ? sle->getFieldU64(sfTouchCount) : 0;
sle->setFieldU64(sfTouchCount, tc + 1);
view.update(sle);
}
static void
removeUnfundedOffers(
ApplyView& view,
@@ -1519,6 +1537,8 @@ Transactor::doTSH(
if ((!canRollback && strong) || (canRollback && !strong))
continue;
touchAccount(view, tshAccountID);
auto klTshHook = keylet::hook(tshAccountID);
auto tshHook = view.read(klTshHook);

View File

@@ -36,8 +36,6 @@ using IniFileSections = std::map<std::string, std::vector<std::string>>;
//------------------------------------------------------------------------------
class Config;
/** Holds a collection of configuration values.
A configuration file contains zero or more sections.
*/
@@ -50,22 +48,11 @@ private:
std::vector<std::string> values_;
bool had_trailing_comments_ = false;
Config const* parent_;
using const_iterator = decltype(lookup_)::const_iterator;
public:
// throws if no parent for this section
Config const&
getParent() const
{
if (!parent_)
Throw<std::runtime_error>("No parent_ for config section");
return *parent_;
}
/** Create an empty section. */
explicit Section(std::string const& name = "", Config* parent = nullptr);
explicit Section(std::string const& name = "");
/** Returns the name of this section. */
std::string const&
@@ -231,8 +218,6 @@ private:
std::map<std::string, Section, boost::beast::iless> map_;
public:
virtual ~BasicConfig() = default;
/** Returns `true` if a section with the given name exists. */
bool
exists(std::string const& name) const;

View File

@@ -24,10 +24,7 @@
namespace ripple {
class Config;
Section::Section(std::string const& name, Config* parent)
: name_(name), parent_(parent)
Section::Section(std::string const& name) : name_(name)
{
}
@@ -178,14 +175,12 @@ BasicConfig::legacy(std::string const& sectionName) const
void
BasicConfig::build(IniFileSections const& ifs)
{
Config* config_this = dynamic_cast<Config*>(this);
for (auto const& entry : ifs)
{
auto const result = map_.emplace(
std::piecewise_construct,
std::make_tuple(entry.first),
std::make_tuple(
entry.first, config_this)); // Will be nullptr if cast failed
std::make_tuple(entry.first));
result.first->second.append(entry.second);
}
}

View File

@@ -175,17 +175,6 @@ public:
// Network parameters
uint32_t NETWORK_ID = 0;
struct MysqlSettings
{
std::string host;
std::string user;
std::string pass;
std::string name;
uint16_t port;
};
std::optional<MysqlSettings> mysql;
// DEPRECATED - Fee units for a reference transction.
// Only provided for backwards compatibility in a couple of places
static constexpr std::uint32_t FEE_UNITS_DEPRECATED = 10;

View File

@@ -102,7 +102,6 @@ struct ConfigSection
#define SECTION_NETWORK_ID "network_id"
#define SECTION_IMPORT_VL_KEYS "import_vl_keys"
#define SECTION_DATAGRAM_MONITOR "datagram_monitor"
#define SECTION_MYSQL_SETTINGS "mysql_settings"
} // namespace ripple

View File

@@ -756,30 +756,6 @@ Config::loadFromString(std::string const& fileContents)
SERVER_DOMAIN = strTemp;
}
if (exists(SECTION_MYSQL_SETTINGS))
{
auto const sec = section(SECTION_MYSQL_SETTINGS);
if (!sec.exists("host") || !sec.exists("user") || !sec.exists("pass") ||
!sec.exists("port") || !sec.exists("name"))
{
Throw<std::runtime_error>(
"[mysql_settings] requires host=, user=, pass=, name= and "
"port= keys.");
}
MysqlSettings my;
my.host = *sec.get("host");
my.user = *sec.get("user");
my.pass = *sec.get("pass");
my.name = *sec.get("name");
std::string portStr = *sec.get("port");
my.port = beast::lexicalCastThrow<int>(portStr);
mysql = my;
}
if (exists(SECTION_OVERLAY))
{
auto const sec = section(SECTION_OVERLAY);

View File

@@ -1,966 +0,0 @@
#ifndef RIPPLE_NODESTORE_MYSQLBACKEND_H_INCLUDED
#define RIPPLE_NODESTORE_MYSQLBACKEND_H_INCLUDED
#include <ripple/basics/contract.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/nodestore/impl/codec.h>
#include <boost/beast/core/string.hpp>
#include <chrono>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <mysql/mysql.h>
#include <queue>
#include <sstream>
#include <thread>
namespace ripple {
namespace NodeStore {
// SQL statements as constants
static constexpr auto CREATE_DATABASE = R"SQL(
CREATE DATABASE IF NOT EXISTS `%s`
CHARACTER SET utf8mb4
COLLATE utf8mb4_unicode_ci
)SQL";
static constexpr auto CREATE_TABLE = R"SQL(
CREATE TABLE IF NOT EXISTS `%s` (
hash BINARY(32) PRIMARY KEY,
data MEDIUMBLOB NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
INDEX idx_created_at (created_at)
) ENGINE=InnoDB
)SQL";
static constexpr auto INSERT_NODE = R"SQL(
INSERT INTO %s (hash, data)
VALUES (?, ?)
ON DUPLICATE KEY UPDATE data = VALUES(data)
)SQL";
static constexpr auto SET_ISOLATION_LEVEL = R"SQL(
SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
)SQL";
class MySQLConnection
{
private:
std::unique_ptr<MYSQL, decltype(&mysql_close)> mysql_;
Config const& config_;
beast::Journal journal_;
static constexpr int MAX_RETRY_ATTEMPTS = 3;
static constexpr auto RETRY_DELAY_MS = 1000;
bool
connect()
{
mysql_.reset(mysql_init(nullptr));
if (!mysql_)
return false;
// Set connection options
unsigned int timeout = 5;
mysql_options(mysql_.get(), MYSQL_OPT_CONNECT_TIMEOUT, &timeout);
uint8_t const reconnect = 1;
mysql_options(mysql_.get(), MYSQL_OPT_RECONNECT, &reconnect);
// Connect without database first
auto* conn = mysql_real_connect(
mysql_.get(),
config_.mysql->host.c_str(),
config_.mysql->user.c_str(),
config_.mysql->pass.c_str(),
nullptr, // No database selected yet
config_.mysql->port,
nullptr,
CLIENT_MULTI_STATEMENTS);
if (!conn)
return false;
// Set isolation level for dirty reads
if (mysql_query(mysql_.get(), SET_ISOLATION_LEVEL))
{
JLOG(journal_.warn()) << "Failed to set isolation level: "
<< mysql_error(mysql_.get());
return false;
}
// Create database (unconditionally)
std::string query(1024, '\0');
int length = snprintf(
&query[0],
query.size(),
CREATE_DATABASE,
config_.mysql->name.c_str());
query.resize(length);
if (mysql_query(mysql_.get(), query.c_str()))
{
JLOG(journal_.error())
<< "Failed to create database: " << mysql_error(mysql_.get());
return false;
}
// Now select the database
if (mysql_select_db(mysql_.get(), config_.mysql->name.c_str()))
{
JLOG(journal_.error())
<< "Failed to select database: " << mysql_error(mysql_.get());
return false;
}
return true;
}
public:
MySQLConnection(Config const& config, beast::Journal journal)
: mysql_(nullptr, mysql_close), config_(config), journal_(journal)
{
if (!config_.mysql.has_value())
throw std::runtime_error(
"[mysql_settings] stanza missing from config!");
if (config_.mysql->name.empty())
throw std::runtime_error(
"Database name missing from mysql_settings!");
if (!connect())
{
Throw<std::runtime_error>(
std::string("Failed to connect to MySQL: ") +
(mysql_ ? mysql_error(mysql_.get()) : "initialization failed"));
}
}
MYSQL*
get()
{
return mysql_.get();
}
bool
ensureConnection()
{
for (int attempt = 0; attempt < MAX_RETRY_ATTEMPTS; ++attempt)
{
if (!mysql_ || mysql_ping(mysql_.get()) != 0)
{
JLOG(journal_.warn())
<< "MySQL connection lost, attempting reconnect (attempt "
<< (attempt + 1) << "/" << MAX_RETRY_ATTEMPTS << ")";
if (connect())
return true;
if (attempt < MAX_RETRY_ATTEMPTS - 1)
{
std::this_thread::sleep_for(
std::chrono::milliseconds(RETRY_DELAY_MS));
}
}
else
{
return true;
}
}
return false;
}
// Helper method to execute a query with retry logic
bool
executeQuery(std::string const& query)
{
for (int attempt = 0; attempt < MAX_RETRY_ATTEMPTS; ++attempt)
{
if (ensureConnection() && !mysql_query(mysql_.get(), query.c_str()))
return true;
if (attempt < MAX_RETRY_ATTEMPTS - 1)
{
std::this_thread::sleep_for(
std::chrono::milliseconds(RETRY_DELAY_MS));
}
}
return false;
}
};
static thread_local std::unique_ptr<MySQLConnection> threadConnection_;
class MySQLBackend : public Backend
{
private:
std::string name_;
beast::Journal journal_;
bool isOpen_{false};
Config const& config_;
static constexpr std::size_t BATCH_SIZE = 1000;
static constexpr std::size_t MAX_CACHE_SIZE =
100000; // Maximum number of entries
static constexpr std::size_t CACHE_CLEANUP_THRESHOLD =
120000; // When to trigger cleanup
using DataStore = std::map<uint256, std::vector<std::uint8_t>>;
DataStore cache_;
std::mutex cacheMutex_;
// LRU tracking for cache management
struct CacheEntry
{
std::chrono::steady_clock::time_point last_access;
size_t size;
bool pending{false};
};
std::map<uint256, CacheEntry> cacheMetadata_;
std::mutex metadataMutex_;
std::atomic<size_t> currentCacheSize_{0};
// Background write queue
struct WriteOp
{
uint256 hash;
std::vector<std::uint8_t> data;
};
std::queue<WriteOp> writeQueue_;
std::mutex queueMutex_;
std::condition_variable queueCV_;
std::atomic<bool> shouldStop_{false};
std::thread writeThread_;
MySQLConnection*
getConnection()
{
if (!threadConnection_)
{
threadConnection_ =
std::make_unique<MySQLConnection>(config_, journal_);
}
return threadConnection_.get();
}
std::string
sanitizeTableName(std::string name)
{
name.erase(
std::unique(
name.begin(),
std::transform(
name.begin(),
name.end(),
name.begin(),
[](char c) { return std::isalnum(c) ? c : '_'; })),
name.end());
return "nodes_" + name;
}
void
cleanupCache()
{
if (currentCacheSize_.load() < CACHE_CLEANUP_THRESHOLD)
return;
// Collect entries sorted by last access time
std::vector<std::pair<uint256, std::chrono::steady_clock::time_point>>
entries;
{
std::lock_guard<std::mutex> metadataLock(metadataMutex_);
for (const auto& [hash, metadata] : cacheMetadata_)
{
if (!metadata.pending)
entries.emplace_back(hash, metadata.last_access);
}
}
// Sort by access time, oldest first
std::sort(
entries.begin(), entries.end(), [](const auto& a, const auto& b) {
return a.second < b.second;
});
// Remove oldest entries until we're below target size
size_t removedSize = 0;
for (const auto& entry : entries)
{
if (currentCacheSize_.load() <= MAX_CACHE_SIZE)
break;
{
std::lock_guard<std::mutex> metadataLock(metadataMutex_);
auto metaIt = cacheMetadata_.find(entry.first);
if (metaIt != cacheMetadata_.end())
{
removedSize += metaIt->second.size;
cacheMetadata_.erase(metaIt);
}
}
{
std::lock_guard<std::mutex> cacheLock(cacheMutex_);
cache_.erase(entry.first);
}
currentCacheSize_--;
}
JLOG(journal_.debug())
<< "Cache cleanup removed " << removedSize
<< " bytes, current size: " << currentCacheSize_.load();
}
void
updateCacheMetadata(const uint256& hash, size_t size)
{
CacheEntry entry{std::chrono::steady_clock::now(), size};
{
std::lock_guard<std::mutex> metadataLock(metadataMutex_);
cacheMetadata_[hash] = entry;
}
if (++currentCacheSize_ >= CACHE_CLEANUP_THRESHOLD)
{
cleanupCache();
}
}
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
{
if (!isOpen_)
return notFound;
uint256 const hash(uint256::fromVoid(key));
// Check cache first
{
std::lock_guard<std::mutex> cacheLock(cacheMutex_);
auto it = cache_.find(hash);
if (it != cache_.end())
{
// Update access time
{
std::lock_guard<std::mutex> metadataLock(metadataMutex_);
auto metaIt = cacheMetadata_.find(hash);
if (metaIt != cacheMetadata_.end())
{
metaIt->second.last_access =
std::chrono::steady_clock::now();
}
}
nudb::detail::buffer decompressed;
auto const result = nodeobject_decompress(
it->second.data(), it->second.size(), decompressed);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (decoded.wasOk())
{
*pObject = decoded.createObject();
return ok;
}
}
}
// If not in cache, fetch from MySQL
return fetchFromMySQL(key, pObject);
}
void
startWriteThread()
{
writeThread_ = std::thread([this]() {
while (!shouldStop_)
{
std::vector<WriteOp> batch;
{
std::unique_lock<std::mutex> lock(queueMutex_);
queueCV_.wait_for(
lock, std::chrono::milliseconds(100), [this]() {
return !writeQueue_.empty() || shouldStop_;
});
// Grab up to BATCH_SIZE operations
while (!writeQueue_.empty() && batch.size() < BATCH_SIZE)
{
batch.push_back(std::move(writeQueue_.front()));
writeQueue_.pop();
}
}
if (!batch.empty())
{
auto* conn = getConnection();
if (!conn->ensureConnection())
continue;
if (mysql_query(conn->get(), "START TRANSACTION"))
continue;
bool success = true;
for (auto const& op : batch)
{
MYSQL_STMT* stmt = mysql_stmt_init(conn->get());
if (!stmt)
{
success = false;
break;
}
std::string const sql = "INSERT INTO " + name_ +
" (hash, data) VALUES (?, ?) " +
"ON DUPLICATE KEY UPDATE data = VALUES(data)";
if (mysql_stmt_prepare(stmt, sql.c_str(), sql.length()))
{
mysql_stmt_close(stmt);
success = false;
break;
}
MYSQL_BIND bind[2];
std::memset(bind, 0, sizeof(bind));
bind[0].buffer_type = MYSQL_TYPE_BLOB;
bind[0].buffer = const_cast<void*>(
static_cast<void const*>(op.hash.data()));
bind[0].buffer_length = op.hash.size();
bind[1].buffer_type = MYSQL_TYPE_BLOB;
bind[1].buffer = const_cast<uint8_t*>(op.data.data());
bind[1].buffer_length = op.data.size();
if (mysql_stmt_bind_param(stmt, bind))
{
mysql_stmt_close(stmt);
success = false;
break;
}
if (mysql_stmt_execute(stmt))
{
mysql_stmt_close(stmt);
success = false;
break;
}
mysql_stmt_close(stmt);
}
if (success)
{
if (mysql_query(conn->get(), "COMMIT") == 0)
{
// Clear pending flag for successfully written
// entries
std::lock_guard<std::mutex> metadataLock(
metadataMutex_);
for (const auto& op : batch)
{
auto it = cacheMetadata_.find(op.hash);
if (it != cacheMetadata_.end())
it->second.pending = false;
}
}
}
else
mysql_query(conn->get(), "ROLLBACK");
}
}
});
}
void
queueWrite(uint256 const& hash, std::vector<std::uint8_t> const& data)
{
{
std::lock_guard<std::mutex> metadataLock(metadataMutex_);
auto& entry = cacheMetadata_[hash];
entry.pending = true;
}
std::lock_guard<std::mutex> lock(queueMutex_);
writeQueue_.push({hash, data});
queueCV_.notify_one();
}
Status
fetchFromMySQL(void const* key, std::shared_ptr<NodeObject>* pObject)
{
auto* conn = getConnection();
if (!conn->ensureConnection())
{
JLOG(journal_.warn()) << "fetch: Failed to ensure connection";
return dataCorrupt;
}
uint256 const hash(uint256::fromVoid(key));
MYSQL_STMT* stmt = mysql_stmt_init(conn->get());
if (!stmt)
{
JLOG(journal_.warn()) << "fetch: Failed to init stmt";
return dataCorrupt;
}
std::string const sql = "SELECT data FROM " + name_ + " WHERE hash = ?";
if (mysql_stmt_prepare(stmt, sql.c_str(), sql.length()))
{
JLOG(journal_.warn()) << "fetch: Failed to prepare stmt";
mysql_stmt_close(stmt);
return dataCorrupt;
}
MYSQL_BIND bindParam;
std::memset(&bindParam, 0, sizeof(bindParam));
bindParam.buffer_type = MYSQL_TYPE_BLOB;
bindParam.buffer =
const_cast<void*>(static_cast<void const*>(hash.data()));
bindParam.buffer_length = hash.size();
if (mysql_stmt_bind_param(stmt, &bindParam))
{
JLOG(journal_.warn()) << "fetch: Failed to bind param";
mysql_stmt_close(stmt);
return dataCorrupt;
}
if (mysql_stmt_execute(stmt))
{
mysql_stmt_close(stmt);
return notFound;
}
MYSQL_BIND bindResult;
std::memset(&bindResult, 0, sizeof(bindResult));
uint64_t length = 0;
#if MYSQL_VERSION_ID < 80000
char
#else
bool
#endif
is_null = 0;
bindResult.buffer_type = MYSQL_TYPE_BLOB;
bindResult.length = &length;
bindResult.is_null = &is_null;
std::vector<uint8_t> buffer(16 * 1024 * 1024); // 16MB buffer
bindResult.buffer = buffer.data();
bindResult.buffer_length = buffer.size();
if (mysql_stmt_bind_result(stmt, &bindResult))
{
JLOG(journal_.warn()) << "fetch: Failed to bind result";
mysql_stmt_close(stmt);
return dataCorrupt;
}
if (mysql_stmt_store_result(stmt))
{
JLOG(journal_.warn()) << "fetch: Failed to store result";
mysql_stmt_close(stmt);
return dataCorrupt;
}
if (mysql_stmt_num_rows(stmt) == 0)
{
mysql_stmt_close(stmt);
return notFound;
}
if (mysql_stmt_fetch(stmt))
{
JLOG(journal_.warn()) << "fetch: Failed to fetch stmt";
mysql_stmt_close(stmt);
return dataCorrupt;
}
mysql_stmt_close(stmt);
// Add to cache
std::vector<uint8_t> cached_data(
buffer.begin(), buffer.begin() + length);
cache_.insert_or_assign(hash, cached_data);
updateCacheMetadata(hash, length);
nudb::detail::buffer decompressed;
auto const result = nodeobject_decompress(
cached_data.data(), cached_data.size(), decompressed);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
JLOG(journal_.warn()) << "fetch: Failed to decode blob";
return dataCorrupt;
}
*pObject = decoded.createObject();
return ok;
}
public:
MySQLBackend(
std::size_t keyBytes,
Section const& keyValues,
beast::Journal journal)
: name_(sanitizeTableName(get(keyValues, "path", "nodestore")))
, journal_(journal)
, config_(keyValues.getParent())
{
startWriteThread();
}
~MySQLBackend()
{
shouldStop_ = true;
queueCV_.notify_all();
if (writeThread_.joinable())
writeThread_.join();
}
std::string
getName() override
{
return name_;
}
void
open(bool createIfMissing) override
{
if (isOpen_)
Throw<std::runtime_error>("database already open");
auto* conn = getConnection();
if (!conn->ensureConnection())
Throw<std::runtime_error>("Failed to establish MySQL connection");
if (createIfMissing)
createTable();
isOpen_ = true;
}
bool
isOpen() override
{
return isOpen_;
}
void
close() override
{
// Wait for write queue to empty
{
std::unique_lock<std::mutex> lock(queueMutex_);
while (!writeQueue_.empty())
{
queueCV_.wait(lock);
}
}
threadConnection_.reset();
cache_.clear();
cacheMetadata_.clear();
currentCacheSize_ = 0;
isOpen_ = false;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
std::vector<uint256 const*> mysqlFetch;
mysqlFetch.reserve(hashes.size());
// First try cache
for (auto const& h : hashes)
{
auto it = cache_.find(*h);
if (it != cache_.end())
{
// Update access time
auto metaIt = cacheMetadata_.find(*h);
if (metaIt != cacheMetadata_.end())
{
metaIt->second.last_access =
std::chrono::steady_clock::now();
}
nudb::detail::buffer decompressed;
auto const result = nodeobject_decompress(
it->second.data(), it->second.size(), decompressed);
DecodedBlob decoded(h->data(), result.first, result.second);
if (decoded.wasOk())
{
results.push_back(decoded.createObject());
continue;
}
}
mysqlFetch.push_back(h);
results.push_back(nullptr); // Placeholder for MySQL fetch
}
// If everything was in cache, return early
if (mysqlFetch.empty())
return {results, ok};
// Fetch remaining from MySQL
auto* conn = getConnection();
if (!conn->ensureConnection())
return {results, dataCorrupt};
if (mysql_query(conn->get(), "START TRANSACTION"))
return {results, dataCorrupt};
try
{
for (size_t i = 0; i < mysqlFetch.size(); ++i)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetchFromMySQL(mysqlFetch[i]->data(), &nObj);
// Find the original position in results
auto originalPos = std::distance(
hashes.begin(),
std::find(hashes.begin(), hashes.end(), mysqlFetch[i]));
results[originalPos] = (status == ok ? nObj : nullptr);
}
if (mysql_query(conn->get(), "COMMIT"))
return {results, dataCorrupt};
return {results, ok};
}
catch (...)
{
mysql_query(conn->get(), "ROLLBACK");
throw;
}
}
void
store(std::shared_ptr<NodeObject> const& object) override
{
if (!isOpen_ || !object)
return;
EncodedBlob encoded(object);
nudb::detail::buffer compressed;
auto const result = nodeobject_compress(
encoded.getData(), encoded.getSize(), compressed);
std::vector<std::uint8_t> data(
static_cast<const std::uint8_t*>(result.first),
static_cast<const std::uint8_t*>(result.first) + result.second);
// Update cache immediately
cache_.insert_or_assign(object->getHash(), data);
updateCacheMetadata(object->getHash(), data.size());
// Queue async write to MySQL
queueWrite(object->getHash(), data);
}
void
storeBatch(Batch const& batch) override
{
for (auto const& e : batch)
{
if (!e)
continue;
EncodedBlob encoded(e);
nudb::detail::buffer compressed;
auto const result = nodeobject_compress(
encoded.getData(), encoded.getSize(), compressed);
std::vector<std::uint8_t> data(
static_cast<const std::uint8_t*>(result.first),
static_cast<const std::uint8_t*>(result.first) + result.second);
// Update cache immediately
cache_.insert_or_assign(e->getHash(), data);
updateCacheMetadata(e->getHash(), data.size());
// Queue async write to MySQL
queueWrite(e->getHash(), data);
}
}
void
sync() override
{
// Wait for write queue to empty
std::unique_lock<std::mutex> lock(queueMutex_);
while (!writeQueue_.empty())
{
queueCV_.wait(lock);
}
}
void
for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
{
if (!isOpen_)
return;
// First, process all cached entries
std::vector<std::pair<uint256, std::vector<std::uint8_t>>>
cached_entries;
for (const auto& entry : cache_)
{
cached_entries.push_back(entry);
}
for (const auto& entry : cached_entries)
{
nudb::detail::buffer decompressed;
auto const result = nodeobject_decompress(
entry.second.data(), entry.second.size(), decompressed);
DecodedBlob decoded(
entry.first.data(), result.first, result.second);
if (decoded.wasOk())
f(decoded.createObject());
}
// Then fetch any remaining entries from MySQL
auto* conn = getConnection();
if (!conn->ensureConnection())
return;
if (mysql_query(
conn->get(),
("SELECT hash, data FROM " + name_ + " ORDER BY created_at")
.c_str()))
return;
MYSQL_RES* result = mysql_store_result(conn->get());
if (!result)
return;
MYSQL_ROW row;
while ((row = mysql_fetch_row(result)))
{
unsigned long* lengths = mysql_fetch_lengths(result);
if (!lengths)
continue;
uint256 hash;
std::memcpy(hash.data(), row[0], hash.size());
// Skip if already processed from cache
if (cache_.find(hash) != cache_.end())
continue;
nudb::detail::buffer decompressed;
auto const decomp_result = nodeobject_decompress(
row[1], static_cast<std::size_t>(lengths[1]), decompressed);
DecodedBlob decoded(
hash.data(), decomp_result.first, decomp_result.second);
if (decoded.wasOk())
{
auto obj = decoded.createObject();
f(obj);
// Add to cache for future use
std::vector<std::uint8_t> data(
reinterpret_cast<const std::uint8_t*>(row[1]),
reinterpret_cast<const std::uint8_t*>(row[1]) + lengths[1]);
cache_.insert_or_assign(hash, std::move(data));
updateCacheMetadata(hash, lengths[1]);
}
}
mysql_free_result(result);
}
int
getWriteLoad() override
{
std::lock_guard<std::mutex> lock(queueMutex_);
return static_cast<int>(writeQueue_.size());
}
void
setDeletePath() override
{
close();
}
int
fdRequired() const override
{
return 1;
}
private:
void
createTable()
{
auto* conn = getConnection();
if (!conn->ensureConnection())
Throw<std::runtime_error>("Failed to connect to MySQL server");
std::string query(1024, '\0');
int length =
snprintf(&query[0], query.size(), CREATE_TABLE, name_.c_str());
query.resize(length);
if (!conn->executeQuery(query))
{
JLOG(journal_.error())
<< "Failed to create table: " << mysql_error(conn->get());
Throw<std::runtime_error>("Failed to create table");
}
}
};
class MySQLFactory : public Factory
{
public:
MySQLFactory()
{
Manager::instance().insert(*this);
}
~MySQLFactory() override
{
Manager::instance().erase(*this);
}
std::string
getName() const override
{
return "MySQL";
}
std::unique_ptr<Backend>
createInstance(
std::size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override
{
return std::make_unique<MySQLBackend>(keyBytes, keyValues, journal);
}
};
static MySQLFactory mysqlFactory;
} // namespace NodeStore
} // namespace ripple
#endif // RIPPLE_NODESTORE_MYSQLBACKEND_H_INCLUDED

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 77;
static constexpr std::size_t numFeatures = 78;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -362,6 +362,7 @@ extern uint256 const fix240819;
extern uint256 const fixPageCap;
extern uint256 const fix240911;
extern uint256 const fixFloatDivide;
extern uint256 const featureTouch;
extern uint256 const fixReduceImport;
extern uint256 const fixXahauV3;
extern uint256 const fix20250131;

View File

@@ -433,6 +433,7 @@ extern SF_UINT64 const sfReferenceCount;
extern SF_UINT64 const sfRewardAccumulator;
extern SF_UINT64 const sfAccountCount;
extern SF_UINT64 const sfAccountIndex;
extern SF_UINT64 const sfTouchCount;
// 128-bit
extern SF_UINT128 const sfEmailHash;

View File

@@ -468,6 +468,7 @@ REGISTER_FIX (fix240819, Supported::yes, VoteBehavior::De
REGISTER_FIX (fixPageCap, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fix240911, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixFloatDivide, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(Touch, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FIX (fixReduceImport, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixXahauV3, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fix20250131, Supported::yes, VoteBehavior::DefaultYes);

View File

@@ -66,6 +66,7 @@ LedgerFormats::LedgerFormats()
{sfGovernanceFlags, soeOPTIONAL},
{sfGovernanceMarks, soeOPTIONAL},
{sfAccountIndex, soeOPTIONAL},
{sfTouchCount, soeOPTIONAL},
},
commonFields);

View File

@@ -183,6 +183,7 @@ CONSTRUCT_TYPED_SFIELD(sfEmitBurden, "EmitBurden", UINT64,
CONSTRUCT_TYPED_SFIELD(sfHookInstructionCount, "HookInstructionCount", UINT64, 17);
CONSTRUCT_TYPED_SFIELD(sfHookReturnCode, "HookReturnCode", UINT64, 18);
CONSTRUCT_TYPED_SFIELD(sfReferenceCount, "ReferenceCount", UINT64, 19);
CONSTRUCT_TYPED_SFIELD(sfTouchCount, "TouchCount", UINT64, 97);
CONSTRUCT_TYPED_SFIELD(sfAccountIndex, "AccountIndex", UINT64, 98);
CONSTRUCT_TYPED_SFIELD(sfAccountCount, "AccountCount", UINT64, 99);
CONSTRUCT_TYPED_SFIELD(sfRewardAccumulator, "RewardAccumulator", UINT64, 100);

View File

@@ -36,12 +36,12 @@
#include <magic/magic_enum.h>
#include <sstream>
#define MAGIC_ENUM(x) \
#define MAGIC_ENUM(x, _min, _max) \
template <> \
struct magic_enum::customize::enum_range<x> \
{ \
static constexpr int min = -20000; \
static constexpr int max = 20000; \
static constexpr int min = _min; \
static constexpr int max = _max; \
};
#define MAGIC_ENUM_16(x) \
@@ -59,14 +59,14 @@
static constexpr bool is_flags = true; \
};
MAGIC_ENUM(ripple::SerializedTypeID);
MAGIC_ENUM(ripple::LedgerEntryType);
MAGIC_ENUM(ripple::TELcodes);
MAGIC_ENUM(ripple::TEMcodes);
MAGIC_ENUM(ripple::TEFcodes);
MAGIC_ENUM(ripple::TERcodes);
MAGIC_ENUM(ripple::TEScodes);
MAGIC_ENUM(ripple::TECcodes);
MAGIC_ENUM(ripple::SerializedTypeID, -2, 10004);
MAGIC_ENUM(ripple::LedgerEntryType, 0, 255);
MAGIC_ENUM(ripple::TELcodes, -399, 300);
MAGIC_ENUM(ripple::TEMcodes, -299, -200);
MAGIC_ENUM(ripple::TEFcodes, -199, -100);
MAGIC_ENUM(ripple::TERcodes, -99, -1);
MAGIC_ENUM(ripple::TEScodes, 0, 1);
MAGIC_ENUM(ripple::TECcodes, 100, 255);
MAGIC_ENUM_16(ripple::TxType);
MAGIC_ENUM_FLAG(ripple::UniversalFlags);
MAGIC_ENUM_FLAG(ripple::AccountSetFlags);
@@ -464,11 +464,9 @@ public:
{
if (!defsHash)
{
// should be unreachable
// if this does happen we don't want 0 xor 0 so use a random value
// here
return uint256(
static const uint256 fallbackHash(
"DF4220E93ADC6F5569063A01B4DC79F8DB9553B6A3222ADE23DEA0");
return fallbackHash;
}
return *defsHash;
}
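
The hunk above is the dangling-reference fix described in commit d088ad61a9: returning a const reference to a temporary uint256 is undefined behaviour, so the fallback value is moved into a function-local static. Below is a minimal stand-alone sketch of the same pattern, with hypothetical names and a simplified type — it is not the project's actual code.

#include <iostream>
#include <optional>
#include <string>

// Simplified stand-in for ripple::uint256, for illustration only.
using Hash256 = std::string;

std::optional<Hash256> defsHash;  // empty when no definitions hash was computed

// Broken variant: the temporary Hash256 is destroyed when getHash() returns,
// so the caller would hold a dangling const reference.
//
//   const Hash256& getHash()
//   {
//       if (!defsHash)
//           return Hash256("FALLBACK");  // reference to a dead temporary
//       return *defsHash;
//   }

// Fixed variant, mirroring the diff above: the fallback lives in a
// function-local static, so the returned reference stays valid for the
// lifetime of the program.
const Hash256&
getHash()
{
    if (!defsHash)
    {
        static const Hash256 fallbackHash("FALLBACK");
        return fallbackHash;
    }
    return *defsHash;
}

int main()
{
    std::cout << getHash() << "\n";  // prints FALLBACK; no dangling reference
}
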

View File

@@ -42,6 +42,8 @@ class Discrepancy_test : public beast::unit_test::suite
using namespace test::jtx;
Env env{*this, features};
bool const withTouch = env.current()->rules().enabled(featureTouch);
Account A1{"A1"};
Account A2{"A2"};
Account A3{"A3"};
@@ -107,7 +109,8 @@ class Discrepancy_test : public beast::unit_test::suite
auto meta = jrr[jss::meta];
uint64_t sumPrev{0};
uint64_t sumFinal{0};
BEAST_EXPECT(meta[sfAffectedNodes.fieldName].size() == 9);
BEAST_EXPECT(
meta[sfAffectedNodes.fieldName].size() == withTouch ? 11 : 10);
for (auto const& an : meta[sfAffectedNodes.fieldName])
{
Json::Value node;
@@ -127,12 +130,17 @@ class Discrepancy_test : public beast::unit_test::suite
Json::Value finalFields = node.isMember(sfFinalFields.fieldName)
? node[sfFinalFields.fieldName]
: node[sfNewFields.fieldName];
if (prevFields)
sumPrev += beast::lexicalCastThrow<std::uint64_t>(
prevFields[sfBalance.fieldName].asString());
if (finalFields)
sumFinal += beast::lexicalCastThrow<std::uint64_t>(
finalFields[sfBalance.fieldName].asString());
// withTouch: "Touched" account does not update Balance
if (prevFields.isMember(sfBalance.fieldName))
{
if (prevFields)
sumPrev += beast::lexicalCastThrow<std::uint64_t>(
prevFields[sfBalance.fieldName].asString());
if (finalFields)
sumFinal += beast::lexicalCastThrow<std::uint64_t>(
finalFields[sfBalance.fieldName].asString());
}
}
}
// the difference in balances (final and prev) should be the
@@ -147,6 +155,7 @@ public:
using namespace test::jtx;
auto const sa = supported_amendments();
testXRPDiscrepancy(sa - featureFlowCross);
testXRPDiscrepancy(sa - featureTouch);
testXRPDiscrepancy(sa);
}
};

View File

@@ -60,6 +60,7 @@ class Freeze_test : public beast::unit_test::suite
using namespace test::jtx;
Env env(*this, features);
bool const withTouch = env.current()->rules().enabled(featureTouch);
Account G1{"G1"};
Account alice{"alice"};
@@ -113,7 +114,7 @@ class Freeze_test : public beast::unit_test::suite
env(trust(G1, bob["USD"](0), tfSetFreeze));
auto affected = env.meta()->getJson(
JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 2u)))
if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u)))
return;
auto ff =
affected[1u][sfModifiedNode.fieldName][sfFinalFields.fieldName];
@@ -131,10 +132,10 @@ class Freeze_test : public beast::unit_test::suite
env(offer(bob, G1["USD"](5), XRP(25)));
auto affected = env.meta()->getJson(
JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 5u)))
if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 6u : 5u)))
return;
auto ff =
affected[3u][sfModifiedNode.fieldName][sfFinalFields.fieldName];
auto ff = affected[withTouch ? 4u : 3u][sfModifiedNode.fieldName]
[sfFinalFields.fieldName];
BEAST_EXPECT(
ff[sfHighLimit.fieldName] ==
bob["USD"](100).value().getJson(JsonOptions::none));
@@ -199,7 +200,7 @@ class Freeze_test : public beast::unit_test::suite
env(trust(G1, bob["USD"](0), tfClearFreeze));
auto affected = env.meta()->getJson(
JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 2u)))
if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u)))
return;
auto ff =
affected[1u][sfModifiedNode.fieldName][sfFinalFields.fieldName];
@@ -377,6 +378,7 @@ class Freeze_test : public beast::unit_test::suite
using namespace test::jtx;
Env env(*this, features);
bool const withTouch = env.current()->rules().enabled(featureTouch);
Account G1{"G1"};
Account A1{"A1"};
@@ -417,7 +419,7 @@ class Freeze_test : public beast::unit_test::suite
env(trust(G1, A1["USD"](0), tfSetFreeze));
auto affected =
env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 1u)))
if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 2u : 1u)))
return;
auto let =
@@ -432,6 +434,7 @@ class Freeze_test : public beast::unit_test::suite
using namespace test::jtx;
Env env(*this, features);
bool const withTouch = env.current()->rules().enabled(featureTouch);
Account G1{"G1"};
Account A2{"A2"};
@@ -475,7 +478,7 @@ class Freeze_test : public beast::unit_test::suite
env(trust(G1, A3["USD"](0), tfSetFreeze));
auto affected =
env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 2u)))
if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u)))
return;
auto ff =
affected[1u][sfModifiedNode.fieldName][sfFinalFields.fieldName];
@@ -505,9 +508,10 @@ class Freeze_test : public beast::unit_test::suite
env(trust(G1, A4["USD"](0), tfSetFreeze));
affected =
env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 2u)))
if (!BEAST_EXPECT(checkArraySize(affected, withTouch ? 3u : 2u)))
return;
ff = affected[0u][sfModifiedNode.fieldName][sfFinalFields.fieldName];
ff = affected[withTouch ? 1u : 0u][sfModifiedNode.fieldName]
[sfFinalFields.fieldName];
BEAST_EXPECT(
ff[sfLowLimit.fieldName] ==
G1["USD"](0).value().getJson(JsonOptions::none));
@@ -521,7 +525,7 @@ class Freeze_test : public beast::unit_test::suite
env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName];
if (!BEAST_EXPECT(checkArraySize(affected, 8u)))
return;
auto created = affected[0u][sfCreatedNode.fieldName];
auto created = affected[5u][sfCreatedNode.fieldName];
BEAST_EXPECT(
created[sfNewFields.fieldName][jss::Account] == A2.human());
env.close();
@@ -543,8 +547,9 @@ public:
testOffersWhenFrozen(features);
};
using namespace test::jtx;
auto const sa = supported_amendments() - featureXahauGenesis;
auto const sa = supported_amendments();
testAll(sa - featureFlowCross);
testAll(sa - featureTouch);
testAll(sa);
}
};

View File

@@ -2102,9 +2102,10 @@ struct Remit_test : public beast::unit_test::suite
std::string result;
TER code;
};
std::array<TestRateData, 10> testCases = {{
// We test only rates that can fit in a STI_UINT32.
// Negative rates can't be serialized/deserialized, so there is no need to test them.
std::array<TestRateData, 9> testCases = {{
{0.0, USD(100), "900", tesSUCCESS},
{-1.0, USD(100), "900", temBAD_TRANSFER_RATE},
{0.9, USD(100), "900", temBAD_TRANSFER_RATE},
{1.0, USD(100), "900", tesSUCCESS},
{1.1, USD(100), "890", tesSUCCESS},

src/test/app/Touch_test.cpp (new file, 1411 lines)

File diff suppressed because it is too large.

View File

@@ -1138,6 +1138,209 @@ public:
}
}
const std::vector<uint8_t> TshHook = {
0x00U, 0x61U, 0x73U, 0x6DU, 0x01U, 0x00U, 0x00U, 0x00U, 0x01U, 0x28U,
0x06U, 0x60U, 0x05U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U, 0x7EU,
0x60U, 0x04U, 0x7FU, 0x7FU, 0x7FU, 0x7FU, 0x01U, 0x7EU, 0x60U, 0x00U,
0x01U, 0x7EU, 0x60U, 0x03U, 0x7FU, 0x7FU, 0x7EU, 0x01U, 0x7EU, 0x60U,
0x02U, 0x7FU, 0x7FU, 0x01U, 0x7FU, 0x60U, 0x01U, 0x7FU, 0x01U, 0x7EU,
0x02U, 0x45U, 0x05U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x05U, 0x74U, 0x72U,
0x61U, 0x63U, 0x65U, 0x00U, 0x00U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU,
0x6FU, 0x74U, 0x78U, 0x6EU, 0x5FU, 0x70U, 0x61U, 0x72U, 0x61U, 0x6DU,
0x00U, 0x01U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x0AU, 0x68U, 0x6FU, 0x6FU,
0x6BU, 0x5FU, 0x61U, 0x67U, 0x61U, 0x69U, 0x6EU, 0x00U, 0x02U, 0x03U,
0x65U, 0x6EU, 0x76U, 0x06U, 0x61U, 0x63U, 0x63U, 0x65U, 0x70U, 0x74U,
0x00U, 0x03U, 0x03U, 0x65U, 0x6EU, 0x76U, 0x02U, 0x5FU, 0x67U, 0x00U,
0x04U, 0x03U, 0x02U, 0x01U, 0x05U, 0x05U, 0x03U, 0x01U, 0x00U, 0x02U,
0x06U, 0x2BU, 0x07U, 0x7FU, 0x01U, 0x41U, 0xC0U, 0x8BU, 0x04U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0xBCU,
0x0BU, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x80U, 0x08U, 0x0BU, 0x7FU, 0x00U,
0x41U, 0xC0U, 0x8BU, 0x04U, 0x0BU, 0x7FU, 0x00U, 0x41U, 0x00U, 0x0BU,
0x7FU, 0x00U, 0x41U, 0x01U, 0x0BU, 0x07U, 0x08U, 0x01U, 0x04U, 0x68U,
0x6FU, 0x6FU, 0x6BU, 0x00U, 0x05U, 0x0AU, 0x8EU, 0x84U, 0x00U, 0x01U,
0x8AU, 0x84U, 0x00U, 0x02U, 0x09U, 0x7EU, 0x05U, 0x7FU, 0x02U, 0x40U,
0x02U, 0x40U, 0x23U, 0x00U, 0x21U, 0x0AU, 0x20U, 0x0AU, 0x41U, 0x10U,
0x6BU, 0x21U, 0x0AU, 0x20U, 0x0AU, 0x24U, 0x00U, 0x20U, 0x0AU, 0x20U,
0x00U, 0x36U, 0x02U, 0x0CU, 0x41U, 0x9EU, 0x0BU, 0x41U, 0x0FU, 0x41U,
0xC1U, 0x09U, 0x41U, 0x0EU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U, 0x02U,
0x20U, 0x02U, 0x1AU, 0x20U, 0x0AU, 0x41U, 0x0BU, 0x6AU, 0x21U, 0x00U,
0x20U, 0x00U, 0x41U, 0x01U, 0x41U, 0xBDU, 0x09U, 0x41U, 0x03U, 0x10U,
0x01U, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x01U, 0x51U, 0x21U, 0x00U,
0x20U, 0x00U, 0x41U, 0x01U, 0x71U, 0x21U, 0x00U, 0x20U, 0x00U, 0x45U,
0x21U, 0x00U, 0x20U, 0x00U, 0x45U, 0x21U, 0x00U, 0x0BU, 0x20U, 0x00U,
0x04U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x10U, 0x02U, 0x21U, 0x03U,
0x20U, 0x03U, 0x1AU, 0x0BU, 0x01U, 0x0BU, 0x05U, 0x01U, 0x0BU, 0x0BU,
0x02U, 0x7EU, 0x02U, 0x40U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U,
0x00U, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U,
0x40U, 0x20U, 0x00U, 0x0EU, 0x03U, 0x02U, 0x01U, 0x00U, 0x04U, 0x0BU,
0x02U, 0x40U, 0x02U, 0x40U, 0x02U, 0x40U, 0x41U, 0xDBU, 0x09U, 0x41U,
0xC3U, 0x00U, 0x41U, 0x80U, 0x08U, 0x41U, 0xC2U, 0x00U, 0x41U, 0x00U,
0x10U, 0x00U, 0x21U, 0x04U, 0x20U, 0x04U, 0x1AU, 0x0BU, 0x0CU, 0x06U,
0x0BU, 0x00U, 0x0BU, 0x00U, 0x0BU, 0x02U, 0x40U, 0x02U, 0x40U, 0x02U,
0x40U, 0x41U, 0x9FU, 0x0AU, 0x41U, 0x3DU, 0x41U, 0xC2U, 0x08U, 0x41U,
0x3CU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U, 0x05U, 0x20U, 0x05U, 0x1AU,
0x0BU, 0x0CU, 0x05U, 0x0BU, 0x00U, 0x0BU, 0x00U, 0x0BU, 0x02U, 0x40U,
0x02U, 0x40U, 0x02U, 0x40U, 0x41U, 0xDDU, 0x0AU, 0x41U, 0xC0U, 0x00U,
0x41U, 0xFEU, 0x08U, 0x41U, 0x3FU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U,
0x06U, 0x20U, 0x06U, 0x1AU, 0x0BU, 0x01U, 0x0BU, 0x0BU, 0x0BU, 0x0BU,
0x0BU, 0x02U, 0x7EU, 0x02U, 0x7EU, 0x41U, 0xAEU, 0x0BU, 0x41U, 0x0DU,
0x41U, 0xCFU, 0x09U, 0x41U, 0x0CU, 0x41U, 0x00U, 0x10U, 0x00U, 0x21U,
0x07U, 0x20U, 0x07U, 0x1AU, 0x20U, 0x0AU, 0x41U, 0x07U, 0x6AU, 0x21U,
0x0CU, 0x20U, 0x0CU, 0x21U, 0x00U, 0x20U, 0x0AU, 0x20U, 0x00U, 0x36U,
0x02U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U, 0x00U, 0x20U,
0x00U, 0xADU, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x18U, 0x88U, 0x21U,
0x01U, 0x20U, 0x01U, 0x42U, 0xFFU, 0x01U, 0x83U, 0x21U, 0x01U, 0x20U,
0x01U, 0xA7U, 0x21U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x00U, 0x21U,
0x0BU, 0x20U, 0x0BU, 0x20U, 0x00U, 0x3AU, 0x00U, 0x00U, 0x20U, 0x0AU,
0x28U, 0x02U, 0x0CU, 0x21U, 0x00U, 0x20U, 0x00U, 0xADU, 0x21U, 0x01U,
0x20U, 0x01U, 0x42U, 0x10U, 0x88U, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U,
0xFFU, 0x01U, 0x83U, 0x21U, 0x01U, 0x20U, 0x01U, 0xA7U, 0x21U, 0x00U,
0x20U, 0x0AU, 0x28U, 0x02U, 0x00U, 0x21U, 0x0BU, 0x20U, 0x0BU, 0x20U,
0x00U, 0x3AU, 0x00U, 0x01U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U,
0x00U, 0x20U, 0x00U, 0xADU, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x08U,
0x88U, 0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0xFFU, 0x01U, 0x83U, 0x21U,
0x01U, 0x20U, 0x01U, 0xA7U, 0x21U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U,
0x00U, 0x21U, 0x0BU, 0x20U, 0x0BU, 0x20U, 0x00U, 0x3AU, 0x00U, 0x02U,
0x20U, 0x0AU, 0x28U, 0x02U, 0x0CU, 0x21U, 0x00U, 0x20U, 0x00U, 0xADU,
0x21U, 0x01U, 0x20U, 0x01U, 0x42U, 0x00U, 0x88U, 0x21U, 0x01U, 0x20U,
0x01U, 0x42U, 0xFFU, 0x01U, 0x83U, 0x21U, 0x01U, 0x20U, 0x01U, 0xA7U,
0x21U, 0x00U, 0x20U, 0x0AU, 0x28U, 0x02U, 0x00U, 0x21U, 0x0BU, 0x20U,
0x0BU, 0x20U, 0x00U, 0x3AU, 0x00U, 0x03U, 0x20U, 0x0CU, 0x21U, 0x00U,
0x20U, 0x00U, 0x41U, 0x04U, 0x42U, 0x1CU, 0x10U, 0x03U, 0x21U, 0x08U,
0x20U, 0x08U, 0x1AU, 0x41U, 0x01U, 0x41U, 0x01U, 0x10U, 0x04U, 0x21U,
0x0DU, 0x20U, 0x0DU, 0x1AU, 0x20U, 0x0AU, 0x41U, 0x10U, 0x6AU, 0x21U,
0x00U, 0x20U, 0x00U, 0x24U, 0x00U, 0x42U, 0x00U, 0x21U, 0x09U, 0x42U,
0x00U, 0x0BU, 0x0BU, 0x0BU, 0x0BU, 0x0BU, 0xC3U, 0x03U, 0x01U, 0x00U,
0x41U, 0x80U, 0x08U, 0x0BU, 0xBBU, 0x03U, 0x74U, 0x73U, 0x68U, 0x2EU,
0x63U, 0x3AU, 0x20U, 0x57U, 0x65U, 0x61U, 0x6BU, 0x20U, 0x41U, 0x67U,
0x61U, 0x69U, 0x6EU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U,
0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, 0x20U, 0x74U,
0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU,
0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U,
0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
0x72U, 0x00U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x57U,
0x65U, 0x61U, 0x6BU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U,
0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, 0x20U, 0x74U,
0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU,
0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U,
0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
0x72U, 0x00U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x53U,
0x74U, 0x72U, 0x6FU, 0x6EU, 0x67U, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U,
0x63U, 0x75U, 0x74U, 0x65U, 0x20U, 0x42U, 0x45U, 0x46U, 0x4FU, 0x52U,
0x45U, 0x20U, 0x74U, 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U,
0x69U, 0x6FU, 0x6EU, 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U,
0x6CU, 0x69U, 0x65U, 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U,
0x64U, 0x67U, 0x65U, 0x72U, 0x00U, 0x41U, 0x41U, 0x57U, 0x00U, 0x74U,
0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x53U, 0x74U, 0x61U, 0x72U,
0x74U, 0x2EU, 0x00U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U,
0x45U, 0x6EU, 0x64U, 0x2EU, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU,
0x63U, 0x3AU, 0x20U, 0x57U, 0x65U, 0x61U, 0x6BU, 0x20U, 0x41U, 0x67U,
0x61U, 0x69U, 0x6EU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U,
0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U, 0x20U, 0x74U,
0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU,
0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U,
0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U,
0x72U, 0x22U, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU,
0x20U, 0x57U, 0x65U, 0x61U, 0x6BU, 0x2EU, 0x20U, 0x45U, 0x78U, 0x65U,
0x63U, 0x75U, 0x74U, 0x65U, 0x20U, 0x41U, 0x46U, 0x54U, 0x45U, 0x52U,
0x20U, 0x74U, 0x72U, 0x61U, 0x6EU, 0x73U, 0x61U, 0x63U, 0x74U, 0x69U,
0x6FU, 0x6EU, 0x20U, 0x69U, 0x73U, 0x20U, 0x61U, 0x70U, 0x70U, 0x6CU,
0x69U, 0x65U, 0x64U, 0x20U, 0x74U, 0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U,
0x67U, 0x65U, 0x72U, 0x22U, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U, 0x2EU,
0x63U, 0x3AU, 0x20U, 0x53U, 0x74U, 0x72U, 0x6FU, 0x6EU, 0x67U, 0x2EU,
0x20U, 0x45U, 0x78U, 0x65U, 0x63U, 0x75U, 0x74U, 0x65U, 0x20U, 0x42U,
0x45U, 0x46U, 0x4FU, 0x52U, 0x45U, 0x20U, 0x74U, 0x72U, 0x61U, 0x6EU,
0x73U, 0x61U, 0x63U, 0x74U, 0x69U, 0x6FU, 0x6EU, 0x20U, 0x69U, 0x73U,
0x20U, 0x61U, 0x70U, 0x70U, 0x6CU, 0x69U, 0x65U, 0x64U, 0x20U, 0x74U,
0x6FU, 0x20U, 0x6CU, 0x65U, 0x64U, 0x67U, 0x65U, 0x72U, 0x22U, 0x00U,
0x22U, 0x74U, 0x73U, 0x68U, 0x2EU, 0x63U, 0x3AU, 0x20U, 0x53U, 0x74U,
0x61U, 0x72U, 0x74U, 0x2EU, 0x22U, 0x00U, 0x22U, 0x74U, 0x73U, 0x68U,
0x2EU, 0x63U, 0x3AU, 0x20U, 0x45U, 0x6EU, 0x64U, 0x2EU, 0x22U};
void static overrideFlag(Json::Value& jv)
{
jv[jss::Flags] = 0b00000001U;
}
void
setTSHHook(jtx::Env& env, jtx::Account const& account)
{
using namespace test::jtx;
env(hook(account, {{hso(TshHook, overrideFlag)}}, 0),
fee(XRP(2)),
ter(tesSUCCESS));
env.close();
}
void
testAccount(FeatureBitset features)
{
testcase("AccountWithHookStream");
using namespace std::chrono_literals;
using namespace jtx;
Env env(*this, features);
auto const alice = Account("alice");
auto const bob = Account("bob");
auto const gw = Account("gw");
auto const USD = gw["USD"];
env.fund(XRP(10000), alice, bob, gw);
env.trust(USD(20000), alice, bob);
env.close();
auto wsc = makeWSClient(env.app().config());
Json::Value stream;
bool const withTouch = env.current()->rules().enabled(featureTouch);
{
// RPC subscribe to account stream
stream[jss::accounts] = Json::arrayValue;
stream[jss::accounts].append(bob.human());
auto jv = wsc->invoke("subscribe", stream);
if (wsc->version() == 2)
{
BEAST_EXPECT(
jv.isMember(jss::jsonrpc) && jv[jss::jsonrpc] == "2.0");
BEAST_EXPECT(
jv.isMember(jss::ripplerpc) && jv[jss::ripplerpc] == "2.0");
BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5);
}
BEAST_EXPECT(jv[jss::result][jss::status] == "success");
}
// Test Invoke Tx
{
setTSHHook(env, bob);
// Submit and Close
env(invoke::invoke(alice),
invoke::dest(bob),
fee(XRP(1)),
ter(tesSUCCESS));
env.close();
// Check stream update
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
if (jv[jss::transaction][jss::TransactionType] == "Invoke")
return true;
return withTouch ? false : true;
}));
}
// RPC unsubscribe
auto jv = wsc->invoke("unsubscribe", stream);
if (wsc->version() == 2)
{
BEAST_EXPECT(
jv.isMember(jss::jsonrpc) && jv[jss::jsonrpc] == "2.0");
BEAST_EXPECT(
jv.isMember(jss::ripplerpc) && jv[jss::ripplerpc] == "2.0");
BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5);
}
BEAST_EXPECT(jv[jss::status] == "success");
}
void
run() override
{
@@ -1155,6 +1358,8 @@ public:
testSubErrors(false);
testSubByUrl();
testHistoryTxStream();
testAccount(all);
testAccount(all - featureTouch);
}
};