Merge branch 'ripple:develop' into PaychanAndEscrowForTokens

This commit is contained in:
RichardAH
2022-04-05 14:41:20 +02:00
committed by GitHub
131 changed files with 4847 additions and 1968 deletions

View File

@@ -3,9 +3,6 @@ contact_links:
- name: XRP Ledger Documentation
url: https://xrpl.org/
about: All things about XRPL
- name: General question for the community
url: https://forum.xpring.io/c/community/
about: Please ask and answer questions here.
- name: Security bug bounty program
url: https://ripple.com/bug-bounty/
about: Please report security-relevant bugs in our software here.

2
.gitignore vendored
View File

@@ -104,3 +104,5 @@ Builds/VisualStudio2015/*.sdf
CMakeSettings.json
compile_commands.json
.clangd
packages
pkg_out

View File

@@ -395,7 +395,7 @@ target_sources (rippled PRIVATE
src/ripple/app/paths/Pathfinder.cpp
src/ripple/app/paths/RippleCalc.cpp
src/ripple/app/paths/RippleLineCache.cpp
src/ripple/app/paths/RippleState.cpp
src/ripple/app/paths/TrustLine.cpp
src/ripple/app/paths/impl/BookStep.cpp
src/ripple/app/paths/impl/DirectStep.cpp
src/ripple/app/paths/impl/PaySteps.cpp
@@ -733,6 +733,7 @@ if (tests)
src/test/basics/contract_test.cpp
src/test/basics/FeeUnits_test.cpp
src/test/basics/hardened_hash_test.cpp
src/test/basics/join_test.cpp
src/test/basics/mulDiv_test.cpp
src/test/basics/tagged_integer_test.cpp
#[===============================[
@@ -891,6 +892,7 @@ if (tests)
src/test/protocol/InnerObjectFormats_test.cpp
src/test/protocol/Issue_test.cpp
src/test/protocol/KnownFormatToGRPC_test.cpp
src/test/protocol/Hooks_test.cpp
src/test/protocol/PublicKey_test.cpp
src/test/protocol/Quality_test.cpp
src/test/protocol/STAccount_test.cpp
@@ -989,17 +991,18 @@ if (is_ci)
target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI)
endif ()
if (reporting)
target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
endif ()
if(reporting)
set_target_properties(rippled PROPERTIES OUTPUT_NAME rippled-reporting)
get_target_property(BIN_NAME rippled OUTPUT_NAME)
message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}")
target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
endif()
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.16)
# any files that don't play well with unity should be added here
if (tests)
set_source_files_properties(
# these two seem to produce conflicts in beast teardown template methods
src/test/rpc/ValidatorRPC_test.cpp
src/test/rpc/ShardArchiveHandler_test.cpp
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
endif () #tests
endif ()
# any files that don't play well with unity should be added here
if (tests)
set_source_files_properties(
# these two seem to produce conflicts in beast teardown template methods
src/test/rpc/ValidatorRPC_test.cpp
src/test/rpc/ShardArchiveHandler_test.cpp
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
endif () #tests

View File

@@ -48,12 +48,15 @@ if (is_root_project)
Builds/containers/centos-builder/Dockerfile
Builds/containers/centos-builder/centos_setup.sh
Builds/containers/centos-builder/extras.sh
Builds/containers/shared/build_deps.sh
Builds/containers/shared/rippled.service
Builds/containers/shared/update_sources.sh
Builds/containers/shared/update-rippled.sh
Builds/containers/shared/update_sources.sh
Builds/containers/shared/rippled.service
Builds/containers/shared/rippled-reporting.service
Builds/containers/shared/build_deps.sh
Builds/containers/packaging/rpm/rippled.spec
Builds/containers/packaging/rpm/build_rpm.sh
Builds/containers/packaging/rpm/50-rippled.preset
Builds/containers/packaging/rpm/50-rippled-reporting.preset
bin/getRippledInfo
)
exclude_from_default (rpm_container)
@@ -86,7 +89,7 @@ if (is_root_project)
add_custom_target (dpkg_container
docker build
--pull
--build-arg DIST_TAG=16.04
--build-arg DIST_TAG=18.04
--build-arg GIT_COMMIT=${commit_hash}
-t rippled-dpkg-builder:${container_label}
$<$<BOOL:${dpkg_cache_from}>:--cache-from=${dpkg_cache_from}>
@@ -96,28 +99,40 @@ if (is_root_project)
USES_TERMINAL
COMMAND_EXPAND_LISTS
SOURCES
Builds/containers/packaging/dpkg/debian/rippled-reporting.links
Builds/containers/packaging/dpkg/debian/copyright
Builds/containers/packaging/dpkg/debian/rules
Builds/containers/packaging/dpkg/debian/rippled-reporting.install
Builds/containers/packaging/dpkg/debian/rippled-reporting.postinst
Builds/containers/packaging/dpkg/debian/rippled.links
Builds/containers/packaging/dpkg/debian/rippled.prerm
Builds/containers/packaging/dpkg/debian/rippled.postinst
Builds/containers/packaging/dpkg/debian/rippled-dev.install
Builds/containers/packaging/dpkg/debian/dirs
Builds/containers/packaging/dpkg/debian/rippled.postrm
Builds/containers/packaging/dpkg/debian/rippled.conffiles
Builds/containers/packaging/dpkg/debian/compat
Builds/containers/packaging/dpkg/debian/source/format
Builds/containers/packaging/dpkg/debian/source/local-options
Builds/containers/packaging/dpkg/debian/README.Debian
Builds/containers/packaging/dpkg/debian/rippled.install
Builds/containers/packaging/dpkg/debian/rippled.preinst
Builds/containers/packaging/dpkg/debian/docs
Builds/containers/packaging/dpkg/debian/control
Builds/containers/packaging/dpkg/debian/rippled-reporting.dirs
Builds/containers/packaging/dpkg/build_dpkg.sh
Builds/containers/ubuntu-builder/Dockerfile
Builds/containers/ubuntu-builder/ubuntu_setup.sh
bin/getRippledInfo
Builds/containers/shared/install_cmake.sh
Builds/containers/shared/install_boost.sh
Builds/containers/shared/update-rippled.sh
Builds/containers/shared/update_sources.sh
Builds/containers/shared/build_deps.sh
Builds/containers/shared/rippled.service
Builds/containers/shared/update_sources.sh
Builds/containers/shared/update-rippled.sh
Builds/containers/packaging/dpkg/build_dpkg.sh
Builds/containers/packaging/dpkg/debian/README.Debian
Builds/containers/packaging/dpkg/debian/conffiles
Builds/containers/packaging/dpkg/debian/control
Builds/containers/packaging/dpkg/debian/copyright
Builds/containers/packaging/dpkg/debian/dirs
Builds/containers/packaging/dpkg/debian/docs
Builds/containers/packaging/dpkg/debian/rippled-dev.install
Builds/containers/packaging/dpkg/debian/rippled.install
Builds/containers/packaging/dpkg/debian/rippled.links
Builds/containers/packaging/dpkg/debian/rippled.postinst
Builds/containers/packaging/dpkg/debian/rippled.postrm
Builds/containers/packaging/dpkg/debian/rippled.preinst
Builds/containers/packaging/dpkg/debian/rippled.prerm
Builds/containers/packaging/dpkg/debian/rules
bin/getRippledInfo
Builds/containers/shared/rippled-reporting.service
Builds/containers/shared/rippled-logrotate
Builds/containers/shared/update-rippled-cron
)
exclude_from_default (dpkg_container)
add_custom_target (dpkg
@@ -187,4 +202,3 @@ if (is_root_project)
message (STATUS "docker NOT found -- won't be able to build containers for packaging")
endif ()
endif ()

View File

@@ -10,13 +10,8 @@ option (tests "Build tests" ON)
option (unity "Creates a build using UNITY support in cmake. This is the default" ON)
if (unity)
if (CMAKE_VERSION VERSION_LESS 3.16)
message (WARNING "unity option only supported for with cmake 3.16+ (please upgrade)")
set (unity OFF CACHE BOOL "unity only available for cmake 3.16+" FORCE)
else ()
if (NOT is_ci)
set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
endif ()
if (NOT is_ci)
set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
endif ()
endif ()
if (is_gcc OR is_clang)

View File

@@ -1,6 +1,6 @@
option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF)
if (validator_keys AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
if (validator_keys)
git_branch (current_branch)
# default to tracking VK develop branch unless we are on master/release
if (NOT (current_branch STREQUAL "master" OR current_branch STREQUAL "release"))
@@ -20,5 +20,3 @@ if (validator_keys AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
endif ()
add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys)
endif ()

View File

@@ -125,7 +125,7 @@ if (local_libarchive)
--build .
--config $<CONFIG>
--target archive_static
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -43,7 +43,7 @@ else()
--build .
--config $<CONFIG>
--target lz4_static
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -8,40 +8,24 @@
if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
add_library (nudb INTERFACE)
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
FetchContent_Declare(
nudb_src
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.5
)
FetchContent_GetProperties(nudb_src)
if(NOT nudb_src_POPULATED)
message (STATUS "Pausing to download NuDB...")
FetchContent_Populate(nudb_src)
endif()
else ()
ExternalProject_Add (nudb_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.5
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
TEST_COMMAND ""
INSTALL_COMMAND ""
)
ExternalProject_Get_Property (nudb_src SOURCE_DIR)
set (nudb_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${nudb_src_SOURCE_DIR}/include)
add_dependencies (nudb nudb_src)
endif ()
file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR)
# specify as system includes so as to avoid warnings
target_include_directories (nudb SYSTEM INTERFACE ${nudb_src_SOURCE_DIR}/include)
target_link_libraries (nudb
INTERFACE
Boost::thread
Boost::system)
add_library (NIH::nudb ALIAS nudb)
target_link_libraries (ripple_libs INTERFACE NIH::nudb)
FetchContent_Declare(
nudb_src
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
GIT_TAG 2.0.5
)
FetchContent_GetProperties(nudb_src)
if(NOT nudb_src_POPULATED)
message (STATUS "Pausing to download NuDB...")
FetchContent_Populate(nudb_src)
endif()
endif ()
file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR)
# specify as system includes so as to avoid warnings
target_include_directories (nudb SYSTEM INTERFACE ${nudb_src_SOURCE_DIR}/include)
target_link_libraries (nudb
INTERFACE
Boost::thread
Boost::system)
add_library (NIH::nudb ALIAS nudb)
target_link_libraries (ripple_libs INTERFACE NIH::nudb)

View File

@@ -65,7 +65,7 @@ if (local_protobuf OR NOT (Protobuf_FOUND AND Protobuf_PROTOC_EXECUTABLE AND pro
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
TEST_COMMAND ""
INSTALL_COMMAND
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install

View File

@@ -136,7 +136,7 @@ if (local_rocksdb)
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -42,7 +42,7 @@ else()
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -113,7 +113,7 @@ else()
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -56,7 +56,7 @@ else()
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -112,6 +112,8 @@ if(reporting)
-DLIBUV_LIBARY=${BINARY_DIR}/libuv_a.a
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
-DCASS_BUILD_STATIC=ON
-DCASS_BUILD_SHARED=OFF
-DOPENSSL_ROOT_DIR=/opt/local/openssl
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}cassandra_static.a
LOG_BUILD TRUE

View File

@@ -9,41 +9,10 @@
find_package (date QUIET)
if (NOT TARGET date::date)
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.14)
FetchContent_Declare(
hh_date_src
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
)
FetchContent_MakeAvailable(hh_date_src)
else ()
ExternalProject_Add (hh_date_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
TEST_COMMAND ""
INSTALL_COMMAND ""
)
ExternalProject_Get_Property (hh_date_src SOURCE_DIR)
set (hh_date_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${hh_date_src_SOURCE_DIR}/include)
add_library (date_interface INTERFACE)
add_library (date::date ALIAS date_interface)
add_dependencies (date_interface hh_date_src)
file (TO_CMAKE_PATH "${hh_date_src_SOURCE_DIR}" hh_date_src_SOURCE_DIR)
target_include_directories (date_interface
SYSTEM INTERFACE
$<BUILD_INTERFACE:${hh_date_src_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>)
install (
FILES
${hh_date_src_SOURCE_DIR}/include/date/date.h
DESTINATION include/date)
install (TARGETS date_interface
EXPORT RippleExports
INCLUDES DESTINATION include)
endif ()
FetchContent_Declare(
hh_date_src
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
)
FetchContent_MakeAvailable(hh_date_src)
endif ()

View File

@@ -112,7 +112,7 @@ else ()
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
TEST_COMMAND ""
INSTALL_COMMAND
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
@@ -169,7 +169,7 @@ else ()
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
TEST_COMMAND ""
INSTALL_COMMAND
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
@@ -237,7 +237,7 @@ else ()
${CMAKE_COMMAND}
--build .
--config $<CONFIG>
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
--parallel ${ep_procs}
$<$<BOOL:${is_multiconfig}>:
COMMAND
${CMAKE_COMMAND} -E copy

View File

@@ -22,6 +22,7 @@ time cmake \
-Dpackages_only=ON \
-Dcontainer_label="${container_tag}" \
-Dhave_package_container=ON \
-DCMAKE_VERBOSE_MAKEFILE=OFF \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-Dunity=OFF \
-G Ninja ../..
time cmake --build . --target ${pkgtype}
time cmake --build . --target ${pkgtype} -- -v

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env sh
set -ex
set -e
# used as a before/setup script for docker steps in gitlab-ci
# expects to be run in standard alpine/dind image
echo $(nproc)
@@ -13,4 +13,3 @@ apk add \
pip3 install awscli
# list curdir contents to build log:
ls -la

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env sh
set -ex
set -e
action=$1
filter=$2

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env sh
set -ex
set -e
install_from=$1
use_private=${2:-0} # this option not currently needed by any CI scripts,
# reserved for possible future use

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env sh
set -ex
set -e
docker login -u rippled \
-p ${ARTIFACTORY_DEPLOY_KEY_RIPPLED} "${ARTIFACTORY_HUB}"
# this gives us rippled_version :
@@ -19,4 +19,3 @@ for label in ${rippled_version} latest ; do
docker push \
"${ARTIFACTORY_HUB}/${DPKG_CONTAINER_NAME}:${label}_${CI_COMMIT_REF_SLUG}"
done

View File

@@ -4,7 +4,7 @@ set -ex
# make sure pkg source files are up to date with repo
cd /opt/rippled_bld/pkg
cp -fpru rippled/Builds/containers/packaging/dpkg/debian/. debian/
cp -fpu rippled/Builds/containers/shared/rippled.service debian/
cp -fpu rippled/Builds/containers/shared/rippled*.service debian/
cp -fpu rippled/Builds/containers/shared/update_sources.sh .
source update_sources.sh
@@ -52,14 +52,15 @@ rc=$?; if [[ $rc != 0 ]]; then
error "error building dpkg"
fi
cd ..
ls -latr
# copy artifacts
cp rippled-dev_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR}
cp rippled-reporting_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR}
cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR}
cp rippled_${RIPPLED_DPKG_FULL_VERSION}.dsc ${PKG_OUTDIR}
# dbgsym suffix is ddeb under newer debuild, but just deb under earlier
cp rippled-dbgsym_${RIPPLED_DPKG_FULL_VERSION}_amd64.* ${PKG_OUTDIR}
cp rippled-reporting-dbgsym_${RIPPLED_DPKG_FULL_VERSION}_amd64.* ${PKG_OUTDIR}
cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.changes ${PKG_OUTDIR}
cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.build ${PKG_OUTDIR}
cp rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz ${PKG_OUTDIR}
@@ -81,15 +82,20 @@ DEB_SHA256=$(cat shasums | \
grep "rippled_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1)
DBG_SHA256=$(cat shasums | \
grep "rippled-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1)
REPORTING_DBG_SHA256=$(cat shasums | \
grep "rippled-reporting-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1)
DEV_SHA256=$(cat shasums | \
grep "rippled-dev_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1)
REPORTING_SHA256=$(cat shasums | \
grep "rippled-reporting_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1)
SRC_SHA256=$(cat shasums | \
grep "rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz" | cut -d " " -f 1)
echo "deb_sha256=${DEB_SHA256}" >> ${PKG_OUTDIR}/build_vars
echo "dbg_sha256=${DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars
echo "dev_sha256=${DEV_SHA256}" >> ${PKG_OUTDIR}/build_vars
echo "reporting_sha256=${REPORTING_SHA256}" >> ${PKG_OUTDIR}/build_vars
echo "reporting_dbg_sha256=${REPORTING_DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars
echo "src_sha256=${SRC_SHA256}" >> ${PKG_OUTDIR}/build_vars
echo "rippled_version=${RIPPLED_VERSION}" >> ${PKG_OUTDIR}/build_vars
echo "dpkg_version=${RIPPLED_DPKG_VERSION}" >> ${PKG_OUTDIR}/build_vars
echo "dpkg_full_version=${RIPPLED_DPKG_FULL_VERSION}" >> ${PKG_OUTDIR}/build_vars

View File

@@ -1 +1 @@
9
10

View File

@@ -12,6 +12,12 @@ Multi-Arch: foreign
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: rippled daemon
Package: rippled-reporting
Architecture: any
Multi-Arch: foreign
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: rippled reporting daemon
Package: rippled-dev
Section: devel
Recommends: rippled (= ${binary:Version})

View File

@@ -0,0 +1,3 @@
/var/log/rippled-reporting/
/var/lib/rippled-reporting/
/etc/systemd/system/rippled-reporting.service.d/

View File

@@ -0,0 +1,8 @@
bld/rippled-reporting/rippled-reporting opt/rippled-reporting/bin
cfg/rippled-reporting.cfg opt/rippled-reporting/etc
debian/tmp/opt/rippled-reporting/etc/validators.txt opt/rippled-reporting/etc
opt/rippled-reporting/bin/update-rippled-reporting.sh
opt/rippled-reporting/bin/getRippledReportingInfo
opt/rippled-reporting/etc/update-rippled-reporting-cron
etc/logrotate.d/rippled-reporting

View File

@@ -0,0 +1,3 @@
opt/rippled-reporting/etc/rippled-reporting.cfg etc/opt/rippled-reporting/rippled-reporting.cfg
opt/rippled-reporting/etc/validators.txt etc/opt/rippled-reporting/validators.txt
opt/rippled-reporting/bin/rippled-reporting usr/local/bin/rippled-reporting

View File

@@ -0,0 +1,33 @@
#!/bin/sh
set -e
USER_NAME=rippled-reporting
GROUP_NAME=rippled-reporting
case "$1" in
configure)
id -u $USER_NAME >/dev/null 2>&1 || \
adduser --system --quiet \
--home /nonexistent --no-create-home \
--disabled-password \
--group "$GROUP_NAME"
chown -R $USER_NAME:$GROUP_NAME /var/log/rippled-reporting/
chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled-reporting/
chmod 755 /var/log/rippled-reporting/
chmod 755 /var/lib/rippled-reporting/
chown -R $USER_NAME:$GROUP_NAME /opt/rippled-reporting
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0

View File

@@ -1,3 +1,2 @@
/opt/ripple/etc/rippled.cfg
/opt/ripple/etc/validators.txt
/etc/logrotate.d/rippled

View File

@@ -5,4 +5,4 @@ opt/ripple/bin/getRippledInfo
opt/ripple/etc/rippled.cfg
opt/ripple/etc/validators.txt
opt/ripple/etc/update-rippled-cron
etc/logrotate.d/rippled
etc/logrotate.d/rippled

View File

@@ -16,28 +16,46 @@ override_dh_systemd_start:
override_dh_auto_configure:
env
rm -rf bld
mkdir -p bld
cd bld && \
cmake .. -G Ninja \
rm -rf bld && mkdir -p bld/rippled
cd bld/rippled && \
cmake ../.. -G Ninja \
-DCMAKE_INSTALL_PREFIX=/opt/ripple \
-DCMAKE_BUILD_TYPE=Release \
-Dstatic=ON \
-Dunity=OFF \
-Dvalidator_keys=ON \
-Dunity=OFF \
-DCMAKE_VERBOSE_MAKEFILE=OFF
cmake -S . \
-B bld/rippled-reporting \
-G Ninja \
-DCMAKE_INSTALL_PREFIX=/opt/rippled-reporting \
-DCMAKE_BUILD_TYPE=Release \
-Dstatic=ON \
-Dunity=OFF \
-DCMAKE_VERBOSE_MAKEFILE=OFF \
-Dreporting=ON
override_dh_auto_build:
cd bld && \
cmake --build . --target rippled --target validator-keys --parallel
cmake --build bld/rippled --target rippled --target validator-keys --parallel
cmake --build bld/rippled-reporting --target rippled --parallel
override_dh_auto_install:
cd bld && DESTDIR=../debian/tmp cmake --build . --target install
install -D bld/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
cmake --install bld/rippled --prefix debian/tmp/opt/ripple
install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
install -D Builds/containers/shared/update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh
install -D bin/getRippledInfo debian/tmp/opt/ripple/bin/getRippledInfo
install -D Builds/containers/shared/update-rippled-cron debian/tmp/opt/ripple/etc/update-rippled-cron
install -D Builds/containers/shared/rippled-logrotate debian/tmp/etc/logrotate.d/rippled
rm -rf debian/tmp/opt/ripple/lib64/cmake/date
rm -rf bld
rm -rf bld_vl
mkdir -p debian/tmp/opt/rippled-reporting/etc
cp cfg/validators-example.txt debian/tmp/opt/rippled-reporting/etc/validators.txt
install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/rippled-reporting/bin/validator-keys
sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled.sh > debian/tmp/opt/rippled-reporting/bin/update-rippled-reporting.sh
sed -E 's/rippled?/rippled-reporting/g' bin/getRippledInfo > debian/tmp/opt/rippled-reporting/bin/getRippledReportingInfo
sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled-cron > debian/tmp/opt/rippled-reporting/etc/update-rippled-reporting-cron
sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/rippled-logrotate > debian/tmp/etc/logrotate.d/rippled-reporting

View File

@@ -0,0 +1 @@
enable rippled-reporting.service

View File

@@ -30,8 +30,8 @@ fi
cd /opt/rippled_bld/pkg/rippled
if [[ -n $(git status --porcelain) ]]; then
git status
error "Unstaged changes in this repo - please commit first"
git status
error "Unstaged changes in this repo - please commit first"
fi
git archive --format tar.gz --prefix rippled/ -o ../rpmbuild/SOURCES/rippled.tar.gz HEAD
# TODO include validator-keys sources
@@ -54,18 +54,22 @@ cp ./rpmbuild/SRPMS/* ${PKG_OUTDIR}
RPM_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm 2>/dev/null)
DBG_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-debuginfo*.rpm 2>/dev/null)
DEV_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-devel*.rpm 2>/dev/null)
REP_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-reporting*.rpm 2>/dev/null)
SRC_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/SRPMS/*.rpm 2>/dev/null)
RPM_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm | awk '{ print $1}')"
DBG_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-debuginfo*.rpm | awk '{ print $1}')"
REP_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-reporting*.rpm | awk '{ print $1}')"
DEV_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-devel*.rpm | awk '{ print $1}')"
SRC_SHA256="$(sha256sum ./rpmbuild/SRPMS/*.rpm | awk '{ print $1}')"
echo "rpm_md5sum=$RPM_MD5SUM" > ${PKG_OUTDIR}/build_vars
echo "rep_md5sum=$REP_MD5SUM" >> ${PKG_OUTDIR}/build_vars
echo "dbg_md5sum=$DBG_MD5SUM" >> ${PKG_OUTDIR}/build_vars
echo "dev_md5sum=$DEV_MD5SUM" >> ${PKG_OUTDIR}/build_vars
echo "src_md5sum=$SRC_MD5SUM" >> ${PKG_OUTDIR}/build_vars
echo "rpm_sha256=$RPM_SHA256" >> ${PKG_OUTDIR}/build_vars
echo "rep_sha256=$REP_SHA256" >> ${PKG_OUTDIR}/build_vars
echo "dbg_sha256=$DBG_SHA256" >> ${PKG_OUTDIR}/build_vars
echo "dev_sha256=$DEV_SHA256" >> ${PKG_OUTDIR}/build_vars
echo "src_sha256=$SRC_SHA256" >> ${PKG_OUTDIR}/build_vars
@@ -73,4 +77,3 @@ echo "rippled_version=$RIPPLED_VERSION" >> ${PKG_OUTDIR}/build_vars
echo "rpm_version=$RIPPLED_RPM_VERSION" >> ${PKG_OUTDIR}/build_vars
echo "rpm_file_name=$tar_file" >> ${PKG_OUTDIR}/build_vars
echo "rpm_version_release=$RPM_VERSION_RELEASE" >> ${PKG_OUTDIR}/build_vars

View File

@@ -2,6 +2,7 @@
%define rpm_release %(echo $RPM_RELEASE)
%define rpm_patch %(echo $RPM_PATCH)
%define _prefix /opt/ripple
Name: rippled
# Dashes in Version extensions must be converted to underscores
Version: %{rippled_version}
@@ -25,29 +26,41 @@ Requires: zlib-static
%description devel
core library for development of standalone applications that sign transactions.
%package reporting
Summary: Reporting Server for rippled
%description reporting
History server for XRP Ledger
%prep
%setup -c -n rippled
%build
cd rippled
mkdir -p bld.release
cd bld.release
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dstatic=true -Dunity=OFF -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON
cmake --build . --parallel --target rippled --target validator-keys
mkdir -p bld.rippled
pushd bld.rippled
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON
cmake --build . --parallel $(nproc) --target rippled --target validator-keys
popd
mkdir -p bld.rippled-reporting
cd bld.rippled-reporting
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix}-reporting -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dreporting=ON
cmake --build . --parallel $(nproc) --target rippled
%pre
test -e /etc/pki/tls || { mkdir -p /etc/pki; ln -s /usr/lib/ssl /etc/pki/tls; }
%install
rm -rf $RPM_BUILD_ROOT
DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install
DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.rippled --target install -- -v
rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/cmake/date
install -d ${RPM_BUILD_ROOT}/etc/opt/ripple
install -d ${RPM_BUILD_ROOT}/usr/local/bin
ln -s %{_prefix}/etc/rippled.cfg ${RPM_BUILD_ROOT}/etc/opt/ripple/rippled.cfg
ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/ripple/validators.txt
ln -s %{_prefix}/bin/rippled ${RPM_BUILD_ROOT}/usr/local/bin/rippled
install -D rippled/bld.release/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys
install -D rippled/bld.rippled/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys
install -D ./rippled/Builds/containers/shared/rippled.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled.service
install -D ./rippled/Builds/containers/packaging/rpm/50-rippled.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled.preset
install -D ./rippled/Builds/containers/shared/update-rippled.sh ${RPM_BUILD_ROOT}%{_bindir}/update-rippled.sh
@@ -57,7 +70,27 @@ install -D ./rippled/Builds/containers/shared/rippled-logrotate ${RPM_BUILD_ROOT
install -d $RPM_BUILD_ROOT/var/log/rippled
install -d $RPM_BUILD_ROOT/var/lib/rippled
# reporting mode
%define _prefix /opt/rippled-reporting
mkdir -p ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/
install -D rippled/bld.rippled-reporting/rippled-reporting ${RPM_BUILD_ROOT}%{_bindir}/rippled-reporting
install -D ./rippled/cfg/rippled-reporting.cfg ${RPM_BUILD_ROOT}%{_prefix}/etc/rippled-reporting.cfg
install -D ./rippled/cfg/validators-example.txt ${RPM_BUILD_ROOT}%{_prefix}/etc/validators.txt
install -D ./rippled/Builds/containers/packaging/rpm/50-rippled-reporting.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled-reporting.preset
ln -s %{_prefix}/bin/rippled-reporting ${RPM_BUILD_ROOT}/usr/local/bin/rippled-reporting
ln -s %{_prefix}/etc/rippled-reporting.cfg ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/rippled-reporting.cfg
ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/validators.txt
install -d $RPM_BUILD_ROOT/var/log/rippled-reporting
install -d $RPM_BUILD_ROOT/var/lib/rippled-reporting
install -D ./rippled/Builds/containers/shared/rippled-reporting.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled-reporting.service
sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/update-rippled.sh > ${RPM_BUILD_ROOT}%{_bindir}/update-rippled-reporting.sh
sed -E 's/rippled?/rippled-reporting/g' ./rippled/bin/getRippledInfo > ${RPM_BUILD_ROOT}%{_bindir}/getRippledReportingInfo
sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/update-rippled-cron > ${RPM_BUILD_ROOT}%{_prefix}/etc/update-rippled-reporting-cron
sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/rippled-logrotate > ${RPM_BUILD_ROOT}/etc/logrotate.d/rippled-reporting
%post
%define _prefix /opt/ripple
USER_NAME=rippled
GROUP_NAME=rippled
@@ -75,7 +108,25 @@ chmod 644 %{_prefix}/etc/update-rippled-cron
chmod 644 /etc/logrotate.d/rippled
chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron
%post reporting
%define _prefix /opt/rippled-reporting
USER_NAME=rippled-reporting
GROUP_NAME=rippled-reporting
getent passwd $USER_NAME &>/dev/null || useradd -r $USER_NAME
getent group $GROUP_NAME &>/dev/null || groupadd $GROUP_NAME
chown -R $USER_NAME:$GROUP_NAME /var/log/rippled-reporting/
chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled-reporting/
chown -R $USER_NAME:$GROUP_NAME %{_prefix}/
chmod 755 /var/log/rippled-reporting/
chmod 755 /var/lib/rippled-reporting/
chmod -x /usr/lib/systemd/system/rippled-reporting.service
%files
%define _prefix /opt/ripple
%doc rippled/README.md rippled/LICENSE.md
%{_bindir}/rippled
/usr/local/bin/rippled
@@ -98,6 +149,25 @@ chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron
%{_prefix}/lib/*.a
%{_prefix}/lib/cmake/ripple
%files reporting
%define _prefix /opt/rippled-reporting
%doc rippled/README.md rippled/LICENSE.md
%{_bindir}/rippled-reporting
/usr/local/bin/rippled-reporting
%config(noreplace) /etc/opt/rippled-reporting/rippled-reporting.cfg
%config(noreplace) %{_prefix}/etc/rippled-reporting.cfg
%config(noreplace) %{_prefix}/etc/validators.txt
%config(noreplace) /etc/opt/rippled-reporting/validators.txt
%config(noreplace) /usr/lib/systemd/system/rippled-reporting.service
%config(noreplace) /usr/lib/systemd/system-preset/50-rippled-reporting.preset
%dir /var/log/rippled-reporting/
%dir /var/lib/rippled-reporting/
%{_bindir}/update-rippled-reporting.sh
%{_bindir}/getRippledReportingInfo
%{_prefix}/etc/update-rippled-reporting-cron
%config(noreplace) /etc/logrotate.d/rippled-reporting
%changelog
* Wed Aug 28 2019 Mike Ellery <mellery451@gmail.com>
- Switch to subproject build for validator-keys

View File

@@ -30,7 +30,7 @@ cd openssl-${OPENSSL_VER}
SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\")
./config -fPIC --prefix=/opt/local/openssl --openssldir=${SSLDIR} zlib shared
make -j$(nproc) >> make_output.txt 2>&1
make install
make install >> make_output.txt 2>&1
cd ..
rm -f openssl-${OPENSSL_VER}.tar.gz
rm -rf openssl-${OPENSSL_VER}
@@ -43,7 +43,7 @@ cd libarchive-3.4.1
mkdir _bld && cd _bld
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j$(nproc) >> make_output.txt 2>&1
make install
make install >> make_output.txt 2>&1
cd ../..
rm -f libarchive-3.4.1.tar.gz
rm -rf libarchive-3.4.1
@@ -55,7 +55,7 @@ cd protobuf-3.10.1
./autogen.sh
./configure
make -j$(nproc) >> make_output.txt 2>&1
make install
make install >> make_output.txt 2>&1
ldconfig
cd ..
rm -f protobuf-all-3.10.1.tar.gz
@@ -78,7 +78,7 @@ cmake \
-DCARES_BUILD_CONTAINER_TESTS=OFF \
..
make -j$(nproc) >> make_output.txt 2>&1
make install
make install >> make_output.txt 2>&1
cd ../..
rm -f c-ares-1.15.0.tar.gz
rm -rf c-ares-1.15.0
@@ -98,7 +98,7 @@ cmake \
-DProtobuf_USE_STATIC_LIBS=ON \
..
make -j$(nproc) >> make_output.txt 2>&1
make install
make install >> make_output.txt 2>&1
cd ../..
rm -f xf v1.25.0.tar.gz
rm -rf grpc-1.25.0
@@ -115,7 +115,7 @@ if [ "${CI_USE}" = true ] ; then
cd build
cmake -G "Unix Makefiles" ..
make -j$(nproc) >> make_output.txt 2>&1
make install
make install >> make_output.txt 2>&1
cd ../..
rm -f Release_1_8_16.tar.gz
rm -rf doxygen-Release_1_8_16
@@ -136,8 +136,8 @@ if [ "${CI_USE}" = true ] ; then
tar xf ccache-3.7.6.tar.gz
cd ccache-3.7.6
./configure --prefix=/usr/local
make
make install
make >> make_output.txt 2>&1
make install >> make_output.txt 2>&1
cd ..
rm -f ccache-3.7.6.tar.gz
rm -rf ccache-3.7.6

View File

@@ -0,0 +1,15 @@
[Unit]
Description=Ripple Daemon
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/opt/rippled-reporting/bin/rippled-reporting --silent --conf /etc/opt/rippled-reporting/rippled-reporting.cfg
Restart=on-failure
User=rippled-reporting
Group=rippled-reporting
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -25,12 +25,7 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps")
include (CheckCXXCompilerFlag)
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
include (FetchContent)
endif ()
if (MSVC AND CMAKE_VERSION VERSION_LESS 3.12)
message (FATAL_ERROR "MSVC requires cmake 3.12 or greater for proper boost support")
endif ()
include (FetchContent)
include (ExternalProject)
include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
include (ProcessorCount)

1703
cfg/rippled-reporting.cfg Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -19,8 +19,7 @@
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/main/Application.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/chrono.h>
#include <algorithm>
namespace ripple {
@@ -29,29 +28,34 @@ AcceptedLedger::AcceptedLedger(
Application& app)
: mLedger(ledger)
{
transactions_.reserve(256);
auto insertAll = [&](auto const& txns) {
auto const& idcache = app.accountIDCache();
for (auto const& item : txns)
{
insert(std::make_shared<AcceptedLedgerTx>(
ledger,
item.first,
item.second,
app.accountIDCache(),
app.logs()));
}
transactions_.emplace_back(std::make_unique<AcceptedLedgerTx>(
ledger, item.first, item.second, idcache));
};
if (app.config().reporting())
insertAll(flatFetchTransactions(*ledger, app));
{
auto const txs = flatFetchTransactions(*ledger, app);
transactions_.reserve(txs.size());
insertAll(txs);
}
else
{
transactions_.reserve(256);
insertAll(ledger->txs);
}
}
void
AcceptedLedger::insert(AcceptedLedgerTx::ref at)
{
assert(mMap.find(at->getIndex()) == mMap.end());
mMap.insert(std::make_pair(at->getIndex(), at));
std::sort(
transactions_.begin(),
transactions_.end(),
[](auto const& a, auto const& b) {
return a->getTxnSeq() < b->getTxnSeq();
});
}
} // namespace ripple

View File

@@ -41,43 +41,40 @@ namespace ripple {
the result of the a consensus process (though haven't validated
it yet).
*/
class AcceptedLedger
class AcceptedLedger : public CountedObject<AcceptedLedger>
{
public:
using pointer = std::shared_ptr<AcceptedLedger>;
using ret = const pointer&;
using map_t = std::map<int, AcceptedLedgerTx::pointer>;
// mapt_t must be an ordered map!
using value_type = map_t::value_type;
using const_iterator = map_t::const_iterator;
AcceptedLedger(
std::shared_ptr<ReadView const> const& ledger,
Application& app);
public:
std::shared_ptr<ReadView const> const&
getLedger() const
{
return mLedger;
}
const map_t&
getMap() const
std::size_t
size() const
{
return mMap;
return transactions_.size();
}
int
getTxnCount() const
auto
begin() const
{
return mMap.size();
return transactions_.begin();
}
AcceptedLedger(
std::shared_ptr<ReadView const> const& ledger,
Application& app);
auto
end() const
{
return transactions_.end();
}
private:
void insert(AcceptedLedgerTx::ref);
std::shared_ptr<ReadView const> mLedger;
map_t mMap;
std::vector<std::unique_ptr<AcceptedLedgerTx>> transactions_;
};
} // namespace ripple

View File

@@ -18,7 +18,6 @@
//==============================================================================
#include <ripple/app/ledger/AcceptedLedgerTx.h>
#include <ripple/app/main/Application.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/UintTypes.h>
@@ -30,72 +29,30 @@ AcceptedLedgerTx::AcceptedLedgerTx(
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& txn,
std::shared_ptr<STObject const> const& met,
AccountIDCache const& accountCache,
Logs& logs)
: mLedger(ledger)
, mTxn(txn)
, mMeta(std::make_shared<TxMeta>(
txn->getTransactionID(),
ledger->seq(),
*met))
, mAffected(mMeta->getAffectedAccounts(logs.journal("View")))
, accountCache_(accountCache)
, logs_(logs)
AccountIDCache const& accountCache)
: mTxn(txn)
, mMeta(txn->getTransactionID(), ledger->seq(), *met)
, mAffected(mMeta.getAffectedAccounts())
{
assert(!ledger->open());
mResult = mMeta->getResultTER();
Serializer s;
met->add(s);
mRawMeta = std::move(s.modData());
buildJson();
}
AcceptedLedgerTx::AcceptedLedgerTx(
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& txn,
TER result,
AccountIDCache const& accountCache,
Logs& logs)
: mLedger(ledger)
, mTxn(txn)
, mResult(result)
, mAffected(txn->getMentionedAccounts())
, accountCache_(accountCache)
, logs_(logs)
{
assert(ledger->open());
buildJson();
}
std::string
AcceptedLedgerTx::getEscMeta() const
{
assert(!mRawMeta.empty());
return sqlBlobLiteral(mRawMeta);
}
void
AcceptedLedgerTx::buildJson()
{
mJson = Json::objectValue;
mJson[jss::transaction] = mTxn->getJson(JsonOptions::none);
if (mMeta)
{
mJson[jss::meta] = mMeta->getJson(JsonOptions::none);
mJson[jss::raw_meta] = strHex(mRawMeta);
}
mJson[jss::meta] = mMeta.getJson(JsonOptions::none);
mJson[jss::raw_meta] = strHex(mRawMeta);
mJson[jss::result] = transHuman(mResult);
mJson[jss::result] = transHuman(mMeta.getResultTER());
if (!mAffected.empty())
{
Json::Value& affected = (mJson[jss::affected] = Json::arrayValue);
for (auto const& account : mAffected)
affected.append(accountCache_.toBase58(account));
affected.append(accountCache.toBase58(account));
}
if (mTxn->getTxnType() == ttOFFER_CREATE)
@@ -107,14 +64,21 @@ AcceptedLedgerTx::buildJson()
if (account != amount.issue().account)
{
auto const ownerFunds = accountFunds(
*mLedger,
*ledger,
account,
amount,
fhIGNORE_FREEZE,
logs_.journal("View"));
beast::Journal{beast::Journal::getNullSink()});
mJson[jss::transaction][jss::owner_funds] = ownerFunds.getText();
}
}
}
std::string
AcceptedLedgerTx::getEscMeta() const
{
assert(!mRawMeta.empty());
return sqlBlobLiteral(mRawMeta);
}
} // namespace ripple

View File

@@ -39,40 +39,22 @@ class Logs;
- Which accounts are affected
* This is used by InfoSub to report to clients
- Cached stuff
@code
@endcode
@see {uri}
@ingroup ripple_ledger
*/
class AcceptedLedgerTx
class AcceptedLedgerTx : public CountedObject<AcceptedLedgerTx>
{
public:
using pointer = std::shared_ptr<AcceptedLedgerTx>;
using ref = const pointer&;
public:
AcceptedLedgerTx(
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const&,
std::shared_ptr<STObject const> const&,
AccountIDCache const&,
Logs&);
AcceptedLedgerTx(
std::shared_ptr<ReadView const> const&,
std::shared_ptr<STTx const> const&,
TER,
AccountIDCache const&,
Logs&);
AccountIDCache const&);
std::shared_ptr<STTx const> const&
getTxn() const
{
return mTxn;
}
std::shared_ptr<TxMeta> const&
TxMeta const&
getMeta() const
{
return mMeta;
@@ -97,45 +79,28 @@ public:
TER
getResult() const
{
return mResult;
return mMeta.getResultTER();
}
std::uint32_t
getTxnSeq() const
{
return mMeta->getIndex();
}
bool
isApplied() const
{
return bool(mMeta);
}
int
getIndex() const
{
return mMeta ? mMeta->getIndex() : 0;
return mMeta.getIndex();
}
std::string
getEscMeta() const;
Json::Value
Json::Value const&
getJson() const
{
return mJson;
}
private:
std::shared_ptr<ReadView const> mLedger;
std::shared_ptr<STTx const> mTxn;
std::shared_ptr<TxMeta> mMeta;
TER mResult;
TxMeta mMeta;
boost::container::flat_set<AccountID> mAffected;
Blob mRawMeta;
Json::Value mJson;
AccountIDCache const& accountCache_;
Logs& logs_;
void
buildJson();
};
} // namespace ripple

View File

@@ -39,9 +39,6 @@ class InboundLedger final : public TimeoutCounter,
public:
using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
using PeerDataPairType =
std::pair<std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData>>;
// These are the reasons we might acquire a ledger
enum class Reason {
HISTORY, // Acquiring past ledger
@@ -193,7 +190,9 @@ private:
// Data we have received from peers
std::mutex mReceivedDataLock;
std::vector<PeerDataPairType> mReceivedData;
std::vector<
std::pair<std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData>>>
mReceivedData;
bool mReceiveDispatched;
std::unique_ptr<PeerSet> mPeerSet;
};

View File

@@ -26,14 +26,6 @@
namespace ripple {
// VFALCO TODO replace macros
#ifndef CACHED_LEDGER_NUM
#define CACHED_LEDGER_NUM 96
#endif
std::chrono::seconds constexpr CachedLedgerAge = std::chrono::minutes{2};
// FIXME: Need to clean up ledgers by index at some point
LedgerHistory::LedgerHistory(
@@ -44,8 +36,8 @@ LedgerHistory::LedgerHistory(
, mismatch_counter_(collector->make_counter("ledger.history", "mismatch"))
, m_ledgers_by_hash(
"LedgerCache",
CACHED_LEDGER_NUM,
CachedLedgerAge,
app_.config().getValueFor(SizedItem::ledgerSize),
std::chrono::seconds{app_.config().getValueFor(SizedItem::ledgerAge)},
stopwatch(),
app_.journal("TaggedCache"))
, m_consensus_validated(
@@ -523,13 +515,6 @@ LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
return true;
}
void
LedgerHistory::tune(int size, std::chrono::seconds age)
{
m_ledgers_by_hash.setTargetSize(size);
m_ledgers_by_hash.setTargetAge(age);
}
void
LedgerHistory::clearLedgerCachePrior(LedgerIndex seq)
{

View File

@@ -70,13 +70,6 @@ public:
LedgerHash
getLedgerHash(LedgerIndex ledgerIndex);
/** Set the history cache's parameters
@param size The target size of the cache
@param age The target age of the cache, in seconds
*/
void
tune(int size, std::chrono::seconds age);
/** Remove stale cache entries
*/
void

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_APP_LEDGER_LEDGERHOLDER_H_INCLUDED
#define RIPPLE_APP_LEDGER_LEDGERHOLDER_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/basics/contract.h>
#include <mutex>
@@ -35,7 +36,7 @@ namespace ripple {
way the object always holds a value. We can use the
genesis ledger in all cases.
*/
class LedgerHolder
class LedgerHolder : public CountedObject<LedgerHolder>
{
public:
// Update the held ledger

View File

@@ -219,8 +219,6 @@ public:
bool
getFullValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal);
void
tune(int size, std::chrono::seconds age);
void
sweep();
float

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_APP_LEDGER_LEDGERREPLAY_H_INCLUDED
#define RIPPLE_APP_LEDGER_LEDGERREPLAY_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <cstdint>
#include <map>
#include <memory>
@@ -29,7 +30,7 @@ namespace ripple {
class Ledger;
class STTx;
class LedgerReplay
class LedgerReplay : public CountedObject<LedgerReplay>
{
std::shared_ptr<Ledger const> parent_;
std::shared_ptr<Ledger const> replay_;

View File

@@ -20,6 +20,7 @@
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/OrderBookDB.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/basics/Log.h>
#include <ripple/core/Config.h>
#include <ripple/core/JobQueue.h>
@@ -28,70 +29,72 @@
namespace ripple {
OrderBookDB::OrderBookDB(Application& app)
: app_(app), mSeq(0), j_(app.journal("OrderBookDB"))
: app_(app), seq_(0), j_(app.journal("OrderBookDB"))
{
}
void
OrderBookDB::invalidate()
{
std::lock_guard sl(mLock);
mSeq = 0;
}
void
OrderBookDB::setup(std::shared_ptr<ReadView const> const& ledger)
{
if (!app_.config().standalone() && app_.getOPs().isNeedNetworkLedger())
{
std::lock_guard sl(mLock);
auto seq = ledger->info().seq;
// Do a full update every 256 ledgers
if (mSeq != 0)
{
if (seq == mSeq)
return;
if ((seq > mSeq) && ((seq - mSeq) < 256))
return;
if ((seq < mSeq) && ((mSeq - seq) < 16))
return;
}
JLOG(j_.debug()) << "Advancing from " << mSeq << " to " << seq;
mSeq = seq;
JLOG(j_.warn()) << "Eliding full order book update: no ledger";
return;
}
auto seq = seq_.load();
if (seq != 0)
{
if ((seq > ledger->seq()) && ((ledger->seq() - seq) < 25600))
return;
if ((ledger->seq() <= seq) && ((seq - ledger->seq()) < 16))
return;
}
if (seq_.exchange(ledger->seq()) != seq)
return;
JLOG(j_.debug()) << "Full order book update: " << seq << " to "
<< ledger->seq();
if (app_.config().PATH_SEARCH_MAX != 0)
{
if (app_.config().standalone())
update(ledger);
else
app_.getJobQueue().addJob(
jtUPDATE_PF, "OrderBookDB::update", [this, ledger]() {
update(ledger);
});
jtUPDATE_PF,
"OrderBookDB::update: " + std::to_string(ledger->seq()),
[this, ledger]() { update(ledger); });
}
}
void
OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
{
hash_set<uint256> seen;
OrderBookDB::IssueToOrderBook destMap;
OrderBookDB::IssueToOrderBook sourceMap;
hash_set<Issue> XRPBooks;
JLOG(j_.debug()) << "OrderBookDB::update>";
if (app_.config().PATH_SEARCH_MAX == 0)
return; // pathfinding has been disabled
// A newer full update job is pending
if (auto const seq = seq_.load(); seq > ledger->seq())
{
// pathfinding has been disabled
JLOG(j_.debug()) << "Eliding update for " << ledger->seq()
<< " because of pending update to later " << seq;
return;
}
decltype(allBooks_) allBooks;
decltype(xrpBooks_) xrpBooks;
allBooks.reserve(allBooks_.size());
xrpBooks.reserve(xrpBooks_.size());
JLOG(j_.debug()) << "Beginning update (" << ledger->seq() << ")";
// walk through the entire ledger looking for orderbook entries
int books = 0;
int cnt = 0;
try
{
@@ -100,9 +103,8 @@ OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
if (app_.isStopping())
{
JLOG(j_.info())
<< "OrderBookDB::update exiting due to isStopping";
std::lock_guard sl(mLock);
mSeq = 0;
<< "Update halted because the process is stopping";
seq_.store(0);
return;
}
@@ -111,40 +113,38 @@ OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
sle->getFieldH256(sfRootIndex) == sle->key())
{
Book book;
book.in.currency = sle->getFieldH160(sfTakerPaysCurrency);
book.in.account = sle->getFieldH160(sfTakerPaysIssuer);
book.out.account = sle->getFieldH160(sfTakerGetsIssuer);
book.out.currency = sle->getFieldH160(sfTakerGetsCurrency);
book.out.account = sle->getFieldH160(sfTakerGetsIssuer);
uint256 index = getBookBase(book);
if (seen.insert(index).second)
{
auto orderBook = std::make_shared<OrderBook>(index, book);
sourceMap[book.in].push_back(orderBook);
destMap[book.out].push_back(orderBook);
if (isXRP(book.out))
XRPBooks.insert(book.in);
++books;
}
allBooks[book.in].insert(book.out);
if (isXRP(book.out))
xrpBooks.insert(book.in);
++cnt;
}
}
}
catch (SHAMapMissingNode const& mn)
{
JLOG(j_.info()) << "OrderBookDB::update: " << mn.what();
std::lock_guard sl(mLock);
mSeq = 0;
JLOG(j_.info()) << "Missing node in " << ledger->seq()
<< " during update: " << mn.what();
seq_.store(0);
return;
}
JLOG(j_.debug()) << "OrderBookDB::update< " << books << " books found";
JLOG(j_.debug()) << "Update completed (" << ledger->seq() << "): " << cnt
<< " books found";
{
std::lock_guard sl(mLock);
mXRPBooks.swap(XRPBooks);
mSourceMap.swap(sourceMap);
mDestMap.swap(destMap);
allBooks_.swap(allBooks);
xrpBooks_.swap(xrpBooks);
}
app_.getLedgerMaster().newOrderBookDB();
}
@@ -152,60 +152,50 @@ void
OrderBookDB::addOrderBook(Book const& book)
{
bool toXRP = isXRP(book.out);
std::lock_guard sl(mLock);
if (toXRP)
{
// We don't want to search through all the to-XRP or from-XRP order
// books!
for (auto ob : mSourceMap[book.in])
{
if (isXRP(ob->getCurrencyOut())) // also to XRP
return;
}
}
else
{
for (auto ob : mDestMap[book.out])
{
if (ob->getCurrencyIn() == book.in.currency &&
ob->getIssuerIn() == book.in.account)
{
return;
}
}
}
uint256 index = getBookBase(book);
auto orderBook = std::make_shared<OrderBook>(index, book);
allBooks_[book.in].insert(book.out);
mSourceMap[book.in].push_back(orderBook);
mDestMap[book.out].push_back(orderBook);
if (toXRP)
mXRPBooks.insert(book.in);
xrpBooks_.insert(book.in);
}
// return list of all orderbooks that want this issuerID and currencyID
OrderBook::List
std::vector<Book>
OrderBookDB::getBooksByTakerPays(Issue const& issue)
{
std::lock_guard sl(mLock);
auto it = mSourceMap.find(issue);
return it == mSourceMap.end() ? OrderBook::List() : it->second;
std::vector<Book> ret;
{
std::lock_guard sl(mLock);
if (auto it = allBooks_.find(issue); it != allBooks_.end())
{
ret.reserve(it->second.size());
for (auto const& gets : it->second)
ret.push_back(Book(issue, gets));
}
}
return ret;
}
int
OrderBookDB::getBookSize(Issue const& issue)
{
std::lock_guard sl(mLock);
auto it = mSourceMap.find(issue);
return it == mSourceMap.end() ? 0 : it->second.size();
if (auto it = allBooks_.find(issue); it != allBooks_.end())
return static_cast<int>(it->second.size());
return 0;
}
bool
OrderBookDB::isBookToXRP(Issue const& issue)
{
std::lock_guard sl(mLock);
return mXRPBooks.count(issue) > 0;
return xrpBooks_.count(issue) > 0;
}
BookListeners::pointer
@@ -247,63 +237,49 @@ OrderBookDB::processTxn(
Json::Value const& jvObj)
{
std::lock_guard sl(mLock);
if (alTx.getResult() == tesSUCCESS)
// For this particular transaction, maintain the set of unique
// subscriptions that have already published it. This prevents sending
// the transaction multiple times if it touches multiple ltOFFER
// entries for the same book, or if it touches multiple books and a
// single client has subscribed to those books.
hash_set<std::uint64_t> havePublished;
for (auto const& node : alTx.getMeta().getNodes())
{
// For this particular transaction, maintain the set of unique
// subscriptions that have already published it. This prevents sending
// the transaction multiple times if it touches multiple ltOFFER
// entries for the same book, or if it touches multiple books and a
// single client has subscribed to those books.
hash_set<std::uint64_t> havePublished;
// Check if this is an offer or an offer cancel or a payment that
// consumes an offer.
// Check to see what the meta looks like.
for (auto& node : alTx.getMeta()->getNodes())
try
{
try
if (node.getFieldU16(sfLedgerEntryType) == ltOFFER)
{
if (node.getFieldU16(sfLedgerEntryType) == ltOFFER)
{
SField const* field = nullptr;
// We need a field that contains the TakerGets and TakerPays
// parameters.
if (node.getFName() == sfModifiedNode)
field = &sfPreviousFields;
else if (node.getFName() == sfCreatedNode)
field = &sfNewFields;
else if (node.getFName() == sfDeletedNode)
field = &sfFinalFields;
if (field)
auto process = [&, this](SField const& field) {
if (auto data = dynamic_cast<STObject const*>(
node.peekAtPField(field));
data && data->isFieldPresent(sfTakerPays) &&
data->isFieldPresent(sfTakerGets))
{
auto data = dynamic_cast<const STObject*>(
node.peekAtPField(*field));
if (data && data->isFieldPresent(sfTakerPays) &&
data->isFieldPresent(sfTakerGets))
{
// determine the OrderBook
Book b{
data->getFieldAmount(sfTakerGets).issue(),
data->getFieldAmount(sfTakerPays).issue()};
auto listeners = getBookListeners(b);
if (listeners)
{
listeners->publish(jvObj, havePublished);
}
}
auto listeners = getBookListeners(
{data->getFieldAmount(sfTakerGets).issue(),
data->getFieldAmount(sfTakerPays).issue()});
if (listeners)
listeners->publish(jvObj, havePublished);
}
}
}
catch (std::exception const&)
{
JLOG(j_.info())
<< "Fields not found in OrderBookDB::processTxn";
};
// We need a field that contains the TakerGets and TakerPays
// parameters.
if (node.getFName() == sfModifiedNode)
process(sfPreviousFields);
else if (node.getFName() == sfCreatedNode)
process(sfNewFields);
else if (node.getFName() == sfDeletedNode)
process(sfFinalFields);
}
}
catch (std::exception const& ex)
{
JLOG(j_.info())
<< "processTxn: field not found (" << ex.what() << ")";
}
}
}

View File

@@ -23,7 +23,6 @@
#include <ripple/app/ledger/AcceptedLedgerTx.h>
#include <ripple/app/ledger/BookListeners.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/OrderBook.h>
#include <mutex>
namespace ripple {
@@ -37,15 +36,13 @@ public:
setup(std::shared_ptr<ReadView const> const& ledger);
void
update(std::shared_ptr<ReadView const> const& ledger);
void
invalidate();
void
addOrderBook(Book const&);
/** @return a list of all orderbooks that want this issuerID and currencyID.
*/
OrderBook::List
std::vector<Book>
getBooksByTakerPays(Issue const&);
/** @return a count of all orderbooks that want this issuerID and
@@ -68,22 +65,14 @@ public:
const AcceptedLedgerTx& alTx,
Json::Value const& jvObj);
using IssueToOrderBook = hash_map<Issue, OrderBook::List>;
private:
void
rawAddBook(Book const&);
Application& app_;
// by ci/ii
IssueToOrderBook mSourceMap;
// by co/io
IssueToOrderBook mDestMap;
// Maps order books by "issue in" to "issue out":
hardened_hash_map<Issue, hardened_hash_set<Issue>> allBooks_;
// does an order book to XRP exist
hash_set<Issue> mXRPBooks;
hash_set<Issue> xrpBooks_;
std::recursive_mutex mLock;
@@ -91,7 +80,7 @@ private:
BookToListenersMap mListeners;
std::uint32_t mSeq;
std::atomic<std::uint32_t> seq_;
beast::Journal const j_;
};

View File

@@ -162,7 +162,7 @@ There are also indirect peer queries. If there have been timeouts while
acquiring ledger data then a server may issue indirect queries. In that
case the server receiving the indirect query passes the query along to any
of its peers that may have the requested data. This is important if the
network has a byzantine failure. If also helps protect the validation
network has a byzantine failure. It also helps protect the validation
network. A validator may need to get a peer set from one of the other
validators, and indirect queries improve the likelihood of success with
that.
@@ -487,4 +487,3 @@ ledger(s) for missing nodes in the back end node store
---
# References #

View File

@@ -33,7 +33,10 @@
#include <ripple/resource/Fees.h>
#include <ripple/shamap/SHAMapNodeID.h>
#include <boost/iterator/function_output_iterator.hpp>
#include <algorithm>
#include <random>
namespace ripple {
@@ -57,15 +60,15 @@ enum {
// Number of nodes to find initially
,
missingNodesFind = 256
missingNodesFind = 512
// Number of nodes to request for a reply
,
reqNodesReply = 128
reqNodesReply = 256
// Number of nodes to request blindly
,
reqNodes = 8
reqNodes = 12
};
// millisecond for each ledger timeout
@@ -601,7 +604,7 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
tmBH.set_ledgerhash(hash_.begin(), hash_.size());
for (auto const& p : need)
{
JLOG(journal_.warn()) << "Want: " << p.second;
JLOG(journal_.debug()) << "Want: " << p.second;
if (!typeSet)
{
@@ -661,15 +664,15 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
if (reason != TriggerReason::reply)
{
// If we're querying blind, don't query deep
tmGL.set_querydepth(0);
tmGL.set_querydepth(1);
}
else if (peer && peer->isHighLatency())
{
// If the peer has high latency, query extra deep
tmGL.set_querydepth(2);
tmGL.set_querydepth(3);
}
else
tmGL.set_querydepth(1);
tmGL.set_querydepth(2);
// Get the state data first because it's the most likely to be useful
// if we wind up abandoning this fetch.
@@ -952,22 +955,23 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
try
{
auto const f = filter.get();
for (auto const& node : packet.nodes())
{
auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
if (!nodeID)
{
san.incInvalid();
return;
}
throw std::runtime_error("data does not properly deserialize");
if (nodeID->isRoot())
san += map.addRootNode(
rootHash, makeSlice(node.nodedata()), filter.get());
{
san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
}
else
san += map.addKnownNode(
*nodeID, makeSlice(node.nodedata()), filter.get());
{
san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
}
if (!san.isGood())
{
@@ -1120,19 +1124,19 @@ InboundLedger::processData(
std::shared_ptr<Peer> peer,
protocol::TMLedgerData& packet)
{
ScopedLockType sl(mtx_);
if (packet.type() == protocol::liBASE)
{
if (packet.nodes_size() < 1)
if (packet.nodes().empty())
{
JLOG(journal_.warn()) << "Got empty header data";
JLOG(journal_.warn()) << peer->id() << ": empty header data";
peer->charge(Resource::feeInvalidRequest);
return -1;
}
SHAMapAddNode san;
ScopedLockType sl(mtx_);
try
{
if (!mHaveHeader)
@@ -1177,13 +1181,18 @@ InboundLedger::processData(
if ((packet.type() == protocol::liTX_NODE) ||
(packet.type() == protocol::liAS_NODE))
{
if (packet.nodes().size() == 0)
std::string type = packet.type() == protocol::liTX_NODE ? "liTX_NODE: "
: "liAS_NODE: ";
if (packet.nodes().empty())
{
JLOG(journal_.info()) << "Got response with no nodes";
JLOG(journal_.info()) << peer->id() << ": response with no nodes";
peer->charge(Resource::feeInvalidRequest);
return -1;
}
ScopedLockType sl(mtx_);
// Verify node IDs and data are complete
for (auto const& node : packet.nodes())
{
@@ -1198,14 +1207,10 @@ InboundLedger::processData(
SHAMapAddNode san;
receiveNode(packet, san);
if (packet.type() == protocol::liTX_NODE)
{
JLOG(journal_.debug()) << "Ledger TX node stats: " << san.get();
}
else
{
JLOG(journal_.debug()) << "Ledger AS node stats: " << san.get();
}
JLOG(journal_.debug())
<< "Ledger "
<< ((packet.type() == protocol::liTX_NODE) ? "TX" : "AS")
<< " node stats: " << san.get();
if (san.isUseful())
progress_ = true;
@@ -1217,20 +1222,100 @@ InboundLedger::processData(
return -1;
}
namespace detail {
// Track the amount of useful data that each peer returns
struct PeerDataCounts
{
// Map from peer to amount of useful the peer returned
std::unordered_map<std::shared_ptr<Peer>, int> counts;
// The largest amount of useful data that any peer returned
int maxCount = 0;
// Update the data count for a peer
void
update(std::shared_ptr<Peer>&& peer, int dataCount)
{
if (dataCount <= 0)
return;
maxCount = std::max(maxCount, dataCount);
auto i = counts.find(peer);
if (i == counts.end())
{
counts.emplace(std::move(peer), dataCount);
return;
}
i->second = std::max(i->second, dataCount);
}
// Prune all the peers that didn't return enough data.
void
prune()
{
// Remove all the peers that didn't return at least half as much data as
// the best peer
auto const thresh = maxCount / 2;
auto i = counts.begin();
while (i != counts.end())
{
if (i->second < thresh)
i = counts.erase(i);
else
++i;
}
}
// call F with the `peer` parameter with a random sample of at most n values
// of the counts vector.
template <class F>
void
sampleN(std::size_t n, F&& f)
{
if (counts.empty())
return;
auto outFunc = [&f](auto&& v) { f(v.first); };
std::minstd_rand rng{std::random_device{}()};
#if _MSC_VER
std::vector<std::pair<std::shared_ptr<Peer>, int>> s;
s.reserve(n);
std::sample(
counts.begin(), counts.end(), std::back_inserter(s), n, rng);
for (auto& v : s)
{
outFunc(v);
}
#else
std::sample(
counts.begin(),
counts.end(),
boost::make_function_output_iterator(outFunc),
n,
rng);
#endif
}
};
} // namespace detail
/** Process pending TMLedgerData
Query the 'best' peer
Query the a random sample of the 'best' peers
*/
void
InboundLedger::runData()
{
std::shared_ptr<Peer> chosenPeer;
int chosenPeerCount = -1;
// Maximum number of peers to request data from
constexpr std::size_t maxUsefulPeers = 6;
std::vector<PeerDataPairType> data;
decltype(mReceivedData) data;
// Reserve some memory so the first couple iterations don't reallocate
data.reserve(8);
detail::PeerDataCounts dataCounts;
for (;;)
{
data.clear();
{
std::lock_guard sl(mReceivedDataLock);
@@ -1243,24 +1328,22 @@ InboundLedger::runData()
data.swap(mReceivedData);
}
// Select the peer that gives us the most nodes that are useful,
// breaking ties in favor of the peer that responded first.
for (auto& entry : data)
{
if (auto peer = entry.first.lock())
{
int count = processData(peer, *(entry.second));
if (count > chosenPeerCount)
{
chosenPeerCount = count;
chosenPeer = std::move(peer);
}
dataCounts.update(std::move(peer), count);
}
}
}
if (chosenPeer)
trigger(chosenPeer, TriggerReason::reply);
// Select a random sample of the peers that gives us the most nodes that are
// useful
dataCounts.prune();
dataCounts.sampleN(maxUsefulPeers, [&](std::shared_ptr<Peer> const& peer) {
trigger(peer, TriggerReason::reply);
});
}
Json::Value

View File

@@ -74,6 +74,12 @@ public:
reason != InboundLedger::Reason::SHARD ||
(seq != 0 && app_.getShardStore()));
// probably not the right rule
if (app_.getOPs().isNeedNetworkLedger() &&
(reason != InboundLedger::Reason::GENERIC) &&
(reason != InboundLedger::Reason::CONSENSUS))
return {};
bool isNew = true;
std::shared_ptr<InboundLedger> inbound;
{
@@ -82,6 +88,7 @@ public:
{
return {};
}
auto it = mLedgers.find(hash);
if (it != mLedgers.end())
{

View File

@@ -71,6 +71,7 @@ public:
, m_zeroSet(m_map[uint256()])
, m_gotSet(std::move(gotSet))
, m_peerSetBuilder(std::move(peerSetBuilder))
, j_(app_.journal("InboundTransactions"))
{
m_zeroSet.mSet = std::make_shared<SHAMap>(
SHAMapType::TRANSACTION, uint256(), app_.getNodeFamily());
@@ -99,9 +100,7 @@ public:
{
std::lock_guard sl(mLock);
auto it = m_map.find(hash);
if (it != m_map.end())
if (auto it = m_map.find(hash); it != m_map.end())
{
if (acquire)
{
@@ -140,11 +139,8 @@ public:
{
protocol::TMLedgerData& packet = *packet_ptr;
JLOG(app_.journal("InboundLedger").trace())
<< "Got data (" << packet.nodes().size()
<< ") "
"for acquiring ledger: "
<< hash;
JLOG(j_.trace()) << "Got data (" << packet.nodes().size()
<< ") for acquiring ledger: " << hash;
TransactionAcquire::pointer ta = getAcquire(hash);
@@ -154,8 +150,9 @@ public:
return;
}
std::list<SHAMapNodeID> nodeIDs;
std::list<Blob> nodeData;
std::vector<std::pair<SHAMapNodeID, Slice>> data;
data.reserve(packet.nodes().size());
for (auto const& node : packet.nodes())
{
if (!node.has_nodeid() || !node.has_nodedata())
@@ -172,12 +169,10 @@ public:
return;
}
nodeIDs.emplace_back(*id);
nodeData.emplace_back(
node.nodedata().begin(), node.nodedata().end());
data.emplace_back(std::make_pair(*id, makeSlice(node.nodedata())));
}
if (!ta->takeNodes(nodeIDs, nodeData, peer).isUseful())
if (!ta->takeNodes(data, peer).isUseful())
peer->charge(Resource::feeUnwantedData);
}
@@ -262,6 +257,8 @@ private:
std::function<void(std::shared_ptr<SHAMap> const&, bool)> m_gotSet;
std::unique_ptr<PeerSetBuilder> m_peerSetBuilder;
beast::Journal j_;
};
//------------------------------------------------------------------------------

View File

@@ -261,8 +261,13 @@ LedgerMaster::getPublishedLedgerAge()
std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
ret -= pubClose;
ret = (ret > 0s) ? ret : 0s;
static std::chrono::seconds lastRet = -1s;
JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
if (ret != lastRet)
{
JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
lastRet = ret;
}
return ret;
}
@@ -287,8 +292,13 @@ LedgerMaster::getValidatedLedgerAge()
std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
ret -= valClose;
ret = (ret > 0s) ? ret : 0s;
static std::chrono::seconds lastRet = -1s;
JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
if (ret != lastRet)
{
JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
lastRet = ret;
}
return ret;
}
@@ -1483,12 +1493,14 @@ LedgerMaster::updatePaths()
if (app_.getOPs().isNeedNetworkLedger())
{
--mPathFindThread;
JLOG(m_journal.debug()) << "Need network ledger for updating paths";
return;
}
}
while (!app_.getJobQueue().isStopping())
{
JLOG(m_journal.debug()) << "updatePaths running";
std::shared_ptr<ReadView const> lastLedger;
{
std::lock_guard ml(m_mutex);
@@ -1506,6 +1518,7 @@ LedgerMaster::updatePaths()
else
{ // Nothing to do
--mPathFindThread;
JLOG(m_journal.debug()) << "Nothing to do for updating paths";
return;
}
}
@@ -1527,7 +1540,31 @@ LedgerMaster::updatePaths()
try
{
app_.getPathRequests().updateAll(lastLedger);
auto& pathRequests = app_.getPathRequests();
{
std::lock_guard ml(m_mutex);
if (!pathRequests.requestsPending())
{
--mPathFindThread;
JLOG(m_journal.debug())
<< "No path requests found. Nothing to do for updating "
"paths. "
<< mPathFindThread << " jobs remaining";
return;
}
}
JLOG(m_journal.debug()) << "Updating paths";
pathRequests.updateAll(lastLedger);
std::lock_guard ml(m_mutex);
if (!pathRequests.requestsPending())
{
JLOG(m_journal.debug())
<< "No path requests left. No need for further updating "
"paths";
--mPathFindThread;
return;
}
}
catch (SHAMapMissingNode const& mn)
{
@@ -1587,8 +1624,12 @@ LedgerMaster::newPFWork(
const char* name,
std::unique_lock<std::recursive_mutex>&)
{
if (mPathFindThread < 2)
if (!app_.isStopping() && mPathFindThread < 2 &&
app_.getPathRequests().requestsPending())
{
JLOG(m_journal.debug())
<< "newPFWork: Creating job. path find threads: "
<< mPathFindThread;
if (app_.getJobQueue().addJob(
jtUPDATE_PF, name, [this]() { updatePaths(); }))
{
@@ -1828,12 +1869,6 @@ LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
mCompleteLedgers.insert(range(minV, maxV));
}
// Configure the ledger history cache. `size` is the target number of
// cached ledgers and `age` is the maximum time an entry may remain
// cached; both are forwarded unchanged to LedgerHistory::tune.
void
LedgerMaster::tune(int size, std::chrono::seconds age)
{
mLedgerHistory.tune(size, age);
}
void
LedgerMaster::sweep()
{
@@ -2074,7 +2109,7 @@ LedgerMaster::doAdvance(std::unique_lock<std::recursive_mutex>& sl)
{
JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
<< " ledgers to publish";
for (auto ledger : pubLedgers)
for (auto const& ledger : pubLedgers)
{
{
ScopedUnlock sul{sl};

View File

@@ -65,7 +65,7 @@ TransactionAcquire::done()
if (failed_)
{
JLOG(journal_.warn()) << "Failed to acquire TX set " << hash_;
JLOG(journal_.debug()) << "Failed to acquire TX set " << hash_;
}
else
{
@@ -176,8 +176,7 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
SHAMapAddNode
TransactionAcquire::takeNodes(
const std::list<SHAMapNodeID>& nodeIDs,
const std::list<Blob>& data,
std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
std::shared_ptr<Peer> const& peer)
{
ScopedLockType sl(mtx_);
@@ -196,24 +195,20 @@ TransactionAcquire::takeNodes(
try
{
if (nodeIDs.empty())
if (data.empty())
return SHAMapAddNode::invalid();
std::list<SHAMapNodeID>::const_iterator nodeIDit = nodeIDs.begin();
std::list<Blob>::const_iterator nodeDatait = data.begin();
ConsensusTransSetSF sf(app_, app_.getTempNodeCache());
while (nodeIDit != nodeIDs.end())
for (auto const& d : data)
{
if (nodeIDit->isRoot())
if (d.first.isRoot())
{
if (mHaveRoot)
JLOG(journal_.debug())
<< "Got root TXS node, already have it";
else if (!mMap->addRootNode(
SHAMapHash{hash_},
makeSlice(*nodeDatait),
nullptr)
SHAMapHash{hash_}, d.second, nullptr)
.isGood())
{
JLOG(journal_.warn()) << "TX acquire got bad root node";
@@ -221,24 +216,22 @@ TransactionAcquire::takeNodes(
else
mHaveRoot = true;
}
else if (!mMap->addKnownNode(*nodeIDit, makeSlice(*nodeDatait), &sf)
.isGood())
else if (!mMap->addKnownNode(d.first, d.second, &sf).isGood())
{
JLOG(journal_.warn()) << "TX acquire got bad non-root node";
return SHAMapAddNode::invalid();
}
++nodeIDit;
++nodeDatait;
}
trigger(peer);
progress_ = true;
return SHAMapAddNode::useful();
}
catch (std::exception const&)
catch (std::exception const& ex)
{
JLOG(journal_.error()) << "Peer sends us junky transaction node data";
JLOG(journal_.error())
<< "Peer " << peer->id()
<< " sent us junky transaction node data: " << ex.what();
return SHAMapAddNode::invalid();
}
}

View File

@@ -44,8 +44,7 @@ public:
SHAMapAddNode
takeNodes(
const std::list<SHAMapNodeID>& IDs,
const std::list<Blob>& data,
std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
std::shared_ptr<Peer> const&);
void

View File

@@ -225,9 +225,11 @@ public:
boost::asio::signal_set m_signals;
std::condition_variable cv_;
mutable std::mutex mut_;
bool isTimeToStop = false;
// Once we get C++20, we could use `std::atomic_flag` for `isTimeToStop`
// and eliminate the need for the condition variable and the mutex.
std::condition_variable stoppingCondition_;
mutable std::mutex stoppingMutex_;
std::atomic<bool> isTimeToStop = false;
std::atomic<bool> checkSigs_;
@@ -960,110 +962,9 @@ public:
<< "' took " << elapsed.count() << " seconds.";
}
// tune caches
using namespace std::chrono;
m_ledgerMaster->tune(
config_->getValueFor(SizedItem::ledgerSize),
seconds{config_->getValueFor(SizedItem::ledgerAge)});
return true;
}
//--------------------------------------------------------------------------
// Called to indicate shutdown.
void
stop()
{
JLOG(m_journal.debug()) << "Application stopping";
m_io_latency_sampler.cancel_async();
// VFALCO Enormous hack, we have to force the probe to cancel
// before we stop the io_service queue or else it never
// unblocks in its destructor. The fix is to make all
// io_objects gracefully handle exit so that we can
// naturally return from io_service::run() instead of
// forcing a call to io_service::stop()
m_io_latency_sampler.cancel();
m_resolver->stop_async();
// NIKB This is a hack - we need to wait for the resolver to
//      stop before we stop the io_service queue or weird
//      things will happen.
m_resolver->stop();
{
boost::system::error_code ec;
sweepTimer_.cancel(ec);
if (ec)
{
JLOG(m_journal.error())
<< "Application: sweepTimer cancel error: " << ec.message();
}
ec.clear();
entropyTimer_.cancel(ec);
if (ec)
{
JLOG(m_journal.error())
<< "Application: entropyTimer cancel error: "
<< ec.message();
}
}
// Make sure that any waitHandlers pending in our timers are done
// before we declare ourselves stopped.
using namespace std::chrono_literals;
waitHandlerCounter_.join("Application", 1s, m_journal);
mValidations.flush();
validatorSites_->stop();
// TODO Store manifests in manifests.sqlite instead of wallet.db
validatorManifests_->save(
getWalletDB(),
"ValidatorManifests",
[this](PublicKey const& pubKey) {
return validators().listed(pubKey);
});
publisherManifests_->save(
getWalletDB(),
"PublisherManifests",
[this](PublicKey const& pubKey) {
return validators().trustedPublisher(pubKey);
});
// The order of these stop calls is delicate.
// Re-ordering them risks undefined behavior.
m_loadManager->stop();
m_shaMapStore->stop();
m_jobQueue->stop();
if (shardArchiveHandler_)
shardArchiveHandler_->stop();
if (overlay_)
overlay_->stop();
if (shardStore_)
shardStore_->stop();
grpcServer_->stop();
m_networkOPs->stop();
serverHandler_->stop();
m_ledgerReplayer->stop();
m_inboundTransactions->stop();
m_inboundLedgers->stop();
ledgerCleaner_->stop();
if (reportingETL_)
reportingETL_->stop();
if (auto pg = dynamic_cast<RelationalDBInterfacePostgres*>(
&*mRelationalDBInterface))
pg->stop();
m_nodeStore->stop();
perfLog_->stop();
}
//--------------------------------------------------------------------------
//
// PropertyStream
@@ -1636,27 +1537,102 @@ ApplicationImp::run()
}
{
std::unique_lock<std::mutex> lk{mut_};
cv_.wait(lk, [this] { return isTimeToStop; });
std::unique_lock<std::mutex> lk{stoppingMutex_};
stoppingCondition_.wait(lk, [this] { return isTimeToStop.load(); });
}
JLOG(m_journal.info()) << "Received shutdown request";
stop();
JLOG(m_journal.debug()) << "Application stopping";
m_io_latency_sampler.cancel_async();
// VFALCO Enormous hack, we have to force the probe to cancel
// before we stop the io_service queue or else it never
// unblocks in its destructor. The fix is to make all
// io_objects gracefully handle exit so that we can
// naturally return from io_service::run() instead of
// forcing a call to io_service::stop()
m_io_latency_sampler.cancel();
m_resolver->stop_async();
// NIKB This is a hack - we need to wait for the resolver to
// stop before we stop the io_service queue or weird
// things will happen.
m_resolver->stop();
{
boost::system::error_code ec;
sweepTimer_.cancel(ec);
if (ec)
{
JLOG(m_journal.error())
<< "Application: sweepTimer cancel error: " << ec.message();
}
ec.clear();
entropyTimer_.cancel(ec);
if (ec)
{
JLOG(m_journal.error())
<< "Application: entropyTimer cancel error: " << ec.message();
}
}
// Make sure that any waitHandlers pending in our timers are done
// before we declare ourselves stopped.
using namespace std::chrono_literals;
waitHandlerCounter_.join("Application", 1s, m_journal);
mValidations.flush();
validatorSites_->stop();
// TODO Store manifests in manifests.sqlite instead of wallet.db
validatorManifests_->save(
getWalletDB(), "ValidatorManifests", [this](PublicKey const& pubKey) {
return validators().listed(pubKey);
});
publisherManifests_->save(
getWalletDB(), "PublisherManifests", [this](PublicKey const& pubKey) {
return validators().trustedPublisher(pubKey);
});
// The order of these stop calls is delicate.
// Re-ordering them risks undefined behavior.
m_loadManager->stop();
m_shaMapStore->stop();
m_jobQueue->stop();
if (shardArchiveHandler_)
shardArchiveHandler_->stop();
if (overlay_)
overlay_->stop();
if (shardStore_)
shardStore_->stop();
grpcServer_->stop();
m_networkOPs->stop();
serverHandler_->stop();
m_ledgerReplayer->stop();
m_inboundTransactions->stop();
m_inboundLedgers->stop();
ledgerCleaner_->stop();
if (reportingETL_)
reportingETL_->stop();
if (auto pg = dynamic_cast<RelationalDBInterfacePostgres*>(
&*mRelationalDBInterface))
pg->stop();
m_nodeStore->stop();
perfLog_->stop();
JLOG(m_journal.info()) << "Done.";
}
void
ApplicationImp::signalStop()
{
// Unblock the main thread (which is sitting in run()).
// When we get C++20 this can use std::latch.
std::lock_guard lk{mut_};
if (!isTimeToStop)
{
isTimeToStop = true;
cv_.notify_all();
}
if (!isTimeToStop.exchange(true))
stoppingCondition_.notify_all();
}
bool
@@ -1674,8 +1650,7 @@ ApplicationImp::checkSigs(bool check)
bool
ApplicationImp::isStopping() const
{
std::lock_guard lk{mut_};
return isTimeToStop;
return isTimeToStop.load();
}
int

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_APP_MISC_CANONICALTXSET_H_INCLUDED
#define RIPPLE_APP_MISC_CANONICALTXSET_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/protocol/RippleLedgerHash.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/SeqProxy.h>
@@ -34,7 +35,7 @@ namespace ripple {
*/
// VFALCO TODO rename to SortedTxSet
class CanonicalTXSet
class CanonicalTXSet : public CountedObject<CanonicalTXSet>
{
private:
class Key

View File

@@ -24,7 +24,9 @@
#include <ripple/beast/utility/Journal.h>
#include <ripple/protocol/PublicKey.h>
#include <ripple/protocol/SecretKey.h>
#include <optional>
#include <shared_mutex>
#include <string>
namespace ripple {
@@ -223,9 +225,8 @@ class DatabaseCon;
class ManifestCache
{
private:
beast::Journal mutable j_;
std::mutex apply_mutex_;
std::mutex mutable read_mutex_;
beast::Journal j_;
std::shared_mutex mutable mutex_;
/** Active manifests stored by master public key. */
hash_map<PublicKey, Manifest> map_;
@@ -378,8 +379,10 @@ public:
/** Invokes the callback once for every populated manifest.
@note Undefined behavior results when calling ManifestCache members from
within the callback
@note Do not call ManifestCache member functions from within the
callback. This can re-lock the mutex from the same thread, which is UB.
@note Do not write ManifestCache member variables from within the
callback. This can lead to data races.
@param f Function called for each manifest
@@ -391,7 +394,7 @@ public:
void
for_each_manifest(Function&& f) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
for (auto const& [_, manifest] : map_)
{
(void)_;
@@ -401,8 +404,10 @@ public:
/** Invokes the callback once for every populated manifest.
@note Undefined behavior results when calling ManifestCache members from
within the callback
@note Do not call ManifestCache member functions from within the
callback. This can re-lock the mutex from the same thread, which is UB.
@note Do not write ManifestCache member variables from
within the callback. This can lead to data races.
@param pf Pre-function called with the maximum number of times f will be
called (useful for memory allocations)
@@ -417,7 +422,7 @@ public:
void
for_each_manifest(PreFun&& pf, EachFun&& f) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
pf(map_.size());
for (auto const& [_, manifest] : map_)
{

View File

@@ -445,9 +445,9 @@ public:
pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
void
pubProposedTransaction(
std::shared_ptr<ReadView const> const& lpCurrent,
std::shared_ptr<STTx const> const& stTxn,
TER terResult) override;
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& transaction,
TER result) override;
void
pubValidation(std::shared_ptr<STValidation> const& val) override;
@@ -612,20 +612,26 @@ private:
Json::Value
transJson(
const STTx& stTxn,
TER terResult,
bool bValidated,
std::shared_ptr<ReadView const> const& lpCurrent);
const STTx& transaction,
TER result,
bool validated,
std::shared_ptr<ReadView const> const& ledger);
void
pubValidatedTransaction(
std::shared_ptr<ReadView const> const& alAccepted,
const AcceptedLedgerTx& alTransaction);
std::shared_ptr<ReadView const> const& ledger,
AcceptedLedgerTx const& transaction);
void
pubAccountTransaction(
std::shared_ptr<ReadView const> const& lpCurrent,
const AcceptedLedgerTx& alTransaction,
bool isAccepted);
std::shared_ptr<ReadView const> const& ledger,
AcceptedLedgerTx const& transaction);
void
pubProposedAccountTransaction(
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& transaction,
TER result);
void
pubServer();
@@ -2643,11 +2649,11 @@ NetworkOPsImp::getLedgerFetchInfo()
void
NetworkOPsImp::pubProposedTransaction(
std::shared_ptr<ReadView const> const& lpCurrent,
std::shared_ptr<STTx const> const& stTxn,
TER terResult)
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& transaction,
TER result)
{
Json::Value jvObj = transJson(*stTxn, terResult, false, lpCurrent);
Json::Value jvObj = transJson(*transaction, result, false, ledger);
{
std::lock_guard sl(mSubLock);
@@ -2668,10 +2674,8 @@ NetworkOPsImp::pubProposedTransaction(
}
}
}
AcceptedLedgerTx alt(
lpCurrent, stTxn, terResult, app_.accountIDCache(), app_.logs());
JLOG(m_journal.trace()) << "pubProposed: " << alt.getJson();
pubAccountTransaction(lpCurrent, alt, false);
pubProposedAccountTransaction(ledger, transaction, result);
}
void
@@ -2846,9 +2850,13 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
lpAccepted->info().hash, alpAccepted);
}
assert(alpAccepted->getLedger().get() == lpAccepted.get());
{
JLOG(m_journal.debug())
<< "Publishing ledger = " << lpAccepted->info().seq;
<< "Publishing ledger " << lpAccepted->info().seq << " "
<< lpAccepted->info().hash;
std::lock_guard sl(mSubLock);
if (!mStreamMaps[sLedger].empty())
@@ -2868,7 +2876,7 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
jvObj[jss::reserve_inc] =
lpAccepted->fees().increment.jsonClipped();
jvObj[jss::txn_count] = Json::UInt(alpAccepted->getTxnCount());
jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
if (mMode >= OperatingMode::SYNCING)
{
@@ -2882,10 +2890,6 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
InfoSub::pointer p = it->second.lock();
if (p)
{
JLOG(m_journal.debug())
<< "Publishing ledger = " << lpAccepted->info().seq
<< " : consumer = " << p->getConsumer()
<< " : obj = " << jvObj;
p->send(jvObj, true);
++it;
}
@@ -2917,9 +2921,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
}
// Don't lock since pubAcceptedTransaction is locking.
for (auto const& [_, accTx] : alpAccepted->getMap())
for (auto const& accTx : *alpAccepted)
{
(void)_;
JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
pubValidatedTransaction(lpAccepted, *accTx);
}
@@ -2969,26 +2972,26 @@ NetworkOPsImp::getLocalTxCount()
// transactions.
Json::Value
NetworkOPsImp::transJson(
const STTx& stTxn,
TER terResult,
bool bValidated,
std::shared_ptr<ReadView const> const& lpCurrent)
const STTx& transaction,
TER result,
bool validated,
std::shared_ptr<ReadView const> const& ledger)
{
Json::Value jvObj(Json::objectValue);
std::string sToken;
std::string sHuman;
transResultInfo(terResult, sToken, sHuman);
transResultInfo(result, sToken, sHuman);
jvObj[jss::type] = "transaction";
jvObj[jss::transaction] = stTxn.getJson(JsonOptions::none);
jvObj[jss::transaction] = transaction.getJson(JsonOptions::none);
if (bValidated)
if (validated)
{
jvObj[jss::ledger_index] = lpCurrent->info().seq;
jvObj[jss::ledger_hash] = to_string(lpCurrent->info().hash);
jvObj[jss::ledger_index] = ledger->info().seq;
jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
jvObj[jss::transaction][jss::date] =
lpCurrent->info().closeTime.time_since_epoch().count();
ledger->info().closeTime.time_since_epoch().count();
jvObj[jss::validated] = true;
// WRITEME: Put the account next seq here
@@ -2996,24 +2999,24 @@ NetworkOPsImp::transJson(
else
{
jvObj[jss::validated] = false;
jvObj[jss::ledger_current_index] = lpCurrent->info().seq;
jvObj[jss::ledger_current_index] = ledger->info().seq;
}
jvObj[jss::status] = bValidated ? "closed" : "proposed";
jvObj[jss::status] = validated ? "closed" : "proposed";
jvObj[jss::engine_result] = sToken;
jvObj[jss::engine_result_code] = terResult;
jvObj[jss::engine_result_code] = result;
jvObj[jss::engine_result_message] = sHuman;
if (stTxn.getTxnType() == ttOFFER_CREATE)
if (transaction.getTxnType() == ttOFFER_CREATE)
{
auto const account = stTxn.getAccountID(sfAccount);
auto const amount = stTxn.getFieldAmount(sfTakerGets);
auto const account = transaction.getAccountID(sfAccount);
auto const amount = transaction.getFieldAmount(sfTakerGets);
// If the offer create is not self funded then add the owner balance
if (account != amount.issue().account)
{
auto const ownerFunds = accountFunds(
*lpCurrent,
*ledger,
account,
amount,
fhIGNORE_FREEZE,
@@ -3027,17 +3030,18 @@ NetworkOPsImp::transJson(
void
NetworkOPsImp::pubValidatedTransaction(
std::shared_ptr<ReadView const> const& alAccepted,
const AcceptedLedgerTx& alTx)
std::shared_ptr<ReadView const> const& ledger,
const AcceptedLedgerTx& transaction)
{
std::shared_ptr<STTx const> stTxn = alTx.getTxn();
Json::Value jvObj = transJson(*stTxn, alTx.getResult(), true, alAccepted);
auto const& stTxn = transaction.getTxn();
Json::Value jvObj =
transJson(*stTxn, transaction.getResult(), true, ledger);
if (auto const txMeta = alTx.getMeta())
{
jvObj[jss::meta] = txMeta->getJson(JsonOptions::none);
RPC::insertDeliveredAmount(
jvObj[jss::meta], *alAccepted, stTxn, *txMeta);
auto const& meta = transaction.getMeta();
jvObj[jss::meta] = meta.getJson(JsonOptions::none);
RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, stTxn, meta);
}
{
@@ -3072,32 +3076,31 @@ NetworkOPsImp::pubValidatedTransaction(
it = mStreamMaps[sRTTransactions].erase(it);
}
}
app_.getOrderBookDB().processTxn(alAccepted, alTx, jvObj);
pubAccountTransaction(alAccepted, alTx, true);
if (transaction.getResult() == tesSUCCESS)
app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
pubAccountTransaction(ledger, transaction);
}
void
NetworkOPsImp::pubAccountTransaction(
std::shared_ptr<ReadView const> const& lpCurrent,
const AcceptedLedgerTx& alTx,
bool bAccepted)
std::shared_ptr<ReadView const> const& ledger,
AcceptedLedgerTx const& transaction)
{
hash_set<InfoSub::pointer> notify;
int iProposed = 0;
int iAccepted = 0;
std::vector<SubAccountHistoryInfo> accountHistoryNotify;
auto const currLedgerSeq = lpCurrent->seq();
auto const currLedgerSeq = ledger->seq();
{
std::lock_guard sl(mSubLock);
if (!bAccepted && mSubRTAccount.empty())
return;
if (!mSubAccount.empty() || (!mSubRTAccount.empty()) ||
if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
!mSubAccountHistory.empty())
{
for (auto const& affectedAccount : alTx.getAffected())
for (auto const& affectedAccount : transaction.getAffected())
{
if (auto simiIt = mSubRTAccount.find(affectedAccount);
simiIt != mSubRTAccount.end())
@@ -3119,80 +3122,140 @@ NetworkOPsImp::pubAccountTransaction(
}
}
if (bAccepted)
if (auto simiIt = mSubAccount.find(affectedAccount);
simiIt != mSubAccount.end())
{
if (auto simiIt = mSubAccount.find(affectedAccount);
simiIt != mSubAccount.end())
auto it = simiIt->second.begin();
while (it != simiIt->second.end())
{
auto it = simiIt->second.begin();
while (it != simiIt->second.end())
{
InfoSub::pointer p = it->second.lock();
InfoSub::pointer p = it->second.lock();
if (p)
{
notify.insert(p);
++it;
++iAccepted;
}
else
it = simiIt->second.erase(it);
if (p)
{
notify.insert(p);
++it;
++iAccepted;
}
else
it = simiIt->second.erase(it);
}
}
if (auto histoIt = mSubAccountHistory.find(affectedAccount);
histoIt != mSubAccountHistory.end())
{
auto& subs = histoIt->second;
auto it = subs.begin();
while (it != subs.end())
{
SubAccountHistoryInfoWeak const& info = it->second;
if (currLedgerSeq <= info.index_->separationLedgerSeq_)
{
++it;
continue;
}
if (auto isSptr = info.sinkWptr_.lock(); isSptr)
{
accountHistoryNotify.emplace_back(
SubAccountHistoryInfo{isSptr, info.index_});
++it;
}
else
{
it = subs.erase(it);
}
}
if (auto histoIt = mSubAccountHistory.find(affectedAccount);
histoIt != mSubAccountHistory.end())
{
auto& subs = histoIt->second;
auto it = subs.begin();
while (it != subs.end())
{
SubAccountHistoryInfoWeak const& info = it->second;
if (currLedgerSeq <=
info.index_->separationLedgerSeq_)
{
++it;
continue;
}
if (auto isSptr = info.sinkWptr_.lock(); isSptr)
{
accountHistoryNotify.emplace_back(
SubAccountHistoryInfo{isSptr, info.index_});
++it;
}
else
{
it = subs.erase(it);
}
}
if (subs.empty())
mSubAccountHistory.erase(histoIt);
}
if (subs.empty())
mSubAccountHistory.erase(histoIt);
}
}
}
}
JLOG(m_journal.trace())
<< "pubAccountTransaction:"
<< " iProposed=" << iProposed << " iAccepted=" << iAccepted;
<< "pubAccountTransaction: "
<< "proposed=" << iProposed << ", accepted=" << iAccepted;
if (!notify.empty() || !accountHistoryNotify.empty())
{
std::shared_ptr<STTx const> stTxn = alTx.getTxn();
Json::Value jvObj =
transJson(*stTxn, alTx.getResult(), bAccepted, lpCurrent);
auto const& stTxn = transaction.getTxn();
Json::Value jvObj =
transJson(*stTxn, transaction.getResult(), true, ledger);
if (alTx.isApplied())
{
if (auto const txMeta = alTx.getMeta())
auto const& meta = transaction.getMeta();
jvObj[jss::meta] = meta.getJson(JsonOptions::none);
RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, stTxn, meta);
}
for (InfoSub::ref isrListener : notify)
isrListener->send(jvObj, true);
assert(!jvObj.isMember(jss::account_history_tx_stream));
for (auto& info : accountHistoryNotify)
{
auto& index = info.index_;
if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
jvObj[jss::account_history_tx_first] = true;
jvObj[jss::account_history_tx_index] = index->forwardTxIndex_++;
info.sink_->send(jvObj, true);
}
}
}
void
NetworkOPsImp::pubProposedAccountTransaction(
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& tx,
TER result)
{
hash_set<InfoSub::pointer> notify;
int iProposed = 0;
std::vector<SubAccountHistoryInfo> accountHistoryNotify;
{
std::lock_guard sl(mSubLock);
if (mSubRTAccount.empty())
return;
if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
!mSubAccountHistory.empty())
{
for (auto const& affectedAccount : tx->getMentionedAccounts())
{
jvObj[jss::meta] = txMeta->getJson(JsonOptions::none);
RPC::insertDeliveredAmount(
jvObj[jss::meta], *lpCurrent, stTxn, *txMeta);
if (auto simiIt = mSubRTAccount.find(affectedAccount);
simiIt != mSubRTAccount.end())
{
auto it = simiIt->second.begin();
while (it != simiIt->second.end())
{
InfoSub::pointer p = it->second.lock();
if (p)
{
notify.insert(p);
++it;
++iProposed;
}
else
it = simiIt->second.erase(it);
}
}
}
}
}
JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
if (!notify.empty() || !accountHistoryNotify.empty())
{
Json::Value jvObj = transJson(*tx, result, false, ledger);
for (InfoSub::ref isrListener : notify)
isrListener->send(jvObj, true);

View File

@@ -255,9 +255,9 @@ public:
pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) = 0;
virtual void
pubProposedTransaction(
std::shared_ptr<ReadView const> const& lpCurrent,
std::shared_ptr<STTx const> const& stTxn,
TER terResult) = 0;
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const& transaction,
TER result) = 0;
virtual void
pubValidation(std::shared_ptr<STValidation> const& val) = 0;

View File

@@ -1,87 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_MISC_ORDERBOOK_H_INCLUDED
#define RIPPLE_APP_MISC_ORDERBOOK_H_INCLUDED
namespace ripple {
/** Describes a serialized ledger entry for an order book. */
class OrderBook
{
public:
// Shared-ownership handle to an OrderBook.
using pointer = std::shared_ptr<OrderBook>;
// Const reference to a shared OrderBook handle.
using ref = std::shared_ptr<OrderBook> const&;
// Collection of order books.
using List = std::vector<pointer>;
/** Construct from a currency specification.
@param base Ledger index (key) of the order book's base directory.
@param book The in and out currency/issuer pairs.
*/
// NOTE(review): `base` was previously documented as "index ???" —
// presumably the book-base ledger key; confirm against callers.
OrderBook(uint256 const& base, Book const& book)
: mBookBase(base), mBook(book)
{
}
// Ledger key of the order book's base directory.
uint256 const&
getBookBase() const
{
return mBookBase;
}
// The in/out currency+issuer pair this book trades.
Book const&
book() const
{
return mBook;
}
// Currency offered by takers (the "in" side of the book).
Currency const&
getCurrencyIn() const
{
return mBook.in.currency;
}
// Currency received by takers (the "out" side of the book).
Currency const&
getCurrencyOut() const
{
return mBook.out.currency;
}
// Issuer of the "in" currency.
AccountID const&
getIssuerIn() const
{
return mBook.in.account;
}
// Issuer of the "out" currency.
AccountID const&
getIssuerOut() const
{
return mBook.out.account;
}
private:
uint256 const mBookBase;  // cached base-directory key for this book
Book const mBook;         // the currency/issuer pairs defining the book
};
} // namespace ripple
#endif

View File

@@ -23,6 +23,7 @@
#include <ripple/app/tx/applySteps.h>
#include <ripple/ledger/ApplyView.h>
#include <ripple/ledger/OpenView.h>
#include <ripple/protocol/RippleLedgerHash.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/SeqProxy.h>
#include <ripple/protocol/TER.h>
@@ -340,7 +341,7 @@ public:
in the queue.
*/
std::vector<TxDetails>
getAccountTxs(AccountID const& account, ReadView const& view) const;
getAccountTxs(AccountID const& account) const;
/** Returns information about all transactions currently
in the queue.
@@ -349,7 +350,7 @@ public:
in the queue.
*/
std::vector<TxDetails>
getTxs(ReadView const& view) const;
getTxs() const;
/** Summarize current fee metrics for the `fee` RPC command.
@@ -575,6 +576,16 @@ private:
*/
static constexpr int retriesAllowed = 10;
/** The hash of the parent ledger.
This is used to pseudo-randomize the transaction order when
populating byFee_, by XORing it with the transaction hash (txID).
Using a single static and doing the XOR operation every time was
tested to be as fast or faster than storing the computed "sort key",
and obviously uses less memory.
*/
static LedgerHash parentHashComp;
public:
/// Constructor
MaybeTx(
@@ -621,22 +632,26 @@ private:
explicit OrderCandidates() = default;
/** Sort @ref MaybeTx by `feeLevel` descending, then by
* transaction ID ascending
* pseudo-randomized transaction ID ascending
*
* The transaction queue is ordered such that transactions
* paying a higher fee are in front of transactions paying
* a lower fee, giving them an opportunity to be processed into
* the open ledger first. Within transactions paying the same
* fee, order by the arbitrary but consistent transaction ID.
* This allows validators to build similar queues in the same
* order, and thus have more similar initial proposals.
* fee, order by the arbitrary but consistent pseudo-randomized
* transaction ID. The ID is pseudo-randomized by XORing it with
* the open ledger's parent hash, which is deterministic, but
* unpredictable. This allows validators to build similar queues
* in the same order, and thus have more similar initial
* proposals.
*
*/
bool
operator()(const MaybeTx& lhs, const MaybeTx& rhs) const
{
if (lhs.feeLevel == rhs.feeLevel)
return lhs.txID < rhs.txID;
return (lhs.txID ^ MaybeTx::parentHashComp) <
(rhs.txID ^ MaybeTx::parentHashComp);
return lhs.feeLevel > rhs.feeLevel;
}
};
@@ -770,6 +785,14 @@ private:
*/
std::optional<size_t> maxSize_;
#if !NDEBUG
/**
parentHash_ checks that no unexpected ledger transitions
happen, and is only checked via debug asserts.
*/
LedgerHash parentHash_{beast::zero};
#endif
/** Most queue operations are done under the master lock,
but use this mutex for the RPC "fee" command, which isn't.
*/

View File

@@ -28,8 +28,11 @@
#include <ripple/json/json_reader.h>
#include <ripple/protocol/PublicKey.h>
#include <ripple/protocol/Sign.h>
#include <boost/algorithm/string/trim.hpp>
#include <numeric>
#include <shared_mutex>
#include <stdexcept>
namespace ripple {
@@ -283,7 +286,7 @@ loadValidatorToken(std::vector<std::string> const& blob)
PublicKey
ManifestCache::getSigningKey(PublicKey const& pk) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
auto const iter = map_.find(pk);
if (iter != map_.end() && !iter->second.revoked())
@@ -295,7 +298,7 @@ ManifestCache::getSigningKey(PublicKey const& pk) const
PublicKey
ManifestCache::getMasterKey(PublicKey const& pk) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
if (auto const iter = signingToMasterKeys_.find(pk);
iter != signingToMasterKeys_.end())
@@ -307,7 +310,7 @@ ManifestCache::getMasterKey(PublicKey const& pk) const
std::optional<std::uint32_t>
ManifestCache::getSequence(PublicKey const& pk) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
auto const iter = map_.find(pk);
if (iter != map_.end() && !iter->second.revoked())
@@ -319,7 +322,7 @@ ManifestCache::getSequence(PublicKey const& pk) const
std::optional<std::string>
ManifestCache::getDomain(PublicKey const& pk) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
auto const iter = map_.find(pk);
if (iter != map_.end() && !iter->second.revoked())
@@ -331,7 +334,7 @@ ManifestCache::getDomain(PublicKey const& pk) const
std::optional<std::string>
ManifestCache::getManifest(PublicKey const& pk) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
auto const iter = map_.find(pk);
if (iter != map_.end() && !iter->second.revoked())
@@ -343,7 +346,7 @@ ManifestCache::getManifest(PublicKey const& pk) const
bool
ManifestCache::revoked(PublicKey const& pk) const
{
std::lock_guard lock{read_mutex_};
std::shared_lock lock{mutex_};
auto const iter = map_.find(pk);
if (iter != map_.end())
@@ -355,86 +358,115 @@ ManifestCache::revoked(PublicKey const& pk) const
ManifestDisposition
ManifestCache::applyManifest(Manifest m)
{
std::lock_guard applyLock{apply_mutex_};
// Check the manifest against the conditions that do not require a
// `unique_lock` (write lock) on the `mutex_`. Since the signature can be
// relatively expensive, the `checkSignature` parameter determines if the
// signature should be checked. Since `prewriteCheck` is run twice (see
// comment below), `checkSignature` only needs to be set to true on the
// first run.
auto prewriteCheck =
[this, &m](auto const& iter, bool checkSignature, auto const& lock)
-> std::optional<ManifestDisposition> {
assert(lock.owns_lock());
(void)lock; // not used. parameter is present to ensure the mutex is
// locked when the lambda is called.
if (iter != map_.end() && m.sequence <= iter->second.sequence)
{
// We received a manifest whose sequence number is not strictly
// greater than the one we already know about. This can happen in
// several cases including when we receive manifests from a peer who
// doesn't have the latest data.
if (auto stream = j_.debug())
logMftAct(
stream,
"Stale",
m.masterKey,
m.sequence,
iter->second.sequence);
return ManifestDisposition::stale;
}
// Before we spend time checking the signature, make sure the
// sequence number is newer than any we have.
auto const iter = map_.find(m.masterKey);
if (checkSignature && !m.verify())
{
if (auto stream = j_.warn())
logMftAct(stream, "Invalid", m.masterKey, m.sequence);
return ManifestDisposition::invalid;
}
if (iter != map_.end() && m.sequence <= iter->second.sequence)
{
// We received a manifest whose sequence number is not strictly greater
// than the one we already know about. This can happen in several cases
// including when we receive manifests from a peer who doesn't have the
// latest data.
if (auto stream = j_.debug())
logMftAct(
stream,
"Stale",
m.masterKey,
m.sequence,
iter->second.sequence);
return ManifestDisposition::stale;
}
// If the master key associated with a manifest is or might be
// compromised and is, therefore, no longer trustworthy.
//
// A manifest revocation essentially marks a manifest as compromised. By
// setting the sequence number to the highest value possible, the
// manifest is effectively neutered and cannot be superseded by a forged
// one.
bool const revoked = m.revoked();
// Now check the signature
if (!m.verify())
{
if (auto stream = j_.warn())
logMftAct(stream, "Invalid", m.masterKey, m.sequence);
return ManifestDisposition::invalid;
}
if (auto stream = j_.warn(); stream && revoked)
logMftAct(stream, "Revoked", m.masterKey, m.sequence);
// If the master key associated with a manifest is or might be compromised
// and is, therefore, no longer trustworthy.
//
// A manifest revocation essentially marks a manifest as compromised. By
// setting the sequence number to the highest value possible, the manifest
// is effectively neutered and cannot be superseded by a forged one.
bool const revoked = m.revoked();
if (auto stream = j_.warn(); stream && revoked)
logMftAct(stream, "Revoked", m.masterKey, m.sequence);
std::lock_guard readLock{read_mutex_};
// Sanity check: the master key of this manifest should not be used as
// the ephemeral key of another manifest:
if (auto const x = signingToMasterKeys_.find(m.masterKey);
x != signingToMasterKeys_.end())
{
JLOG(j_.warn()) << to_string(m)
<< ": Master key already used as ephemeral key for "
<< toBase58(TokenType::NodePublic, x->second);
return ManifestDisposition::badMasterKey;
}
if (!revoked)
{
// Sanity check: the ephemeral key of this manifest should not be used
// as the master or ephemeral key of another manifest:
if (auto const x = signingToMasterKeys_.find(m.signingKey);
// Sanity check: the master key of this manifest should not be used as
// the ephemeral key of another manifest:
if (auto const x = signingToMasterKeys_.find(m.masterKey);
x != signingToMasterKeys_.end())
{
JLOG(j_.warn())
<< to_string(m)
<< ": Ephemeral key already used as ephemeral key for "
<< toBase58(TokenType::NodePublic, x->second);
JLOG(j_.warn()) << to_string(m)
<< ": Master key already used as ephemeral key for "
<< toBase58(TokenType::NodePublic, x->second);
return ManifestDisposition::badEphemeralKey;
return ManifestDisposition::badMasterKey;
}
if (auto const x = map_.find(m.signingKey); x != map_.end())
if (!revoked)
{
JLOG(j_.warn())
<< to_string(m) << ": Ephemeral key used as master key for "
<< to_string(x->second);
// Sanity check: the ephemeral key of this manifest should not be
// used as the master or ephemeral key of another manifest:
if (auto const x = signingToMasterKeys_.find(m.signingKey);
x != signingToMasterKeys_.end())
{
JLOG(j_.warn())
<< to_string(m)
<< ": Ephemeral key already used as ephemeral key for "
<< toBase58(TokenType::NodePublic, x->second);
return ManifestDisposition::badEphemeralKey;
return ManifestDisposition::badEphemeralKey;
}
if (auto const x = map_.find(m.signingKey); x != map_.end())
{
JLOG(j_.warn())
<< to_string(m) << ": Ephemeral key used as master key for "
<< to_string(x->second);
return ManifestDisposition::badEphemeralKey;
}
}
return std::nullopt;
};
{
std::shared_lock sl{mutex_};
if (auto d =
prewriteCheck(map_.find(m.masterKey), /*checkSig*/ true, sl))
return *d;
}
std::unique_lock sl{mutex_};
auto const iter = map_.find(m.masterKey);
// Since we released the previously held read lock, it's possible that the
// collections have been written to. This means we need to run
// `prewriteCheck` again. This re-does work, but `prewriteCheck` is
// relatively inexpensive to run, and doing it this way allows us to run
// `prewriteCheck` under a `shared_lock` above.
// Note, the signature has already been checked above, so it
// doesn't need to happen again (signature checks are somewhat expensive).
// Note: It's a mistake to use an upgradable lock. This is a recipe for
// deadlock.
if (auto d = prewriteCheck(iter, /*checkSig*/ false, sl))
return *d;
bool const revoked = m.revoked();
// This is the first manifest we are seeing for a master key. This should
// only ever happen once per validator run.
if (iter == map_.end())
@@ -543,7 +575,7 @@ ManifestCache::save(
std::string const& dbTable,
std::function<bool(PublicKey const&)> const& isTrusted)
{
std::lock_guard lock{apply_mutex_};
std::shared_lock lock{mutex_};
auto db = dbCon.checkoutDb();
saveManifests(*db, dbTable, isTrusted, map_, j_);

View File

@@ -265,6 +265,8 @@ TxQ::FeeMetrics::escalatedSeriesFeeLevel(
return totalFeeLevel;
}
LedgerHash TxQ::MaybeTx::parentHashComp{};
TxQ::MaybeTx::MaybeTx(
std::shared_ptr<STTx const> const& txn_,
TxID const& txID_,
@@ -467,13 +469,12 @@ TxQ::eraseAndAdvance(TxQ::FeeMultiSet::const_iterator_type candidateIter)
// Check if the next transaction for this account is earlier in the queue,
// which means we skipped it earlier, and need to try it again.
OrderCandidates o;
auto const feeNextIter = std::next(candidateIter);
bool const useAccountNext =
accountNextIter != txQAccount.transactions.end() &&
accountNextIter->first > candidateIter->seqProxy &&
(feeNextIter == byFee_.end() ||
o(accountNextIter->second, *feeNextIter));
byFee_.value_comp()(accountNextIter->second, *feeNextIter));
auto const candidateNextIter = byFee_.erase(candidateIter);
txQAccount.transactions.erase(accountIter);
@@ -1529,6 +1530,37 @@ TxQ::accept(Application& app, OpenView& view)
}
}
// All transactions that can be moved out of the queue into the open
// ledger have been. Rebuild the queue using the open ledger's
// parent hash, so that transactions paying the same fee are
// reordered.
LedgerHash const& parentHash = view.info().parentHash;
#if !NDEBUG
auto const startingSize = byFee_.size();
assert(parentHash != parentHash_);
parentHash_ = parentHash;
#endif
// byFee_ doesn't "own" the candidate objects inside it, so it's
// perfectly safe to wipe it and start over, repopulating from
// byAccount_.
//
// In the absence of a "re-sort the list in place" function, this
// was the fastest method tried to repopulate the list.
// Other methods included: create a new list and moving items over one at a
// time, create a new list and merge the old list into it.
byFee_.clear();
MaybeTx::parentHashComp = parentHash;
for (auto& [_, account] : byAccount_)
{
for (auto& [_, candidate] : account.transactions)
{
byFee_.insert(candidate);
}
}
assert(byFee_.size() == startingSize);
return ledgerChanged;
}
@@ -1740,7 +1772,7 @@ TxQ::getTxRequiredFeeAndSeq(
}
std::vector<TxQ::TxDetails>
TxQ::getAccountTxs(AccountID const& account, ReadView const& view) const
TxQ::getAccountTxs(AccountID const& account) const
{
std::vector<TxDetails> result;
@@ -1761,7 +1793,7 @@ TxQ::getAccountTxs(AccountID const& account, ReadView const& view) const
}
std::vector<TxQ::TxDetails>
TxQ::getTxs(ReadView const& view) const
TxQ::getTxs() const
{
std::vector<TxDetails> result;

View File

@@ -33,24 +33,16 @@ accountSourceCurrencies(
if (includeXRP)
currencies.insert(xrpCurrency());
// List of ripple lines.
auto& rippleLines = lrCache->getRippleLines(account);
for (auto const& item : rippleLines)
for (auto const& rspEntry : lrCache->getRippleLines(account))
{
auto rspEntry = (RippleState*)item.get();
assert(rspEntry);
if (!rspEntry)
continue;
auto& saBalance = rspEntry->getBalance();
auto& saBalance = rspEntry.getBalance();
// Filter out non
if (saBalance > beast::zero
// Have IOUs to send.
|| (rspEntry->getLimitPeer()
|| (rspEntry.getLimitPeer()
// Peer extends credit.
&& ((-saBalance) < rspEntry->getLimitPeer()))) // Credit left.
&& ((-saBalance) < rspEntry.getLimitPeer()))) // Credit left.
{
currencies.insert(saBalance.getCurrency());
}
@@ -72,19 +64,11 @@ accountDestCurrencies(
currencies.insert(xrpCurrency());
// Even if account doesn't exist
// List of ripple lines.
auto& rippleLines = lrCache->getRippleLines(account);
for (auto const& item : rippleLines)
for (auto const& rspEntry : lrCache->getRippleLines(account))
{
auto rspEntry = (RippleState*)item.get();
assert(rspEntry);
if (!rspEntry)
continue;
auto& saBalance = rspEntry.getBalance();
auto& saBalance = rspEntry->getBalance();
if (saBalance < rspEntry->getLimit()) // Can take more
if (saBalance < rspEntry.getLimit()) // Can take more
currencies.insert(saBalance.getCurrency());
}

View File

@@ -441,7 +441,7 @@ PathRequest::parseJson(Json::Value const& jvParams)
}
Json::Value
PathRequest::doClose(Json::Value const&)
PathRequest::doClose()
{
JLOG(m_journal.debug()) << iIdentifier << " closed";
std::lock_guard sl(mLock);
@@ -457,13 +457,20 @@ PathRequest::doStatus(Json::Value const&)
return jvStatus;
}
void
PathRequest::doAborting() const
{
JLOG(m_journal.info()) << iIdentifier << " aborting early";
}
std::unique_ptr<Pathfinder> const&
PathRequest::getPathFinder(
std::shared_ptr<RippleLineCache> const& cache,
hash_map<Currency, std::unique_ptr<Pathfinder>>& currency_map,
Currency const& currency,
STAmount const& dst_amount,
int const level)
int const level,
std::function<bool(void)> const& continueCallback)
{
auto i = currency_map.find(currency);
if (i != currency_map.end())
@@ -477,8 +484,8 @@ PathRequest::getPathFinder(
dst_amount,
saSendMax,
app_);
if (pathfinder->findPaths(level))
pathfinder->computePathRanks(max_paths_);
if (pathfinder->findPaths(level, continueCallback))
pathfinder->computePathRanks(max_paths_, continueCallback);
else
pathfinder.reset(); // It's a bad request - clear it.
return currency_map[currency] = std::move(pathfinder);
@@ -488,7 +495,8 @@ bool
PathRequest::findPaths(
std::shared_ptr<RippleLineCache> const& cache,
int const level,
Json::Value& jvArray)
Json::Value& jvArray,
std::function<bool(void)> const& continueCallback)
{
auto sourceCurrencies = sciSourceCurrencies;
if (sourceCurrencies.empty() && saSendMax)
@@ -515,22 +523,33 @@ PathRequest::findPaths(
hash_map<Currency, std::unique_ptr<Pathfinder>> currency_map;
for (auto const& issue : sourceCurrencies)
{
if (continueCallback && !continueCallback())
break;
JLOG(m_journal.debug())
<< iIdentifier
<< " Trying to find paths: " << STAmount(issue, 1).getFullText();
auto& pathfinder = getPathFinder(
cache, currency_map, issue.currency, dst_amount, level);
cache,
currency_map,
issue.currency,
dst_amount,
level,
continueCallback);
if (!pathfinder)
{
assert(false);
assert(continueCallback && !continueCallback());
JLOG(m_journal.debug()) << iIdentifier << " No paths found";
continue;
}
STPath fullLiquidityPath;
auto ps = pathfinder->getBestPaths(
max_paths_, fullLiquidityPath, mContext[issue], issue.account);
max_paths_,
fullLiquidityPath,
mContext[issue],
issue.account,
continueCallback);
mContext[issue] = ps;
auto& sourceAccount = !isXRP(issue.account)
@@ -628,7 +647,10 @@ PathRequest::findPaths(
}
Json::Value
PathRequest::doUpdate(std::shared_ptr<RippleLineCache> const& cache, bool fast)
PathRequest::doUpdate(
std::shared_ptr<RippleLineCache> const& cache,
bool fast,
std::function<bool(void)> const& continueCallback)
{
using namespace std::chrono;
JLOG(m_journal.debug())
@@ -699,7 +721,7 @@ PathRequest::doUpdate(std::shared_ptr<RippleLineCache> const& cache, bool fast)
JLOG(m_journal.debug()) << iIdentifier << " processing at level " << iLevel;
Json::Value jvArray = Json::arrayValue;
if (findPaths(cache, iLevel, jvArray))
if (findPaths(cache, iLevel, jvArray, continueCallback))
{
bLastSuccess = jvArray.size() != 0;
newStatus[jss::alternatives] = std::move(jvArray);
@@ -730,7 +752,7 @@ PathRequest::doUpdate(std::shared_ptr<RippleLineCache> const& cache, bool fast)
}
InfoSub::pointer
PathRequest::getSubscriber()
PathRequest::getSubscriber() const
{
return wpSubscriber.lock();
}

View File

@@ -43,10 +43,10 @@ class PathRequests;
// Return values from parseJson <0 = invalid, >0 = valid
#define PFR_PJ_INVALID -1
#define PFR_PJ_NOCHANGE 0
#define PFR_PJ_CHANGE 1
class PathRequest : public std::enable_shared_from_this<PathRequest>,
public CountedObject<PathRequest>
class PathRequest final : public InfoSubRequest,
public std::enable_shared_from_this<PathRequest>,
public CountedObject<PathRequest>
{
public:
using wptr = std::weak_ptr<PathRequest>;
@@ -55,8 +55,6 @@ public:
using wref = const wptr&;
public:
// VFALCO TODO Break the cyclic dependency on InfoSub
// path_find semantics
// Subscriber is updated
PathRequest(
@@ -91,15 +89,20 @@ public:
doCreate(std::shared_ptr<RippleLineCache> const&, Json::Value const&);
Json::Value
doClose(Json::Value const&);
doClose() override;
Json::Value
doStatus(Json::Value const&);
doStatus(Json::Value const&) override;
void
doAborting() const;
// update jvStatus
Json::Value
doUpdate(std::shared_ptr<RippleLineCache> const&, bool fast);
doUpdate(
std::shared_ptr<RippleLineCache> const&,
bool fast,
std::function<bool(void)> const& continueCallback = {});
InfoSub::pointer
getSubscriber();
getSubscriber() const;
bool
hasCompletion();
@@ -113,13 +116,18 @@ private:
hash_map<Currency, std::unique_ptr<Pathfinder>>&,
Currency const&,
STAmount const&,
int const);
int const,
std::function<bool(void)> const&);
/** Finds and sets a PathSet in the JSON argument.
Returns false if the source currencies are inavlid.
*/
bool
findPaths(std::shared_ptr<RippleLineCache> const&, int const, Json::Value&);
findPaths(
std::shared_ptr<RippleLineCache> const&,
int const,
Json::Value&,
std::function<bool(void)> const&);
int
parseJson(Json::Value const&);
@@ -156,7 +164,7 @@ private:
int iLevel;
bool bLastSuccess;
int iIdentifier;
int const iIdentifier;
std::chrono::steady_clock::time_point const created_;
std::chrono::steady_clock::time_point quick_reply_;

View File

@@ -40,8 +40,12 @@ PathRequests::getLineCache(
{
std::lock_guard sl(mLock);
std::uint32_t lineSeq = mLineCache ? mLineCache->getLedger()->seq() : 0;
std::uint32_t lgrSeq = ledger->seq();
auto lineCache = lineCache_.lock();
std::uint32_t const lineSeq = lineCache ? lineCache->getLedger()->seq() : 0;
std::uint32_t const lgrSeq = ledger->seq();
JLOG(mJournal.debug()) << "getLineCache has cache for " << lineSeq
<< ", considering " << lgrSeq;
if ((lineSeq == 0) || // no ledger
(authoritative && (lgrSeq > lineSeq)) || // newer authoritative ledger
@@ -49,9 +53,15 @@ PathRequests::getLineCache(
((lgrSeq + 8) < lineSeq)) || // we jumped way back for some reason
(lgrSeq > (lineSeq + 8))) // we jumped way forward for some reason
{
mLineCache = std::make_shared<RippleLineCache>(ledger);
JLOG(mJournal.debug())
<< "getLineCache creating new cache for " << lgrSeq;
// Assign to the local before the member, because the member is a
// weak_ptr, and will immediately discard it if there are no other
// references.
lineCache_ = lineCache = std::make_shared<RippleLineCache>(
ledger, app_.journal("RippleLineCache"));
}
return mLineCache;
return lineCache;
}
void
@@ -78,8 +88,20 @@ PathRequests::updateAll(std::shared_ptr<ReadView const> const& inLedger)
int processed = 0, removed = 0;
auto getSubscriber =
[](PathRequest::pointer const& request) -> InfoSub::pointer {
if (auto ipSub = request->getSubscriber();
ipSub && ipSub->getRequest() == request)
{
return ipSub;
}
request->doAborting();
return nullptr;
};
do
{
JLOG(mJournal.trace()) << "updateAll looping";
for (auto const& wr : requests)
{
if (app_.getJobQueue().isStopping())
@@ -87,25 +109,40 @@ PathRequests::updateAll(std::shared_ptr<ReadView const> const& inLedger)
auto request = wr.lock();
bool remove = true;
JLOG(mJournal.trace())
<< "updateAll request " << (request ? "" : "not ") << "found";
if (request)
{
auto continueCallback = [&getSubscriber, &request]() {
// This callback is used by doUpdate to determine whether to
// continue working. If getSubscriber returns null, that
// indicates that this request is no longer relevant.
return (bool)getSubscriber(request);
};
if (!request->needsUpdate(
newRequests, cache->getLedger()->seq()))
remove = false;
else
{
if (auto ipSub = request->getSubscriber())
if (auto ipSub = getSubscriber(request))
{
if (!ipSub->getConsumer().warn())
{
Json::Value update =
request->doUpdate(cache, false);
// Release the shared ptr to the subscriber so that
// it can be freed if the client disconnects, and
// thus fail to lock later.
ipSub.reset();
Json::Value update = request->doUpdate(
cache, false, continueCallback);
request->updateComplete();
update[jss::type] = "path_find";
ipSub->send(update, false);
remove = false;
++processed;
if ((ipSub = getSubscriber(request)))
{
ipSub->send(update, false);
remove = false;
++processed;
}
}
}
else if (request->hasCompletion())
@@ -178,6 +215,13 @@ PathRequests::updateAll(std::shared_ptr<ReadView const> const& inLedger)
<< " processed and " << removed << " removed";
}
bool
PathRequests::requestsPending() const
{
std::lock_guard sl(mLock);
return !requests_.empty();
}
void
PathRequests::insertPathRequest(PathRequest::pointer const& req)
{
@@ -211,7 +255,7 @@ PathRequests::makePathRequest(
if (valid)
{
subscriber->setPathRequest(req);
subscriber->setRequest(req);
insertPathRequest(req);
app_.getLedgerMaster().newPathRequest();
}
@@ -258,7 +302,8 @@ PathRequests::doLegacyPathRequest(
std::shared_ptr<ReadView const> const& inLedger,
Json::Value const& request)
{
auto cache = std::make_shared<RippleLineCache>(inLedger);
auto cache = std::make_shared<RippleLineCache>(
inLedger, app_.journal("RippleLineCache"));
auto req = std::make_shared<PathRequest>(
app_, [] {}, consumer, ++mLastIdentifier, *this, mJournal);

View File

@@ -51,6 +51,9 @@ public:
void
updateAll(std::shared_ptr<ReadView const> const& ledger);
bool
requestsPending() const;
std::shared_ptr<RippleLineCache>
getLineCache(
std::shared_ptr<ReadView const> const& ledger,
@@ -109,11 +112,11 @@ private:
std::vector<PathRequest::wptr> requests_;
// Use a RippleLineCache
std::shared_ptr<RippleLineCache> mLineCache;
std::weak_ptr<RippleLineCache> lineCache_;
std::atomic<int> mLastIdentifier;
std::recursive_mutex mLock;
std::recursive_mutex mutable mLock;
};
} // namespace ripple

View File

@@ -24,6 +24,7 @@
#include <ripple/app/paths/RippleLineCache.h>
#include <ripple/app/paths/impl/PathfinderUtils.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/join.h>
#include <ripple/core/Config.h>
#include <ripple/core/JobQueue.h>
#include <ripple/json/to_string.h>
@@ -191,8 +192,11 @@ Pathfinder::Pathfinder(
}
bool
Pathfinder::findPaths(int searchLevel)
Pathfinder::findPaths(
int searchLevel,
std::function<bool(void)> const& continueCallback)
{
JLOG(j_.trace()) << "findPaths start";
if (mDstAmount == beast::zero)
{
// No need to send zero money.
@@ -316,10 +320,13 @@ Pathfinder::findPaths(int searchLevel)
// Now iterate over all paths for that paymentType.
for (auto const& costedPath : mPathTable[paymentType])
{
if (continueCallback && !continueCallback())
return false;
// Only use paths with at most the current search level.
if (costedPath.searchLevel <= searchLevel)
{
addPathsForType(costedPath.type);
JLOG(j_.trace()) << "findPaths trying payment type " << paymentType;
addPathsForType(costedPath.type, continueCallback);
if (mCompletePaths.size() > PATHFINDER_MAX_COMPLETE_PATHS)
break;
@@ -401,7 +408,9 @@ Pathfinder::getPathLiquidity(
}
void
Pathfinder::computePathRanks(int maxPaths)
Pathfinder::computePathRanks(
int maxPaths,
std::function<bool(void)> const& continueCallback)
{
mRemainingAmount = convertAmount(mDstAmount, convert_all_);
@@ -439,7 +448,7 @@ Pathfinder::computePathRanks(int maxPaths)
JLOG(j_.debug()) << "Default path causes exception";
}
rankPaths(maxPaths, mCompletePaths, mPathRanks);
rankPaths(maxPaths, mCompletePaths, mPathRanks, continueCallback);
}
static bool
@@ -480,8 +489,11 @@ void
Pathfinder::rankPaths(
int maxPaths,
STPathSet const& paths,
std::vector<PathRank>& rankedPaths)
std::vector<PathRank>& rankedPaths,
std::function<bool(void)> const& continueCallback)
{
JLOG(j_.trace()) << "rankPaths with " << paths.size() << " candidates, and "
<< maxPaths << " maximum";
rankedPaths.clear();
rankedPaths.reserve(paths.size());
@@ -499,6 +511,8 @@ Pathfinder::rankPaths(
for (int i = 0; i < paths.size(); ++i)
{
if (continueCallback && !continueCallback())
return;
auto const& currentPath = paths[i];
if (!currentPath.empty())
{
@@ -554,7 +568,8 @@ Pathfinder::getBestPaths(
int maxPaths,
STPath& fullLiquidityPath,
STPathSet const& extraPaths,
AccountID const& srcIssuer)
AccountID const& srcIssuer,
std::function<bool(void)> const& continueCallback)
{
JLOG(j_.debug()) << "findPaths: " << mCompletePaths.size() << " paths and "
<< extraPaths.size() << " extras";
@@ -567,7 +582,7 @@ Pathfinder::getBestPaths(
isXRP(mSrcCurrency) || (srcIssuer == mSrcAccount);
std::vector<PathRank> extraPathRanks;
rankPaths(maxPaths, extraPaths, extraPathRanks);
rankPaths(maxPaths, extraPaths, extraPathRanks, continueCallback);
STPathSet bestPaths;
@@ -582,6 +597,8 @@ Pathfinder::getBestPaths(
while (pathsIterator != mPathRanks.end() ||
extraPathsIterator != extraPathRanks.end())
{
if (continueCallback && !continueCallback())
break;
bool usePath = false;
bool useExtraPath = false;
@@ -692,7 +709,8 @@ Pathfinder::getPathsOut(
Currency const& currency,
AccountID const& account,
bool isDstCurrency,
AccountID const& dstAccount)
AccountID const& dstAccount,
std::function<bool(void)> const& continueCallback)
{
Issue const issue(currency, account);
@@ -717,30 +735,27 @@ Pathfinder::getPathsOut(
{
count = app_.getOrderBookDB().getBookSize(issue);
for (auto const& item : mRLCache->getRippleLines(account))
for (auto const& rspEntry : mRLCache->getRippleLines(account))
{
RippleState* rspEntry = (RippleState*)item.get();
if (currency != rspEntry->getLimit().getCurrency())
if (currency != rspEntry.getLimit().getCurrency())
{
}
else if (
rspEntry->getBalance() <= beast::zero &&
(!rspEntry->getLimitPeer() ||
-rspEntry->getBalance() >= rspEntry->getLimitPeer() ||
(bAuthRequired && !rspEntry->getAuth())))
rspEntry.getBalance() <= beast::zero &&
(!rspEntry.getLimitPeer() ||
-rspEntry.getBalance() >= rspEntry.getLimitPeer() ||
(bAuthRequired && !rspEntry.getAuth())))
{
}
else if (
isDstCurrency && dstAccount == rspEntry->getAccountIDPeer())
else if (isDstCurrency && dstAccount == rspEntry.getAccountIDPeer())
{
count += 10000; // count a path to the destination extra
}
else if (rspEntry->getNoRipplePeer())
else if (rspEntry.getNoRipplePeer())
{
// This probably isn't a useful path out
}
else if (rspEntry->getFreezePeer())
else if (rspEntry.getFreezePeer())
{
// Not a useful path out
}
@@ -758,17 +773,26 @@ void
Pathfinder::addLinks(
STPathSet const& currentPaths, // The paths to build from
STPathSet& incompletePaths, // The set of partial paths we add to
int addFlags)
int addFlags,
std::function<bool(void)> const& continueCallback)
{
JLOG(j_.debug()) << "addLink< on " << currentPaths.size()
<< " source(s), flags=" << addFlags;
for (auto const& path : currentPaths)
addLink(path, incompletePaths, addFlags);
{
if (continueCallback && !continueCallback())
return;
addLink(path, incompletePaths, addFlags, continueCallback);
}
}
STPathSet&
Pathfinder::addPathsForType(PathType const& pathType)
Pathfinder::addPathsForType(
PathType const& pathType,
std::function<bool(void)> const& continueCallback)
{
JLOG(j_.warn()) << "addPathsForType "
<< CollectionAndDelimiter(pathType, ", ");
// See if the set of paths for this type already exists.
auto it = mPaths.find(pathType);
if (it != mPaths.end())
@@ -777,13 +801,16 @@ Pathfinder::addPathsForType(PathType const& pathType)
// Otherwise, if the type has no nodes, return the empty path.
if (pathType.empty())
return mPaths[pathType];
if (continueCallback && !continueCallback())
return mPaths[{}];
// Otherwise, get the paths for the parent PathType by calling
// addPathsForType recursively.
PathType parentPathType = pathType;
parentPathType.pop_back();
STPathSet const& parentPaths = addPathsForType(parentPathType);
STPathSet const& parentPaths =
addPathsForType(parentPathType, continueCallback);
STPathSet& pathsOut = mPaths[pathType];
JLOG(j_.debug()) << "getPaths< adding onto '"
@@ -803,26 +830,38 @@ Pathfinder::addPathsForType(PathType const& pathType)
break;
case nt_ACCOUNTS:
addLinks(parentPaths, pathsOut, afADD_ACCOUNTS);
addLinks(parentPaths, pathsOut, afADD_ACCOUNTS, continueCallback);
break;
case nt_BOOKS:
addLinks(parentPaths, pathsOut, afADD_BOOKS);
addLinks(parentPaths, pathsOut, afADD_BOOKS, continueCallback);
break;
case nt_XRP_BOOK:
addLinks(parentPaths, pathsOut, afADD_BOOKS | afOB_XRP);
addLinks(
parentPaths,
pathsOut,
afADD_BOOKS | afOB_XRP,
continueCallback);
break;
case nt_DEST_BOOK:
addLinks(parentPaths, pathsOut, afADD_BOOKS | afOB_LAST);
addLinks(
parentPaths,
pathsOut,
afADD_BOOKS | afOB_LAST,
continueCallback);
break;
case nt_DESTINATION:
// FIXME: What if a different issuer was specified on the
// destination amount?
// TODO(tom): what does this even mean? Should it be a JIRA?
addLinks(parentPaths, pathsOut, afADD_ACCOUNTS | afAC_LAST);
addLinks(
parentPaths,
pathsOut,
afADD_ACCOUNTS | afAC_LAST,
continueCallback);
break;
}
@@ -893,7 +932,8 @@ void
Pathfinder::addLink(
const STPath& currentPath, // The path to build from
STPathSet& incompletePaths, // The set of partial paths we add to
int addFlags)
int addFlags,
std::function<bool(void)> const& continueCallback)
{
auto const& pathEnd = currentPath.empty() ? mSource : currentPath.back();
auto const& uEndCurrency = pathEnd.getCurrency();
@@ -906,7 +946,8 @@ Pathfinder::addLink(
// rather than the ultimate destination?
bool const hasEffectiveDestination = mEffectiveDst != mDstAccount;
JLOG(j_.trace()) << "addLink< flags=" << addFlags << " onXRP=" << bOnXRP;
JLOG(j_.trace()) << "addLink< flags=" << addFlags << " onXRP=" << bOnXRP
<< " completePaths size=" << mCompletePaths.size();
JLOG(j_.trace()) << currentPath.getJson(JsonOptions::none);
if (addFlags & afADD_ACCOUNTS)
@@ -940,15 +981,11 @@ Pathfinder::addLink(
AccountCandidates candidates;
candidates.reserve(rippleLines.size());
for (auto const& item : rippleLines)
for (auto const& rs : rippleLines)
{
auto* rs = dynamic_cast<RippleState const*>(item.get());
if (!rs)
{
JLOG(j_.error()) << "Couldn't decipher RippleState";
continue;
}
auto const& acct = rs->getAccountIDPeer();
if (continueCallback && !continueCallback())
return;
auto const& acct = rs.getAccountIDPeer();
if (hasEffectiveDestination && (acct == mDstAccount))
{
@@ -963,18 +1000,18 @@ Pathfinder::addLink(
continue;
}
if ((uEndCurrency == rs->getLimit().getCurrency()) &&
if ((uEndCurrency == rs.getLimit().getCurrency()) &&
!currentPath.hasSeen(acct, uEndCurrency, acct))
{
// path is for correct currency and has not been seen
if (rs->getBalance() <= beast::zero &&
(!rs->getLimitPeer() ||
-rs->getBalance() >= rs->getLimitPeer() ||
(bRequireAuth && !rs->getAuth())))
if (rs.getBalance() <= beast::zero &&
(!rs.getLimitPeer() ||
-rs.getBalance() >= rs.getLimitPeer() ||
(bRequireAuth && !rs.getAuth())))
{
// path has no credit
}
else if (bIsNoRippleOut && rs->getNoRipple())
else if (bIsNoRippleOut && rs.getNoRipple())
{
// Can't leave on this path
}
@@ -1011,7 +1048,8 @@ Pathfinder::addLink(
uEndCurrency,
acct,
bIsEndCurrency,
mEffectiveDst);
mEffectiveDst,
continueCallback);
if (out)
candidates.push_back({out, acct});
}
@@ -1039,6 +1077,8 @@ Pathfinder::addLink(
auto it = candidates.begin();
while (count-- != 0)
{
if (continueCallback && !continueCallback())
return;
// Add accounts to incompletePaths
STPathElement pathElement(
STPathElement::typeAccount,
@@ -1083,17 +1123,17 @@ Pathfinder::addLink(
for (auto const& book : books)
{
if (continueCallback && !continueCallback())
return;
if (!currentPath.hasSeen(
xrpAccount(),
book->getCurrencyOut(),
book->getIssuerOut()) &&
!issueMatchesOrigin(book->book().out) &&
xrpAccount(), book.out.currency, book.out.account) &&
!issueMatchesOrigin(book.out) &&
(!bDestOnly ||
(book->getCurrencyOut() == mDstAmount.getCurrency())))
(book.out.currency == mDstAmount.getCurrency())))
{
STPath newPath(currentPath);
if (book->getCurrencyOut().isZero())
if (book.out.currency.isZero())
{ // to XRP
// add the order book itself
@@ -1116,9 +1156,9 @@ Pathfinder::addLink(
incompletePaths.push_back(newPath);
}
else if (!currentPath.hasSeen(
book->getIssuerOut(),
book->getCurrencyOut(),
book->getIssuerOut()))
book.out.account,
book.out.currency,
book.out.account))
{
// Don't want the book if we've already seen the issuer
// book -> account -> book
@@ -1131,8 +1171,8 @@ Pathfinder::addLink(
STPathElement::typeCurrency |
STPathElement::typeIssuer,
xrpAccount(),
book->getCurrencyOut(),
book->getIssuerOut());
book.out.currency,
book.out.account);
}
else
{
@@ -1141,19 +1181,19 @@ Pathfinder::addLink(
STPathElement::typeCurrency |
STPathElement::typeIssuer,
xrpAccount(),
book->getCurrencyOut(),
book->getIssuerOut());
book.out.currency,
book.out.account);
}
if (hasEffectiveDestination &&
book->getIssuerOut() == mDstAccount &&
book->getCurrencyOut() == mDstAmount.getCurrency())
book.out.account == mDstAccount &&
book.out.currency == mDstAmount.getCurrency())
{
// We skipped a required issuer
}
else if (
book->getIssuerOut() == mEffectiveDst &&
book->getCurrencyOut() == mDstAmount.getCurrency())
book.out.account == mEffectiveDst &&
book.out.currency == mDstAmount.getCurrency())
{ // with the destination account, this path is
// complete
JLOG(j_.trace())
@@ -1168,9 +1208,9 @@ Pathfinder::addLink(
newPath,
STPathElement(
STPathElement::typeAccount,
book->getIssuerOut(),
book->getCurrencyOut(),
book->getIssuerOut()));
book.out.account,
book.out.currency,
book.out.account));
}
}
}

View File

@@ -22,6 +22,7 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleLineCache.h>
#include <ripple/basics/CountedObject.h>
#include <ripple/core/LoadEvent.h>
#include <ripple/protocol/STAmount.h>
#include <ripple/protocol/STPathSet.h>
@@ -34,7 +35,7 @@ namespace ripple {
@see RippleCalc
*/
class Pathfinder
class Pathfinder : public CountedObject<Pathfinder>
{
public:
/** Construct a pathfinder without an issuer.*/
@@ -56,11 +57,15 @@ public:
initPathTable();
bool
findPaths(int searchLevel);
findPaths(
int searchLevel,
std::function<bool(void)> const& continueCallback = {});
/** Compute the rankings of the paths. */
void
computePathRanks(int maxPaths);
computePathRanks(
int maxPaths,
std::function<bool(void)> const& continueCallback = {});
/* Get the best paths, up to maxPaths in number, from mCompletePaths.
@@ -72,7 +77,8 @@ public:
int maxPaths,
STPath& fullLiquidityPath,
STPathSet const& extraPaths,
AccountID const& srcIssuer);
AccountID const& srcIssuer,
std::function<bool(void)> const& continueCallback = {});
enum NodeType {
nt_SOURCE, // The source account: with an issuer account, if needed.
@@ -127,7 +133,9 @@ private:
// Add all paths of one type to mCompletePaths.
STPathSet&
addPathsForType(PathType const& type);
addPathsForType(
PathType const& type,
std::function<bool(void)> const& continueCallback);
bool
issueMatchesOrigin(Issue const&);
@@ -137,20 +145,23 @@ private:
Currency const& currency,
AccountID const& account,
bool isDestCurrency,
AccountID const& dest);
AccountID const& dest,
std::function<bool(void)> const& continueCallback);
void
addLink(
STPath const& currentPath,
STPathSet& incompletePaths,
int addFlags);
int addFlags,
std::function<bool(void)> const& continueCallback);
// Call addLink() for each path in currentPaths.
void
addLinks(
STPathSet const& currentPaths,
STPathSet& incompletePaths,
int addFlags);
int addFlags,
std::function<bool(void)> const& continueCallback);
// Compute the liquidity for a path. Return tesSUCCESS if it has has enough
// liquidity to be worth keeping, otherwise an error.
@@ -178,7 +189,8 @@ private:
rankPaths(
int maxPaths,
STPathSet const& paths,
std::vector<PathRank>& rankedPaths);
std::vector<PathRank>& rankedPaths,
std::function<bool(void)> const& continueCallback);
AccountID mSrcAccount;
AccountID mDstAccount;

View File

@@ -18,30 +18,47 @@
//==============================================================================
#include <ripple/app/paths/RippleLineCache.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/ledger/OpenView.h>
namespace ripple {
RippleLineCache::RippleLineCache(std::shared_ptr<ReadView const> const& ledger)
RippleLineCache::RippleLineCache(
std::shared_ptr<ReadView const> const& ledger,
beast::Journal j)
: journal_(j)
{
// We want the caching that OpenView provides
// And we need to own a shared_ptr to the input view
// VFALCO TODO This should be a CachedLedger
mLedger = std::make_shared<OpenView>(&*ledger, ledger);
mLedger = ledger;
JLOG(journal_.debug()) << "RippleLineCache created for ledger "
<< mLedger->info().seq;
}
std::vector<RippleState::pointer> const&
RippleLineCache::~RippleLineCache()
{
JLOG(journal_.debug()) << "~RippleLineCache destroyed for ledger "
<< mLedger->info().seq << " with " << lines_.size()
<< " accounts";
}
std::vector<PathFindTrustLine> const&
RippleLineCache::getRippleLines(AccountID const& accountID)
{
AccountKey key(accountID, hasher_(accountID));
std::lock_guard sl(mLock);
auto [it, inserted] =
lines_.emplace(key, std::vector<RippleState::pointer>());
auto [it, inserted] = lines_.emplace(key, std::vector<PathFindTrustLine>());
if (inserted)
it->second = getRippleStateItems(accountID, *mLedger);
it->second = PathFindTrustLine::getItems(accountID, *mLedger);
JLOG(journal_.debug()) << "RippleLineCache getRippleLines for ledger "
<< mLedger->info().seq << " found "
<< it->second.size() << " lines for "
<< (inserted ? "new " : "existing ") << accountID
<< " out of a total of " << lines_.size()
<< " accounts";
return it->second;
}

View File

@@ -21,8 +21,10 @@
#define RIPPLE_APP_PATHS_RIPPLELINECACHE_H_INCLUDED
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/basics/CountedObject.h>
#include <ripple/basics/hardened_hash.h>
#include <cstddef>
#include <memory>
#include <mutex>
@@ -31,10 +33,13 @@
namespace ripple {
// Used by Pathfinder
class RippleLineCache
class RippleLineCache final : public CountedObject<RippleLineCache>
{
public:
explicit RippleLineCache(std::shared_ptr<ReadView const> const& l);
explicit RippleLineCache(
std::shared_ptr<ReadView const> const& l,
beast::Journal j);
~RippleLineCache();
std::shared_ptr<ReadView const> const&
getLedger() const
@@ -42,7 +47,7 @@ public:
return mLedger;
}
std::vector<RippleState::pointer> const&
std::vector<PathFindTrustLine> const&
getRippleLines(AccountID const& accountID);
private:
@@ -51,7 +56,9 @@ private:
ripple::hardened_hash<> hasher_;
std::shared_ptr<ReadView const> mLedger;
struct AccountKey
beast::Journal journal_;
struct AccountKey final : public CountedObject<AccountKey>
{
AccountID account_;
std::size_t hash_value_;
@@ -90,7 +97,7 @@ private:
};
};
hash_map<AccountKey, std::vector<RippleState::pointer>, AccountKey::Hash>
hash_map<AccountKey, std::vector<PathFindTrustLine>, AccountKey::Hash>
lines_;
};

View File

@@ -1,85 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/main/Application.h>
#include <ripple/app/paths/RippleState.h>
#include <ripple/protocol/STAmount.h>
#include <cstdint>
#include <memory>
namespace ripple {
RippleState::pointer
RippleState::makeItem(
AccountID const& accountID,
std::shared_ptr<SLE const> sle)
{
// VFALCO Does this ever happen in practice?
if (!sle || sle->getType() != ltRIPPLE_STATE)
return {};
return std::make_shared<RippleState>(std::move(sle), accountID);
}
RippleState::RippleState(
std::shared_ptr<SLE const>&& sle,
AccountID const& viewAccount)
: sle_(std::move(sle))
, mFlags(sle_->getFieldU32(sfFlags))
, mLowLimit(sle_->getFieldAmount(sfLowLimit))
, mHighLimit(sle_->getFieldAmount(sfHighLimit))
, mLowID(mLowLimit.getIssuer())
, mHighID(mHighLimit.getIssuer())
, lowQualityIn_(sle_->getFieldU32(sfLowQualityIn))
, lowQualityOut_(sle_->getFieldU32(sfLowQualityOut))
, highQualityIn_(sle_->getFieldU32(sfHighQualityIn))
, highQualityOut_(sle_->getFieldU32(sfHighQualityOut))
, mBalance(sle_->getFieldAmount(sfBalance))
{
mViewLowest = (mLowID == viewAccount);
if (!mViewLowest)
mBalance.negate();
}
Json::Value
RippleState::getJson(int)
{
Json::Value ret(Json::objectValue);
ret["low_id"] = to_string(mLowID);
ret["high_id"] = to_string(mHighID);
return ret;
}
std::vector<RippleState::pointer>
getRippleStateItems(AccountID const& accountID, ReadView const& view)
{
std::vector<RippleState::pointer> items;
forEachItem(
view,
accountID,
[&items, &accountID](std::shared_ptr<SLE const> const& sleCur) {
auto ret = RippleState::makeItem(accountID, sleCur);
if (ret)
items.push_back(ret);
});
return items;
}
} // namespace ripple

View File

@@ -0,0 +1,113 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/main/Application.h>
#include <ripple/app/paths/TrustLine.h>
#include <ripple/protocol/STAmount.h>
#include <cstdint>
#include <memory>
namespace ripple {
TrustLineBase::TrustLineBase(
std::shared_ptr<SLE const> const& sle,
AccountID const& viewAccount)
: key_(sle->key())
, mLowLimit(sle->getFieldAmount(sfLowLimit))
, mHighLimit(sle->getFieldAmount(sfHighLimit))
, mBalance(sle->getFieldAmount(sfBalance))
, mFlags(sle->getFieldU32(sfFlags))
, mViewLowest(mLowLimit.getIssuer() == viewAccount)
{
if (!mViewLowest)
mBalance.negate();
}
Json::Value
TrustLineBase::getJson(int)
{
Json::Value ret(Json::objectValue);
ret["low_id"] = to_string(mLowLimit.getIssuer());
ret["high_id"] = to_string(mHighLimit.getIssuer());
return ret;
}
std::optional<PathFindTrustLine>
PathFindTrustLine::makeItem(
AccountID const& accountID,
std::shared_ptr<SLE const> const& sle)
{
if (!sle || sle->getType() != ltRIPPLE_STATE)
return {};
return std::optional{PathFindTrustLine{sle, accountID}};
}
namespace detail {
template <class T>
std::vector<T>
getTrustLineItems(AccountID const& accountID, ReadView const& view)
{
std::vector<T> items;
forEachItem(
view,
accountID,
[&items, &accountID](std::shared_ptr<SLE const> const& sleCur) {
auto ret = T::makeItem(accountID, sleCur);
if (ret)
items.push_back(std::move(*ret));
});
return items;
}
} // namespace detail
std::vector<PathFindTrustLine>
PathFindTrustLine::getItems(AccountID const& accountID, ReadView const& view)
{
return detail::getTrustLineItems<PathFindTrustLine>(accountID, view);
}
RPCTrustLine::RPCTrustLine(
std::shared_ptr<SLE const> const& sle,
AccountID const& viewAccount)
: TrustLineBase(sle, viewAccount)
, lowQualityIn_(sle->getFieldU32(sfLowQualityIn))
, lowQualityOut_(sle->getFieldU32(sfLowQualityOut))
, highQualityIn_(sle->getFieldU32(sfHighQualityIn))
, highQualityOut_(sle->getFieldU32(sfHighQualityOut))
{
}
std::optional<RPCTrustLine>
RPCTrustLine::makeItem(
AccountID const& accountID,
std::shared_ptr<SLE const> const& sle)
{
if (!sle || sle->getType() != ltRIPPLE_STATE)
return {};
return std::optional{RPCTrustLine{sle, accountID}};
}
std::vector<RPCTrustLine>
RPCTrustLine::getItems(AccountID const& accountID, ReadView const& view)
{
return detail::getTrustLineItems<RPCTrustLine>(accountID, view);
}
} // namespace ripple

View File

@@ -20,12 +20,14 @@
#ifndef RIPPLE_APP_PATHS_RIPPLESTATE_H_INCLUDED
#define RIPPLE_APP_PATHS_RIPPLESTATE_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/ledger/View.h>
#include <ripple/protocol/Rate.h>
#include <ripple/protocol/STAmount.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <cstdint>
#include <memory> // <memory>
#include <optional>
namespace ripple {
@@ -34,30 +36,32 @@ namespace ripple {
"low" account and a "high" account. This wraps the
SLE and expresses its data from the perspective of
a chosen account on the line.
This wrapper is primarily used in the path finder and there can easily be
tens of millions of instances of this class. When modifying this class think
carefully about the memory implications.
*/
// VFALCO TODO Rename to TrustLine
class RippleState
class TrustLineBase
{
public:
// VFALCO Why is this shared_ptr?
using pointer = std::shared_ptr<RippleState>;
protected:
// This class should not be instantiated directly. Use one of the derived
// classes.
TrustLineBase(
std::shared_ptr<SLE const> const& sle,
AccountID const& viewAccount);
~TrustLineBase() = default;
TrustLineBase(TrustLineBase const&) = default;
TrustLineBase&
operator=(TrustLineBase const&) = delete;
TrustLineBase(TrustLineBase&&) = default;
public:
RippleState() = delete;
virtual ~RippleState() = default;
static RippleState::pointer
makeItem(AccountID const& accountID, std::shared_ptr<SLE const> sle);
// Must be public, for make_shared
RippleState(std::shared_ptr<SLE const>&& sle, AccountID const& viewAccount);
/** Returns the state map key for the ledger entry. */
uint256
uint256 const&
key() const
{
return sle_->key();
return key_;
}
// VFALCO Take off the "get" from each function name
@@ -65,13 +69,13 @@ public:
AccountID const&
getAccountID() const
{
return mViewLowest ? mLowID : mHighID;
return mViewLowest ? mLowLimit.getIssuer() : mHighLimit.getIssuer();
}
AccountID const&
getAccountIDPeer() const
{
return !mViewLowest ? mLowID : mHighID;
return !mViewLowest ? mLowLimit.getIssuer() : mHighLimit.getIssuer();
}
// True, Provided auth to peer.
@@ -137,6 +141,52 @@ public:
return !mViewLowest ? mLowLimit : mHighLimit;
}
Json::Value
getJson(int);
protected:
uint256 key_;
STAmount const mLowLimit;
STAmount const mHighLimit;
STAmount mBalance;
std::uint32_t mFlags;
bool mViewLowest;
};
// This wrapper is used for the path finder
class PathFindTrustLine final : public TrustLineBase,
public CountedObject<PathFindTrustLine>
{
using TrustLineBase::TrustLineBase;
public:
PathFindTrustLine() = delete;
static std::optional<PathFindTrustLine>
makeItem(AccountID const& accountID, std::shared_ptr<SLE const> const& sle);
static std::vector<PathFindTrustLine>
getItems(AccountID const& accountID, ReadView const& view);
};
// This wrapper is used for the `AccountLines` command and includes the quality
// in and quality out values.
class RPCTrustLine final : public TrustLineBase,
public CountedObject<RPCTrustLine>
{
using TrustLineBase::TrustLineBase;
public:
RPCTrustLine() = delete;
RPCTrustLine(
std::shared_ptr<SLE const> const& sle,
AccountID const& viewAccount);
Rate const&
getQualityIn() const
{
@@ -149,33 +199,19 @@ public:
return mViewLowest ? lowQualityOut_ : highQualityOut_;
}
Json::Value
getJson(int);
static std::optional<RPCTrustLine>
makeItem(AccountID const& accountID, std::shared_ptr<SLE const> const& sle);
static std::vector<RPCTrustLine>
getItems(AccountID const& accountID, ReadView const& view);
private:
std::shared_ptr<SLE const> sle_;
bool mViewLowest;
std::uint32_t mFlags;
STAmount const& mLowLimit;
STAmount const& mHighLimit;
AccountID const& mLowID;
AccountID const& mHighID;
Rate lowQualityIn_;
Rate lowQualityOut_;
Rate highQualityIn_;
Rate highQualityOut_;
STAmount mBalance;
};
std::vector<RippleState::pointer>
getRippleStateItems(AccountID const& accountID, ReadView const& view);
} // namespace ripple
#endif

View File

@@ -125,7 +125,7 @@ public:
TxMeta const& meta,
uint256 const& nodestoreHash,
beast::Journal j)
: accounts(meta.getAffectedAccounts(j))
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
, txHash(meta.getTxID())

View File

@@ -222,7 +222,7 @@ saveValidatedLedger(
hotLEDGER, std::move(s.modData()), ledger->info().hash, seq);
}
AcceptedLedger::pointer aLedger;
std::shared_ptr<AcceptedLedger> aLedger;
try
{
aLedger = app.getAcceptedLedgerCache().fetch(ledger->info().hash);
@@ -269,9 +269,8 @@ saveValidatedLedger(
std::string const ledgerSeq(std::to_string(seq));
for (auto const& [_, acceptedLedgerTx] : aLedger->getMap())
for (auto const& acceptedLedgerTx : *aLedger)
{
(void)_;
uint256 transactionID = acceptedLedgerTx->getTransactionID();
std::string const txnId(to_string(transactionID));
@@ -317,7 +316,7 @@ saveValidatedLedger(
JLOG(j.trace()) << "ActTx: " << sql;
*db << sql;
}
else if (auto const sleTxn = acceptedLedgerTx->getTxn();
else if (auto const& sleTxn = acceptedLedgerTx->getTxn();
!isPseudoTx(*sleTxn))
{
// It's okay for pseudo transactions to not affect any

View File

@@ -79,7 +79,8 @@ saveLedgerMeta(
if (app.config().useTxTables())
{
AcceptedLedger::pointer const aLedger = [&app, ledger] {
auto const aLedger = [&app,
ledger]() -> std::shared_ptr<AcceptedLedger> {
try
{
auto aLedger =
@@ -99,7 +100,7 @@ saveLedgerMeta(
<< "An accepted ledger was missing nodes";
}
return AcceptedLedger::pointer{nullptr};
return {};
}();
if (!aLedger)
@@ -107,10 +108,8 @@ saveLedgerMeta(
soci::transaction tr(txnMetaSession);
for (auto const& [_, acceptedLedgerTx] : aLedger->getMap())
for (auto const& acceptedLedgerTx : *aLedger)
{
(void)_;
std::string_view constexpr txnSQL =
R"sql(INSERT OR REPLACE INTO TransactionMeta VALUES
(:transactionID,:shardIndex);)sql";
@@ -247,7 +246,7 @@ updateLedgerDBs(
"WHERE TransID = :txID;",
soci::use(sTxID);
auto const& accounts = txMeta->getAffectedAccounts(j);
auto const& accounts = txMeta->getAffectedAccounts();
if (!accounts.empty())
{
auto const sTxnSeq{std::to_string(txMeta->getIndex())};

View File

@@ -300,19 +300,16 @@ public:
@param key The key corresponding to the object
@param data A shared pointer to the data corresponding to the object.
@param replace `true` if `data` is the up to date version of the object.
@param replace Function that decides if cache should be replaced
@return `true` If the key already existed.
*/
private:
template <bool replace>
public:
bool
canonicalize(
const key_type& key,
std::conditional_t<
replace,
std::shared_ptr<T> const,
std::shared_ptr<T>>& data)
std::shared_ptr<T>& data,
std::function<bool(std::shared_ptr<T> const&)>&& replace)
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
@@ -335,7 +332,7 @@ private:
if (entry.isCached())
{
if constexpr (replace)
if (replace(entry.ptr))
{
entry.ptr = data;
entry.weak_ptr = data;
@@ -352,7 +349,7 @@ private:
if (cachedData)
{
if constexpr (replace)
if (replace(entry.ptr))
{
entry.ptr = data;
entry.weak_ptr = data;
@@ -374,19 +371,22 @@ private:
return false;
}
public:
bool
canonicalize_replace_cache(
const key_type& key,
std::shared_ptr<T> const& data)
{
return canonicalize<true>(key, data);
return canonicalize(
key,
const_cast<std::shared_ptr<T>&>(data),
[](std::shared_ptr<T> const&) { return true; });
}
bool
canonicalize_replace_client(const key_type& key, std::shared_ptr<T>& data)
{
return canonicalize<false>(key, data);
return canonicalize(
key, data, [](std::shared_ptr<T> const&) { return false; });
}
std::shared_ptr<T>

108
src/ripple/basics/join.h Normal file
View File

@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2022 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef JOIN_H_INCLUDED
#define JOIN_H_INCLUDED
#include <string>
namespace ripple {
template <class Stream, class Iter>
Stream&
join(Stream& s, Iter iter, Iter end, std::string const& delimiter)
{
if (iter == end)
return s;
s << *iter;
for (++iter; iter != end; ++iter)
s << delimiter << *iter;
return s;
}
template <class Collection>
class CollectionAndDelimiter
{
public:
Collection const& collection;
std::string const delimiter;
explicit CollectionAndDelimiter(Collection const& c, std::string delim)
: collection(c), delimiter(std::move(delim))
{
}
template <class Stream>
friend Stream&
operator<<(Stream& s, CollectionAndDelimiter const& cd)
{
return join(
s,
std::begin(cd.collection),
std::end(cd.collection),
cd.delimiter);
}
};
template <class Collection, std::size_t N>
class CollectionAndDelimiter<Collection[N]>
{
public:
Collection const* collection;
std::string const delimiter;
explicit CollectionAndDelimiter(Collection const c[N], std::string delim)
: collection(c), delimiter(std::move(delim))
{
}
template <class Stream>
friend Stream&
operator<<(Stream& s, CollectionAndDelimiter const& cd)
{
return join(s, cd.collection, cd.collection + N, cd.delimiter);
}
};
// Specialization for const char* strings
template <std::size_t N>
class CollectionAndDelimiter<char[N]>
{
public:
char const* collection;
std::string const delimiter;
explicit CollectionAndDelimiter(char const c[N], std::string delim)
: collection(c), delimiter(std::move(delim))
{
}
template <class Stream>
friend Stream&
operator<<(Stream& s, CollectionAndDelimiter const& cd)
{
auto end = cd.collection + N;
if (N > 0 && *(end - 1) == '\0')
--end;
return join(s, cd.collection, end, cd.delimiter);
}
};
} // namespace ripple
#endif

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_CORE_JOB_H_INCLUDED
#define RIPPLE_CORE_JOB_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/core/ClosureCounter.h>
#include <ripple/core/LoadMonitor.h>
#include <functional>
@@ -52,17 +53,18 @@ enum JobType {
jtRPC, // A websocket command from the client
jtSWEEP, // Sweep for stale structures
jtVALIDATION_ut, // A validation from an untrusted source
jtMANIFEST, // A validator's manifest
jtUPDATE_PF, // Update pathfinding requests
jtTRANSACTION_l, // A local transaction
jtREPLAY_REQ, // Peer request a ledger delta or a skip list
jtLEDGER_REQ, // Peer request ledger/txnset data
jtPROPOSAL_ut, // A proposal from an untrusted source
jtREPLAY_TASK, // A Ledger replay task/subtask
jtLEDGER_DATA, // Received data for a ledger we're acquiring
jtTRANSACTION, // A transaction received from the network
jtMISSING_TXN, // Request missing transactions
jtREQUESTED_TXN, // Reply with requested transactions
jtBATCH, // Apply batched transactions
jtLEDGER_DATA, // Received data for a ledger we're acquiring
jtADVANCE, // Advance validated/acquired ledgers
jtPUBLEDGER, // Publish a fully-accepted ledger
jtTXN_DATA, // Fetch a proposed set
@@ -91,7 +93,7 @@ enum JobType {
jtNS_WRITE,
};
class Job
class Job : public CountedObject<Job>
{
public:
using clock_type = std::chrono::steady_clock;

View File

@@ -72,6 +72,7 @@ private:
add(jtPACK, "makeFetchPack", 1, 0ms, 0ms);
add(jtPUBOLDLEDGER, "publishAcqLedger", 2, 10000ms, 15000ms);
add(jtVALIDATION_ut, "untrustedValidation", maxLimit, 2000ms, 5000ms);
add(jtMANIFEST, "manifest", maxLimit, 2000ms, 5000ms);
add(jtTRANSACTION_l, "localTransaction", maxLimit, 100ms, 500ms);
add(jtREPLAY_REQ, "ledgerReplayRequest", 10, 250ms, 1000ms);
add(jtLEDGER_REQ, "ledgerRequest", 3, 0ms, 0ms);

View File

@@ -115,19 +115,19 @@ sizedItems
// what they control and whether there exists an explicit
// config option that can be used to override the default.
// tiny small medium large huge
{SizedItem::sweepInterval, {{ 10, 30, 60, 90, 120 }}},
{SizedItem::treeCacheSize, {{ 128000, 256000, 512000, 768000, 2048000 }}},
{SizedItem::treeCacheAge, {{ 30, 60, 90, 120, 900 }}},
{SizedItem::ledgerSize, {{ 32, 128, 256, 384, 768 }}},
{SizedItem::ledgerAge, {{ 30, 90, 180, 240, 900 }}},
{SizedItem::ledgerFetch, {{ 2, 3, 4, 5, 8 }}},
{SizedItem::hashNodeDBCache, {{ 4, 12, 24, 64, 128 }}},
{SizedItem::txnDBCache, {{ 4, 12, 24, 64, 128 }}},
{SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}},
{SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}},
{SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}},
{SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}},
// tiny small medium large huge
{SizedItem::sweepInterval, {{ 10, 30, 60, 90, 120 }}},
{SizedItem::treeCacheSize, {{ 262144, 524288, 2097152, 4194304, 8388608 }}},
{SizedItem::treeCacheAge, {{ 30, 60, 90, 120, 900 }}},
{SizedItem::ledgerSize, {{ 32, 32, 64, 256, 384 }}},
{SizedItem::ledgerAge, {{ 30, 60, 180, 300, 600 }}},
{SizedItem::ledgerFetch, {{ 2, 3, 4, 5, 8 }}},
{SizedItem::hashNodeDBCache, {{ 4, 12, 24, 64, 128 }}},
{SizedItem::txnDBCache, {{ 4, 12, 24, 64, 128 }}},
{SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}},
{SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}},
{SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}},
{SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}},
}};
// Ensure that the order of entries in the table corresponds to the

View File

@@ -33,7 +33,18 @@ namespace ripple {
// Operations that clients may wish to perform against the network
// Master operational handler, server sequencer, network tracker
class PathRequest;
class InfoSubRequest
{
public:
using pointer = std::shared_ptr<InfoSubRequest>;
virtual ~InfoSubRequest() = default;
virtual Json::Value
doClose() = 0;
virtual Json::Value
doStatus(Json::Value const&) = 0;
};
/** Manages a client's subscription to data feeds.
*/
@@ -205,13 +216,13 @@ public:
deleteSubAccountHistory(AccountID const& account);
void
clearPathRequest();
clearRequest();
void
setPathRequest(const std::shared_ptr<PathRequest>& req);
setRequest(const std::shared_ptr<InfoSubRequest>& req);
std::shared_ptr<PathRequest> const&
getPathRequest();
std::shared_ptr<InfoSubRequest> const&
getRequest();
protected:
std::mutex mLock;
@@ -221,7 +232,7 @@ private:
Source& m_source;
hash_set<AccountID> realTimeSubscriptions_;
hash_set<AccountID> normalSubscriptions_;
std::shared_ptr<PathRequest> mPathRequest;
std::shared_ptr<InfoSubRequest> request_;
std::uint64_t mSeq;
hash_set<AccountID> accountHistorySubscriptions_;

View File

@@ -119,21 +119,21 @@ InfoSub::deleteSubAccountHistory(AccountID const& account)
}
void
InfoSub::clearPathRequest()
InfoSub::clearRequest()
{
mPathRequest.reset();
request_.reset();
}
void
InfoSub::setPathRequest(const std::shared_ptr<PathRequest>& req)
InfoSub::setRequest(const std::shared_ptr<InfoSubRequest>& req)
{
mPathRequest = req;
request_ = req;
}
const std::shared_ptr<PathRequest>&
InfoSub::getPathRequest()
const std::shared_ptr<InfoSubRequest>&
InfoSub::getRequest()
{
return mPathRequest;
return request_;
}
} // namespace ripple

View File

@@ -156,7 +156,7 @@ public:
object is stored, used by the shard store.
@param callback Callback function when read completes
*/
void
virtual void
asyncFetch(
uint256 const& hash,
std::uint32_t ledgerSeq,
@@ -366,11 +366,8 @@ private:
std::function<void(std::shared_ptr<NodeObject> const&)>>>>
read_;
// last read
uint256 readLastHash_;
std::vector<std::thread> readThreads_;
bool readStopping_{false};
std::atomic<bool> readStopping_ = false;
std::atomic<int> readThreads_ = 0;
virtual std::shared_ptr<NodeObject>
fetchNodeObject(

View File

@@ -33,7 +33,8 @@ enum NodeObjectType : std::uint32_t {
hotUNKNOWN = 0,
hotLEDGER = 1,
hotACCOUNT_NODE = 3,
hotTRANSACTION_NODE = 4
hotTRANSACTION_NODE = 4,
hotDUMMY = 512 // an invalid or missing object
};
/** A simple object that the Ledger uses to store entries.

View File

@@ -43,15 +43,76 @@ Database::Database(
, earliestLedgerSeq_(
get<std::uint32_t>(config, "earliest_seq", XRP_LEDGER_EARLIEST_SEQ))
, earliestShardIndex_((earliestLedgerSeq_ - 1) / ledgersPerShard_)
, readThreads_(std::min(1, readThreads))
{
assert(readThreads != 0);
if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
Throw<std::runtime_error>("Invalid ledgers_per_shard");
if (earliestLedgerSeq_ < 1)
Throw<std::runtime_error>("Invalid earliest_seq");
while (readThreads-- > 0)
readThreads_.emplace_back(&Database::threadEntry, this);
for (int i = 0; i != readThreads_.load(); ++i)
{
std::thread t(
[this](int i) {
beast::setCurrentThreadName(
"db prefetch #" + std::to_string(i));
decltype(read_) read;
while (!isStopping())
{
{
std::unique_lock<std::mutex> lock(readLock_);
if (read_.empty())
readCondVar_.wait(lock);
if (isStopping())
continue;
// We extract up to 64 objects to minimize the overhead
// of acquiring the mutex.
for (int cnt = 0; !read_.empty() && cnt != 64; ++cnt)
read.insert(read_.extract(read_.begin()));
}
for (auto it = read.begin(); it != read.end(); ++it)
{
assert(!it->second.empty());
auto const& hash = it->first;
auto const& data = std::move(it->second);
auto const seqn = data[0].first;
auto obj =
fetchNodeObject(hash, seqn, FetchType::async);
// This could be further optimized: if there are
// multiple requests for sequence numbers mapping to
// multiple databases by sorting requests such that all
// indices mapping to the same database are grouped
// together and serviced by a single read.
for (auto const& req : data)
{
req.second(
(seqn == req.first) || isSameDB(req.first, seqn)
? obj
: fetchNodeObject(
hash, req.first, FetchType::async));
}
}
read.clear();
}
--readThreads_;
},
i);
t.detach();
}
}
Database::~Database()
@@ -68,8 +129,7 @@ Database::~Database()
bool
Database::isStopping() const
{
std::lock_guard lock(readLock_);
return readStopping_;
return readStopping_.load(std::memory_order_relaxed);
}
std::uint32_t
@@ -88,19 +148,15 @@ Database::maxLedgers(std::uint32_t shardIndex) const noexcept
void
Database::stop()
{
// After stop time we can no longer use the JobQueue for background
// reads. Join the background read threads.
if (!readStopping_.exchange(true, std::memory_order_relaxed))
{
std::lock_guard lock(readLock_);
if (readStopping_) // Only stop threads once.
return;
readStopping_ = true;
read_.clear();
readCondVar_.notify_all();
}
for (auto& e : readThreads_)
e.join();
while (readThreads_.load() != 0)
std::this_thread::yield();
}
void
@@ -280,53 +336,6 @@ Database::storeLedger(
return true;
}
// Entry point for async read threads
void
Database::threadEntry()
{
beast::setCurrentThreadName("prefetch");
while (true)
{
uint256 lastHash;
std::vector<std::pair<
std::uint32_t,
std::function<void(std::shared_ptr<NodeObject> const&)>>>
entry;
{
std::unique_lock<std::mutex> lock(readLock_);
readCondVar_.wait(
lock, [this] { return readStopping_ || !read_.empty(); });
if (readStopping_)
break;
// Read in key order to make the back end more efficient
auto it = read_.lower_bound(readLastHash_);
if (it == read_.end())
{
// start over from the beginning
it = read_.begin();
}
lastHash = it->first;
entry = std::move(it->second);
read_.erase(it);
readLastHash_ = lastHash;
}
auto seq = entry[0].first;
auto obj = fetchNodeObject(lastHash, seq, FetchType::async);
for (auto const& req : entry)
{
if ((seq == req.first) || isSameDB(req.first, seq))
req.second(obj);
else
req.second(
fetchNodeObject(lastHash, req.first, FetchType::async));
}
}
}
void
Database::getCountsJson(Json::Value& obj)
{

View File

@@ -33,7 +33,34 @@ DatabaseNodeImp::store(
{
storeStats(1, data.size());
backend_->store(NodeObject::createObject(type, std::move(data), hash));
auto obj = NodeObject::createObject(type, std::move(data), hash);
backend_->store(obj);
if (cache_)
{
// After the store, replace a negative cache entry if there is one
cache_->canonicalize(
hash, obj, [](std::shared_ptr<NodeObject> const& n) {
return n->getType() == hotDUMMY;
});
}
}
void
DatabaseNodeImp::asyncFetch(
uint256 const& hash,
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
{
if (cache_)
{
std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
if (obj)
{
callback(obj->getType() == hotDUMMY ? nullptr : obj);
return;
}
}
Database::asyncFetch(hash, ledgerSeq, std::move(callback));
}
void
@@ -75,8 +102,19 @@ DatabaseNodeImp::fetchNodeObject(
switch (status)
{
case ok:
if (nodeObject && cache_)
cache_->canonicalize_replace_client(hash, nodeObject);
if (cache_)
{
if (nodeObject)
cache_->canonicalize_replace_client(hash, nodeObject);
else
{
auto notFound =
NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nodeObject = notFound;
}
}
break;
case notFound:
break;
@@ -95,6 +133,8 @@ DatabaseNodeImp::fetchNodeObject(
{
JLOG(j_.trace()) << "fetchNodeObject " << hash
<< ": record found in cache";
if (nodeObject->getType() == hotDUMMY)
nodeObject.reset();
}
if (nodeObject)
@@ -127,7 +167,7 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
}
else
{
results[i] = nObj;
results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
// It was in the cache.
++hits;
}
@@ -140,9 +180,8 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
for (size_t i = 0; i < dbResults.size(); ++i)
{
auto nObj = dbResults[i];
auto nObj = std::move(dbResults[i]);
size_t index = indexMap[cacheMisses[i]];
results[index] = nObj;
auto const& hash = hashes[index];
if (nObj)
@@ -156,7 +195,15 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
JLOG(j_.error())
<< "fetchBatch - "
<< "record not found in db or cache. hash = " << strHex(hash);
if (cache_)
{
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nObj = std::move(notFound);
}
}
results[index] = std::move(nObj);
}
auto fetchDurationUs =

View File

@@ -111,6 +111,7 @@ public:
// only one database
return true;
}
void
sync() override
{
@@ -120,6 +121,13 @@ public:
std::vector<std::shared_ptr<NodeObject>>
fetchBatch(std::vector<uint256> const& hashes);
void
asyncFetch(
uint256 const& hash,
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
override;
bool
storeLedger(std::shared_ptr<Ledger const> const& srcLedger) override
{

View File

@@ -1067,10 +1067,8 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
if (s > 100)
fee_ = Resource::feeMediumBurdenPeer;
// VFALCO What's the right job type?
auto that = shared_from_this();
app_.getJobQueue().addJob(
jtVALIDATION_ut, "receiveManifests", [this, that, m]() {
jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
overlay_.onManifests(m, that);
});
}
@@ -1341,7 +1339,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMPeerShardInfoV2> const& m)
// case ShardState::finalized:
default:
return badData("Invalid incomplete shard state");
};
}
s.add32(incomplete.state());
// Verify progress
@@ -1589,17 +1587,18 @@ PeerImp::handleTransaction(
}
}
if (app_.getJobQueue().getJobCount(jtTRANSACTION) >
if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
{
JLOG(p_journal_.trace())
<< "No new transactions until synchronized";
}
else if (
app_.getJobQueue().getJobCount(jtTRANSACTION) >
app_.config().MAX_TRANSACTIONS)
{
overlay_.incJqTransOverflow();
JLOG(p_journal_.info()) << "Transaction queue is full";
}
else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
{
JLOG(p_journal_.trace())
<< "No new transactions until synchronized";
}
else
{
app_.getJobQueue().addJob(
@@ -2575,6 +2574,7 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
return;
auto key = sha512Half(makeSlice(m->validation()));
if (auto [added, relayed] =
app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
!added)
@@ -2594,22 +2594,36 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
if (!isTrusted && (tracking_.load() == Tracking::diverged))
{
JLOG(p_journal_.debug())
<< "Validation: dropping untrusted from diverged peer";
<< "Dropping untrusted validation from diverged peer";
}
if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
{
std::string const name = [isTrusted, val]() {
std::string ret =
isTrusted ? "Trusted validation" : "Untrusted validation";
#ifdef DEBUG
ret += " " +
std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
to_string(val->getNodeID());
#endif
return ret;
}();
std::weak_ptr<PeerImp> weak = shared_from_this();
app_.getJobQueue().addJob(
isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
"recvValidation->checkValidation",
[weak, val, m]() {
name,
[weak, val, m, key]() {
if (auto peer = weak.lock())
peer->checkValidation(val, m);
peer->checkValidation(val, key, m);
});
}
else
{
JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
JLOG(p_journal_.debug())
<< "Dropping untrusted validation for load";
}
}
catch (std::exception const& e)
@@ -3154,12 +3168,13 @@ PeerImp::checkPropose(
void
PeerImp::checkValidation(
std::shared_ptr<STValidation> const& val,
uint256 const& key,
std::shared_ptr<protocol::TMValidation> const& packet)
{
if (!cluster() && !val->isValid())
if (!val->isValid())
{
JLOG(p_journal_.debug()) << "Validation forwarded by peer is invalid";
charge(Resource::feeInvalidRequest);
charge(Resource::feeInvalidSignature);
return;
}
@@ -3169,18 +3184,16 @@ PeerImp::checkValidation(
if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
cluster())
{
auto const suppression =
sha512Half(makeSlice(val->getSerialized()));
// haveMessage contains peers, which are suppressed; i.e. the peers
// are the source of the message, consequently the message should
// not be relayed to these peers. But the message must be counted
// as part of the squelch logic.
auto haveMessage =
overlay_.relay(*packet, suppression, val->getSignerPublic());
overlay_.relay(*packet, key, val->getSignerPublic());
if (reduceRelayReady() && !haveMessage.empty())
{
overlay_.updateSlotAndSquelch(
suppression,
key,
val->getSignerPublic(),
std::move(haveMessage),
protocol::mtVALIDATION);
@@ -3525,8 +3538,8 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
{
auto const queryDepth{
m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
std::vector<SHAMapNodeID> nodeIds;
std::vector<Blob> rawNodes;
std::vector<std::pair<SHAMapNodeID, Blob>> data;
for (int i = 0; i < m->nodeids_size() &&
ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
@@ -3534,30 +3547,22 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
{
auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
nodeIds.clear();
rawNodes.clear();
data.clear();
data.reserve(Tuning::softMaxReplyNodes);
try
{
if (map->getNodeFat(
*shaMapNodeId,
nodeIds,
rawNodes,
fatLeaves,
queryDepth))
if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
{
assert(nodeIds.size() == rawNodes.size());
JLOG(p_journal_.trace())
<< "processLedgerRequest: getNodeFat got "
<< rawNodes.size() << " nodes";
<< data.size() << " nodes";
auto rawNodeIter{rawNodes.begin()};
for (auto const& nodeId : nodeIds)
for (auto const& d : data)
{
protocol::TMLedgerNode* node{ledgerData.add_nodes()};
node->set_nodeid(nodeId.getRawString());
node->set_nodedata(
&rawNodeIter->front(), rawNodeIter->size());
++rawNodeIter;
node->set_nodeid(d.first.getRawString());
node->set_nodedata(d.second.data(), d.second.size());
}
}
else
@@ -3609,9 +3614,7 @@ PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
<< ledgerData.nodes_size() << " nodes";
}
auto message{
std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
send(message);
send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
}
int

View File

@@ -626,6 +626,7 @@ private:
void
checkValidation(
std::shared_ptr<STValidation> const& val,
uint256 const& key,
std::shared_ptr<protocol::TMValidation> const& packet);
void

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_PROTOCOL_BOOK_H_INCLUDED
#define RIPPLE_PROTOCOL_BOOK_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/protocol/Issue.h>
#include <boost/utility/base_from_member.hpp>
@@ -29,7 +30,7 @@ namespace ripple {
The order book is a pair of Issues called in and out.
@see Issue.
*/
class Book
class Book final : public CountedObject<Book>
{
public:
Issue in;

View File

@@ -341,6 +341,7 @@ extern SF_UINT8 const sfMethod;
extern SF_UINT8 const sfTransactionResult;
extern SF_UINT8 const sfTickSize;
extern SF_UINT8 const sfUNLModifyDisabling;
extern SF_UINT8 const sfHookResult;
// 16-bit integers
extern SF_UINT16 const sfLedgerEntryType;
@@ -349,6 +350,10 @@ extern SF_UINT16 const sfSignerWeight;
// 16-bit integers (uncommon)
extern SF_UINT16 const sfVersion;
extern SF_UINT16 const sfHookStateChangeCount;
extern SF_UINT16 const sfHookEmitCount;
extern SF_UINT16 const sfHookExecutionIndex;
extern SF_UINT16 const sfHookApiVersion;
// 32-bit integers (common)
extern SF_UINT32 const sfFlags;
@@ -392,6 +397,8 @@ extern SF_UINT32 const sfSignerListID;
extern SF_UINT32 const sfSettleDelay;
extern SF_UINT32 const sfTicketCount;
extern SF_UINT32 const sfTicketSequence;
extern SF_UINT32 const sfHookStateCount;
extern SF_UINT32 const sfEmitGeneration;
// 64-bit integers
extern SF_UINT64 const sfIndexNext;
@@ -405,6 +412,11 @@ extern SF_UINT64 const sfHighNode;
extern SF_UINT64 const sfDestinationNode;
extern SF_UINT64 const sfCookie;
extern SF_UINT64 const sfServerVersion;
extern SF_UINT64 const sfHookOn;
extern SF_UINT64 const sfHookInstructionCount;
extern SF_UINT64 const sfEmitBurden;
extern SF_UINT64 const sfHookReturnCode;
extern SF_UINT64 const sfReferenceCount;
// 128-bit
extern SF_HASH128 const sfEmailHash;
@@ -425,6 +437,9 @@ extern SF_HASH256 const sfLedgerIndex;
extern SF_HASH256 const sfWalletLocator;
extern SF_HASH256 const sfRootIndex;
extern SF_HASH256 const sfAccountTxnID;
extern SF_HASH256 const sfEmitParentTxnID;
extern SF_HASH256 const sfEmitNonce;
extern SF_HASH256 const sfEmitHookHash;
// 256-bit (uncommon)
extern SF_HASH256 const sfBookDirectory;
@@ -436,6 +451,10 @@ extern SF_HASH256 const sfChannel;
extern SF_HASH256 const sfConsensusHash;
extern SF_HASH256 const sfCheckID;
extern SF_HASH256 const sfValidatedHash;
extern SF_HASH256 const sfHookStateKey;
extern SF_HASH256 const sfHookHash;
extern SF_HASH256 const sfHookNamespace;
extern SF_HASH256 const sfHookSetTxnID;
// currency amount (common)
extern SF_AMOUNT const sfAmount;
@@ -477,6 +496,10 @@ extern SF_VL const sfMasterSignature;
extern SF_VL const sfUNLModifyValidator;
extern SF_VL const sfValidatorToDisable;
extern SF_VL const sfValidatorToReEnable;
extern SF_VL const sfHookStateData;
extern SF_VL const sfHookReturnString;
extern SF_VL const sfHookParameterName;
extern SF_VL const sfHookParameterValue;
// account
extern SF_ACCOUNT const sfAccount;
@@ -487,6 +510,10 @@ extern SF_ACCOUNT const sfAuthorize;
extern SF_ACCOUNT const sfUnauthorize;
extern SF_ACCOUNT const sfTarget;
extern SF_ACCOUNT const sfRegularKey;
extern SF_ACCOUNT const sfEmitCallback;
// account (uncommon)
extern SF_ACCOUNT const sfHookAccount;
// path set
extern SField const sfPaths;
@@ -511,6 +538,11 @@ extern SField const sfSignerEntry;
extern SField const sfSigner;
extern SField const sfMajority;
extern SField const sfDisabledValidator;
extern SField const sfEmittedTxn;
extern SField const sfHook;
extern SField const sfHookDefinition;
extern SField const sfHookParameter;
extern SField const sfHookGrant;
// array of objects
// ARRAY/1 is reserved for end of array
@@ -524,6 +556,13 @@ extern SField const sfAffectedNodes;
extern SField const sfMemos;
extern SField const sfMajorities;
extern SField const sfDisabledValidators;
extern SField const sfEmitDetails;
extern SField const sfHookExecutions;
extern SField const sfHookExecution;
extern SField const sfHookParameters;
extern SField const sfHooks;
extern SField const sfHookGrants;
//------------------------------------------------------------------------------
} // namespace ripple

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_PROTOCOL_STAMOUNT_H_INCLUDED
#define RIPPLE_PROTOCOL_STAMOUNT_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/basics/IOUAmount.h>
#include <ripple/basics/LocalValue.h>
#include <ripple/basics/XRPAmount.h>
@@ -40,7 +41,7 @@ namespace ripple {
// Wire form:
// High 8 bits are (offset+142), legal range is, 80 to 22 inclusive
// Low 56 bits are value, legal range is 10^15 to (10^16 - 1) inclusive
class STAmount : public STBase
class STAmount final : public STBase, public CountedObject<STAmount>
{
public:
using mantissa_type = std::uint64_t;

View File

@@ -20,6 +20,7 @@
#ifndef RIPPLE_PROTOCOL_STPATHSET_H_INCLUDED
#define RIPPLE_PROTOCOL_STPATHSET_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/json/json_value.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STBase.h>
@@ -30,7 +31,7 @@
namespace ripple {
class STPathElement
class STPathElement final : public CountedObject<STPathElement>
{
unsigned int mType;
AccountID mAccountID;
@@ -114,7 +115,7 @@ private:
get_hash(STPathElement const& element);
};
class STPath
class STPath final : public CountedObject<STPath>
{
std::vector<STPathElement> mPath;
@@ -172,7 +173,7 @@ public:
//------------------------------------------------------------------------------
// A set of zero or more payment paths
class STPathSet final : public STBase
class STPathSet final : public STBase, public CountedObject<STPathSet>
{
std::vector<STPath> value;

Some files were not shown because too many files have changed in this diff Show More