mirror of
https://github.com/XRPLF/rippled.git
synced 2025-11-19 02:25:52 +00:00
Compare commits
103 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e5275b8577 | ||
|
|
83faf43140 | ||
|
|
22b4de2e44 | ||
|
|
b95ca98965 | ||
|
|
7e46f5342b | ||
|
|
59326bbbc5 | ||
|
|
9eb303f8e8 | ||
|
|
47ccd0b579 | ||
|
|
5e6728dccd | ||
|
|
d458e9972b | ||
|
|
b0b44d32bd | ||
|
|
8266d9d598 | ||
|
|
0839a202c9 | ||
|
|
ee60b16b3a | ||
|
|
18d437284e | ||
|
|
1f75ba23ee | ||
|
|
723733a778 | ||
|
|
8e6a0d418c | ||
|
|
f55913dcee | ||
|
|
e46d2bcf27 | ||
|
|
d632f9f6c8 | ||
|
|
3172a816fa | ||
|
|
4e724794c5 | ||
|
|
610436d737 | ||
|
|
0ee6f15b35 | ||
|
|
e32bc674aa | ||
|
|
34786abd4f | ||
|
|
d0a813a19d | ||
|
|
25474343a9 | ||
|
|
670bc22cfa | ||
|
|
80bda7cc48 | ||
|
|
dac080f1c8 | ||
|
|
767dd4ff3f | ||
|
|
01c37fed69 | ||
|
|
04bd5878f1 | ||
|
|
e836375d99 | ||
|
|
aa4a5b7fe9 | ||
|
|
5aedb0e07a | ||
|
|
dfe69f1b76 | ||
|
|
87d06a2571 | ||
|
|
7ca1f78446 | ||
|
|
b68a66928c | ||
|
|
245174c42c | ||
|
|
7c66747d27 | ||
|
|
cdd37a2a05 | ||
|
|
c66be3e6cf | ||
|
|
70779f6850 | ||
|
|
525aaecbca | ||
|
|
9d3cd718e4 | ||
|
|
656e9fe180 | ||
|
|
8aa617d972 | ||
|
|
711608e652 | ||
|
|
bc9773eb45 | ||
|
|
bea9610440 | ||
|
|
375af87a86 | ||
|
|
1502e6e2cd | ||
|
|
593677ee82 | ||
|
|
8f58687091 | ||
|
|
c7e6803956 | ||
|
|
6faaa91850 | ||
|
|
d66d960d59 | ||
|
|
18235067af | ||
|
|
3eb8aa8b80 | ||
|
|
b9903bbcc4 | ||
|
|
48803a48af | ||
|
|
1b9387eddc | ||
|
|
34ca457132 | ||
|
|
df60e46750 | ||
|
|
e7e672c3f8 | ||
|
|
4d5459d041 | ||
|
|
59f5844381 | ||
|
|
a07a729e3d | ||
|
|
b65e279db6 | ||
|
|
1ddc966b31 | ||
|
|
1a8eb5e6e3 | ||
|
|
6a8180c967 | ||
|
|
eb57679085 | ||
|
|
297def5ed3 | ||
|
|
a01cadbfd5 | ||
|
|
11ca9a946c | ||
|
|
f326f019bf | ||
|
|
90326bf756 | ||
|
|
255bf829ca | ||
|
|
0623a40f02 | ||
|
|
a529b218f3 | ||
|
|
c0cb389b20 | ||
|
|
8f82b62e0d | ||
|
|
dc213a4fab | ||
|
|
06e87e0f6a | ||
|
|
c2a08a1f26 | ||
|
|
df02eb125f | ||
|
|
0c13676d5f | ||
|
|
74e6ed1af3 | ||
|
|
72377e7bf2 | ||
|
|
5b085a75fd | ||
|
|
61389a8bef | ||
|
|
bd97e59254 | ||
|
|
95ecf296ad | ||
|
|
b7e0306d0a | ||
|
|
a9ee802240 | ||
|
|
d23d37fcfd | ||
|
|
289bc0afd9 | ||
|
|
c5dc00af74 |
3
.github/ISSUE_TEMPLATE/config.yml
vendored
3
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -3,9 +3,6 @@ contact_links:
|
||||
- name: XRP Ledger Documentation
|
||||
url: https://xrpl.org/
|
||||
about: All things about XRPL
|
||||
- name: General question for the community
|
||||
url: https://forum.xpring.io/c/community/
|
||||
about: Please ask and answer questions here.
|
||||
- name: Security bug bounty program
|
||||
url: https://ripple.com/bug-bounty/
|
||||
about: Please report security-relevant bugs in our software here.
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -104,3 +104,5 @@ Builds/VisualStudio2015/*.sdf
|
||||
CMakeSettings.json
|
||||
compile_commands.json
|
||||
.clangd
|
||||
packages
|
||||
pkg_out
|
||||
|
||||
22
.travis.yml
22
.travis.yml
@@ -36,9 +36,9 @@ env:
|
||||
- NIH_CACHE_ROOT=${CACHE_DIR}/nih_c
|
||||
- PARALLEL_TESTS=true
|
||||
# this is NOT used by linux container based builds (which already have boost installed)
|
||||
- BOOST_URL='https://boostorg.jfrog.io/artifactory/main/release/1.70.0/source/boost_1_70_0.tar.gz'
|
||||
- BOOST_URL='https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz'
|
||||
# Alternate dowload location
|
||||
- BOOST_URL2='https://downloads.sourceforge.net/project/boost/boost/1.70.0/boost_1_70_0.tar.bz2?r=&ts=1594393912&use_mirror=newcontinuum'
|
||||
- BOOST_URL2='https://downloads.sourceforge.net/project/boost/boost/1.75.0/boost_1_75_0.tar.bz2?r=&ts=1594393912&use_mirror=newcontinuum'
|
||||
# Travis downloader doesn't seem to have updated certs. Using this option
|
||||
# introduces obvious security risks, but they're Travis's risks.
|
||||
# Note that this option is only used if the "normal" build fails.
|
||||
@@ -282,7 +282,7 @@ matrix:
|
||||
env:
|
||||
- MATRIX_EVAL="CC=gcc-8 && CXX=g++-8"
|
||||
- BUILD_TYPE=Debug
|
||||
- CMAKE_EXE=/opt/local/cmake-3.9/bin/cmake
|
||||
- CMAKE_EXE=/opt/local/cmake/bin/cmake
|
||||
- SKIP_TESTS=true
|
||||
# validator keys project as subproj of rippled
|
||||
- <<: *linux
|
||||
@@ -299,15 +299,15 @@ matrix:
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_mac/
|
||||
stage: build
|
||||
os: osx
|
||||
osx_image: xcode11.2
|
||||
name: xcode11.2, debug
|
||||
osx_image: xcode13.1
|
||||
name: xcode13.1, debug
|
||||
env:
|
||||
# put NIH in non-cache location since it seems to
|
||||
# cause failures when homebrew updates
|
||||
- NIH_CACHE_ROOT=${TRAVIS_BUILD_DIR}/nih_c
|
||||
- BLD_CONFIG=Debug
|
||||
- TEST_EXTRA_ARGS=""
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_70_0
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_75_0
|
||||
- >-
|
||||
CMAKE_ADD="
|
||||
-DBOOST_ROOT=${BOOST_ROOT}/_INSTALLED_
|
||||
@@ -338,7 +338,7 @@ matrix:
|
||||
- travis_wait ${MAX_TIME_MIN} cmake --build . --parallel --verbose
|
||||
- ./rippled --unittest --quiet --unittest-log --unittest-jobs ${NUM_PROCESSORS} ${TEST_EXTRA_ARGS}
|
||||
- <<: *macos
|
||||
name: xcode11.2, release
|
||||
name: xcode13.1, release
|
||||
before_script:
|
||||
- export BLD_CONFIG=Release
|
||||
- export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -Dassert=ON"
|
||||
@@ -347,8 +347,8 @@ matrix:
|
||||
before_script:
|
||||
- export TEST_EXTRA_ARGS="--unittest-ipv6"
|
||||
- <<: *macos
|
||||
osx_image: xcode11.2
|
||||
name: xcode11.2, debug
|
||||
osx_image: xcode13.1
|
||||
name: xcode13.1, debug
|
||||
# windows
|
||||
- &windows
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_win/
|
||||
@@ -361,13 +361,13 @@ matrix:
|
||||
- NIH_CACHE_ROOT=${TRAVIS_BUILD_DIR}/nih_c
|
||||
- VCPKG_DEFAULT_TRIPLET="x64-windows-static"
|
||||
- MATRIX_EVAL="CC=cl.exe && CXX=cl.exe"
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_70
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_75
|
||||
- >-
|
||||
CMAKE_ADD="
|
||||
-DCMAKE_PREFIX_PATH=${BOOST_ROOT}/_INSTALLED_
|
||||
-DBOOST_ROOT=${BOOST_ROOT}/_INSTALLED_
|
||||
-DBoost_ROOT=${BOOST_ROOT}/_INSTALLED_
|
||||
-DBoost_DIR=${BOOST_ROOT}/_INSTALLED_/lib/cmake/Boost-1.70.0
|
||||
-DBoost_DIR=${BOOST_ROOT}/_INSTALLED_/lib/cmake/Boost-1.75.0
|
||||
-DBoost_COMPILER=vc141
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
||||
-DCMAKE_TOOLCHAIN_FILE=${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake
|
||||
|
||||
@@ -82,6 +82,7 @@ target_sources (xrpl_core PRIVATE
|
||||
src/ripple/protocol/impl/PublicKey.cpp
|
||||
src/ripple/protocol/impl/Quality.cpp
|
||||
src/ripple/protocol/impl/Rate2.cpp
|
||||
src/ripple/protocol/impl/Rules.cpp
|
||||
src/ripple/protocol/impl/SField.cpp
|
||||
src/ripple/protocol/impl/SOTemplate.cpp
|
||||
src/ripple/protocol/impl/STAccount.cpp
|
||||
@@ -120,12 +121,8 @@ add_library (Ripple::xrpl_core ALIAS xrpl_core)
|
||||
target_include_directories (xrpl_core
|
||||
PUBLIC
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src/ripple>
|
||||
# this one is for beast/legacy files:
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src/beast/extras>
|
||||
$<INSTALL_INTERFACE:include>)
|
||||
|
||||
|
||||
target_compile_definitions(xrpl_core
|
||||
PUBLIC
|
||||
BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT
|
||||
@@ -159,7 +156,9 @@ install (
|
||||
src/ripple/basics/MathUtilities.h
|
||||
src/ripple/basics/safe_cast.h
|
||||
src/ripple/basics/Slice.h
|
||||
src/ripple/basics/spinlock.h
|
||||
src/ripple/basics/StringUtilities.h
|
||||
src/ripple/basics/ThreadSafetyAnalysis.h
|
||||
src/ripple/basics/ToString.h
|
||||
src/ripple/basics/UnorderedContainers.h
|
||||
src/ripple/basics/XRPAmount.h
|
||||
@@ -213,6 +212,7 @@ install (
|
||||
src/ripple/protocol/PublicKey.h
|
||||
src/ripple/protocol/Quality.h
|
||||
src/ripple/protocol/Rate.h
|
||||
src/ripple/protocol/Rules.h
|
||||
src/ripple/protocol/SField.h
|
||||
src/ripple/protocol/SOTemplate.h
|
||||
src/ripple/protocol/STAccount.h
|
||||
@@ -295,23 +295,23 @@ install (
|
||||
if (tests)
|
||||
install (
|
||||
FILES
|
||||
src/beast/extras/beast/unit_test/amount.hpp
|
||||
src/beast/extras/beast/unit_test/dstream.hpp
|
||||
src/beast/extras/beast/unit_test/global_suites.hpp
|
||||
src/beast/extras/beast/unit_test/match.hpp
|
||||
src/beast/extras/beast/unit_test/recorder.hpp
|
||||
src/beast/extras/beast/unit_test/reporter.hpp
|
||||
src/beast/extras/beast/unit_test/results.hpp
|
||||
src/beast/extras/beast/unit_test/runner.hpp
|
||||
src/beast/extras/beast/unit_test/suite.hpp
|
||||
src/beast/extras/beast/unit_test/suite_info.hpp
|
||||
src/beast/extras/beast/unit_test/suite_list.hpp
|
||||
src/beast/extras/beast/unit_test/thread.hpp
|
||||
DESTINATION include/beast/unit_test)
|
||||
src/ripple/beast/unit_test/amount.hpp
|
||||
src/ripple/beast/unit_test/dstream.hpp
|
||||
src/ripple/beast/unit_test/global_suites.hpp
|
||||
src/ripple/beast/unit_test/match.hpp
|
||||
src/ripple/beast/unit_test/recorder.hpp
|
||||
src/ripple/beast/unit_test/reporter.hpp
|
||||
src/ripple/beast/unit_test/results.hpp
|
||||
src/ripple/beast/unit_test/runner.hpp
|
||||
src/ripple/beast/unit_test/suite.hpp
|
||||
src/ripple/beast/unit_test/suite_info.hpp
|
||||
src/ripple/beast/unit_test/suite_list.hpp
|
||||
src/ripple/beast/unit_test/thread.hpp
|
||||
DESTINATION include/ripple/beast/extras/unit_test)
|
||||
install (
|
||||
FILES
|
||||
src/beast/extras/beast/unit_test/detail/const_container.hpp
|
||||
DESTINATION include/beast/unit_test/detail)
|
||||
src/ripple/beast/unit_test/detail/const_container.hpp
|
||||
DESTINATION include/ripple/beast/unit_test/detail)
|
||||
endif () #tests
|
||||
#[===================================================================[
|
||||
rippled executable
|
||||
@@ -399,18 +399,23 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/app/paths/Pathfinder.cpp
|
||||
src/ripple/app/paths/RippleCalc.cpp
|
||||
src/ripple/app/paths/RippleLineCache.cpp
|
||||
src/ripple/app/paths/RippleState.cpp
|
||||
src/ripple/app/paths/TrustLine.cpp
|
||||
src/ripple/app/paths/impl/BookStep.cpp
|
||||
src/ripple/app/paths/impl/DirectStep.cpp
|
||||
src/ripple/app/paths/impl/PaySteps.cpp
|
||||
src/ripple/app/paths/impl/XRPEndpointStep.cpp
|
||||
src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp
|
||||
src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp
|
||||
src/ripple/app/rdb/backend/detail/impl/Node.cpp
|
||||
src/ripple/app/rdb/backend/detail/impl/Shard.cpp
|
||||
src/ripple/app/rdb/backend/impl/PostgresDatabase.cpp
|
||||
src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp
|
||||
src/ripple/app/rdb/impl/Download.cpp
|
||||
src/ripple/app/rdb/impl/PeerFinder.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDatabase.cpp
|
||||
src/ripple/app/rdb/impl/ShardArchive.cpp
|
||||
src/ripple/app/rdb/impl/State.cpp
|
||||
src/ripple/app/rdb/impl/UnitaryShard.cpp
|
||||
src/ripple/app/rdb/impl/Vacuum.cpp
|
||||
src/ripple/app/rdb/impl/Wallet.cpp
|
||||
src/ripple/app/tx/impl/ApplyContext.cpp
|
||||
src/ripple/app/tx/impl/BookTip.cpp
|
||||
src/ripple/app/tx/impl/CancelCheck.cpp
|
||||
@@ -424,6 +429,11 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/app/tx/impl/DepositPreauth.cpp
|
||||
src/ripple/app/tx/impl/Escrow.cpp
|
||||
src/ripple/app/tx/impl/InvariantCheck.cpp
|
||||
src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp
|
||||
src/ripple/app/tx/impl/NFTokenBurn.cpp
|
||||
src/ripple/app/tx/impl/NFTokenCancelOffer.cpp
|
||||
src/ripple/app/tx/impl/NFTokenCreateOffer.cpp
|
||||
src/ripple/app/tx/impl/NFTokenMint.cpp
|
||||
src/ripple/app/tx/impl/OfferStream.cpp
|
||||
src/ripple/app/tx/impl/PayChan.cpp
|
||||
src/ripple/app/tx/impl/Payment.cpp
|
||||
@@ -436,13 +446,13 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/app/tx/impl/Transactor.cpp
|
||||
src/ripple/app/tx/impl/apply.cpp
|
||||
src/ripple/app/tx/impl/applySteps.cpp
|
||||
src/ripple/app/tx/impl/details/NFTokenUtils.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
subdir: basics (partial)
|
||||
#]===============================]
|
||||
src/ripple/basics/impl/Archive.cpp
|
||||
src/ripple/basics/impl/BasicConfig.cpp
|
||||
src/ripple/basics/impl/PerfLogImp.cpp
|
||||
src/ripple/basics/impl/ResolverAsio.cpp
|
||||
src/ripple/basics/impl/UptimeClock.cpp
|
||||
src/ripple/basics/impl/make_SSLContext.cpp
|
||||
@@ -598,6 +608,7 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/handlers/LogLevel.cpp
|
||||
src/ripple/rpc/handlers/LogRotate.cpp
|
||||
src/ripple/rpc/handlers/Manifest.cpp
|
||||
src/ripple/rpc/handlers/NFTOffers.cpp
|
||||
src/ripple/rpc/handlers/NodeToShard.cpp
|
||||
src/ripple/rpc/handlers/NoRippleCheck.cpp
|
||||
src/ripple/rpc/handlers/OwnerInfo.cpp
|
||||
@@ -640,6 +651,11 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/impl/ShardVerificationScheduler.cpp
|
||||
src/ripple/rpc/impl/Status.cpp
|
||||
src/ripple/rpc/impl/TransactionSign.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
subdir: perflog
|
||||
#]===============================]
|
||||
src/ripple/perflog/impl/PerfLogImp.cpp
|
||||
|
||||
#[===============================[
|
||||
main sources:
|
||||
@@ -687,6 +703,9 @@ if (tests)
|
||||
src/test/app/LoadFeeTrack_test.cpp
|
||||
src/test/app/Manifest_test.cpp
|
||||
src/test/app/MultiSign_test.cpp
|
||||
src/test/app/NFToken_test.cpp
|
||||
src/test/app/NFTokenBurn_test.cpp
|
||||
src/test/app/NFTokenDir_test.cpp
|
||||
src/test/app/OfferStream_test.cpp
|
||||
src/test/app/Offer_test.cpp
|
||||
src/test/app/OversizeMeta_test.cpp
|
||||
@@ -733,6 +752,7 @@ if (tests)
|
||||
src/test/basics/contract_test.cpp
|
||||
src/test/basics/FeeUnits_test.cpp
|
||||
src/test/basics/hardened_hash_test.cpp
|
||||
src/test/basics/join_test.cpp
|
||||
src/test/basics/mulDiv_test.cpp
|
||||
src/test/basics/tagged_integer_test.cpp
|
||||
#[===============================[
|
||||
@@ -835,6 +855,7 @@ if (tests)
|
||||
src/test/jtx/impl/sig.cpp
|
||||
src/test/jtx/impl/tag.cpp
|
||||
src/test/jtx/impl/ticket.cpp
|
||||
src/test/jtx/impl/token.cpp
|
||||
src/test/jtx/impl/trust.cpp
|
||||
src/test/jtx/impl/txflags.cpp
|
||||
src/test/jtx/impl/utility.cpp
|
||||
@@ -891,6 +912,7 @@ if (tests)
|
||||
src/test/protocol/InnerObjectFormats_test.cpp
|
||||
src/test/protocol/Issue_test.cpp
|
||||
src/test/protocol/KnownFormatToGRPC_test.cpp
|
||||
src/test/protocol/Hooks_test.cpp
|
||||
src/test/protocol/PublicKey_test.cpp
|
||||
src/test/protocol/Quality_test.cpp
|
||||
src/test/protocol/STAccount_test.cpp
|
||||
@@ -989,17 +1011,18 @@ if (is_ci)
|
||||
target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI)
|
||||
endif ()
|
||||
|
||||
if (reporting)
|
||||
target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
|
||||
endif ()
|
||||
if(reporting)
|
||||
set_target_properties(rippled PROPERTIES OUTPUT_NAME rippled-reporting)
|
||||
get_target_property(BIN_NAME rippled OUTPUT_NAME)
|
||||
message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}")
|
||||
target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
|
||||
endif()
|
||||
|
||||
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.16)
|
||||
# any files that don't play well with unity should be added here
|
||||
if (tests)
|
||||
set_source_files_properties(
|
||||
# these two seem to produce conflicts in beast teardown template methods
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/rpc/ShardArchiveHandler_test.cpp
|
||||
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
|
||||
endif () #tests
|
||||
endif ()
|
||||
# any files that don't play well with unity should be added here
|
||||
if (tests)
|
||||
set_source_files_properties(
|
||||
# these two seem to produce conflicts in beast teardown template methods
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/rpc/ShardArchiveHandler_test.cpp
|
||||
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
|
||||
endif () #tests
|
||||
|
||||
@@ -31,7 +31,7 @@ if (tests)
|
||||
# find_path sets a CACHE variable, so don't try using a "local" variable.
|
||||
find_path (${variable} "${name}" ${ARGN})
|
||||
if (NOT ${variable})
|
||||
message (WARNING "could not find ${name}")
|
||||
message (NOTICE "could not find ${name}")
|
||||
else ()
|
||||
message (STATUS "found ${name}: ${${variable}}/${name}")
|
||||
endif ()
|
||||
|
||||
@@ -48,12 +48,15 @@ if (is_root_project)
|
||||
Builds/containers/centos-builder/Dockerfile
|
||||
Builds/containers/centos-builder/centos_setup.sh
|
||||
Builds/containers/centos-builder/extras.sh
|
||||
Builds/containers/shared/build_deps.sh
|
||||
Builds/containers/shared/rippled.service
|
||||
Builds/containers/shared/update_sources.sh
|
||||
Builds/containers/shared/update-rippled.sh
|
||||
Builds/containers/shared/update_sources.sh
|
||||
Builds/containers/shared/rippled.service
|
||||
Builds/containers/shared/rippled-reporting.service
|
||||
Builds/containers/shared/build_deps.sh
|
||||
Builds/containers/packaging/rpm/rippled.spec
|
||||
Builds/containers/packaging/rpm/build_rpm.sh
|
||||
Builds/containers/packaging/rpm/50-rippled.preset
|
||||
Builds/containers/packaging/rpm/50-rippled-reporting.preset
|
||||
bin/getRippledInfo
|
||||
)
|
||||
exclude_from_default (rpm_container)
|
||||
@@ -86,7 +89,7 @@ if (is_root_project)
|
||||
add_custom_target (dpkg_container
|
||||
docker build
|
||||
--pull
|
||||
--build-arg DIST_TAG=16.04
|
||||
--build-arg DIST_TAG=18.04
|
||||
--build-arg GIT_COMMIT=${commit_hash}
|
||||
-t rippled-dpkg-builder:${container_label}
|
||||
$<$<BOOL:${dpkg_cache_from}>:--cache-from=${dpkg_cache_from}>
|
||||
@@ -96,28 +99,40 @@ if (is_root_project)
|
||||
USES_TERMINAL
|
||||
COMMAND_EXPAND_LISTS
|
||||
SOURCES
|
||||
Builds/containers/packaging/dpkg/debian/rippled-reporting.links
|
||||
Builds/containers/packaging/dpkg/debian/copyright
|
||||
Builds/containers/packaging/dpkg/debian/rules
|
||||
Builds/containers/packaging/dpkg/debian/rippled-reporting.install
|
||||
Builds/containers/packaging/dpkg/debian/rippled-reporting.postinst
|
||||
Builds/containers/packaging/dpkg/debian/rippled.links
|
||||
Builds/containers/packaging/dpkg/debian/rippled.prerm
|
||||
Builds/containers/packaging/dpkg/debian/rippled.postinst
|
||||
Builds/containers/packaging/dpkg/debian/rippled-dev.install
|
||||
Builds/containers/packaging/dpkg/debian/dirs
|
||||
Builds/containers/packaging/dpkg/debian/rippled.postrm
|
||||
Builds/containers/packaging/dpkg/debian/rippled.conffiles
|
||||
Builds/containers/packaging/dpkg/debian/compat
|
||||
Builds/containers/packaging/dpkg/debian/source/format
|
||||
Builds/containers/packaging/dpkg/debian/source/local-options
|
||||
Builds/containers/packaging/dpkg/debian/README.Debian
|
||||
Builds/containers/packaging/dpkg/debian/rippled.install
|
||||
Builds/containers/packaging/dpkg/debian/rippled.preinst
|
||||
Builds/containers/packaging/dpkg/debian/docs
|
||||
Builds/containers/packaging/dpkg/debian/control
|
||||
Builds/containers/packaging/dpkg/debian/rippled-reporting.dirs
|
||||
Builds/containers/packaging/dpkg/build_dpkg.sh
|
||||
Builds/containers/ubuntu-builder/Dockerfile
|
||||
Builds/containers/ubuntu-builder/ubuntu_setup.sh
|
||||
bin/getRippledInfo
|
||||
Builds/containers/shared/install_cmake.sh
|
||||
Builds/containers/shared/install_boost.sh
|
||||
Builds/containers/shared/update-rippled.sh
|
||||
Builds/containers/shared/update_sources.sh
|
||||
Builds/containers/shared/build_deps.sh
|
||||
Builds/containers/shared/rippled.service
|
||||
Builds/containers/shared/update_sources.sh
|
||||
Builds/containers/shared/update-rippled.sh
|
||||
Builds/containers/packaging/dpkg/build_dpkg.sh
|
||||
Builds/containers/packaging/dpkg/debian/README.Debian
|
||||
Builds/containers/packaging/dpkg/debian/conffiles
|
||||
Builds/containers/packaging/dpkg/debian/control
|
||||
Builds/containers/packaging/dpkg/debian/copyright
|
||||
Builds/containers/packaging/dpkg/debian/dirs
|
||||
Builds/containers/packaging/dpkg/debian/docs
|
||||
Builds/containers/packaging/dpkg/debian/rippled-dev.install
|
||||
Builds/containers/packaging/dpkg/debian/rippled.install
|
||||
Builds/containers/packaging/dpkg/debian/rippled.links
|
||||
Builds/containers/packaging/dpkg/debian/rippled.postinst
|
||||
Builds/containers/packaging/dpkg/debian/rippled.postrm
|
||||
Builds/containers/packaging/dpkg/debian/rippled.preinst
|
||||
Builds/containers/packaging/dpkg/debian/rippled.prerm
|
||||
Builds/containers/packaging/dpkg/debian/rules
|
||||
bin/getRippledInfo
|
||||
Builds/containers/shared/rippled-reporting.service
|
||||
Builds/containers/shared/rippled-logrotate
|
||||
Builds/containers/shared/update-rippled-cron
|
||||
)
|
||||
exclude_from_default (dpkg_container)
|
||||
add_custom_target (dpkg
|
||||
@@ -187,4 +202,3 @@ if (is_root_project)
|
||||
message (STATUS "docker NOT found -- won't be able to build containers for packaging")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
|
||||
@@ -39,14 +39,14 @@ endif ()
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
|
||||
set (is_clang TRUE)
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
|
||||
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
|
||||
message (FATAL_ERROR "This project requires clang 7 or later")
|
||||
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
|
||||
message (FATAL_ERROR "This project requires clang 8 or later")
|
||||
endif ()
|
||||
# TODO min AppleClang version check ?
|
||||
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
|
||||
set (is_gcc TRUE)
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
|
||||
message (FATAL_ERROR "This project requires GCC 7 or later")
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
|
||||
message (FATAL_ERROR "This project requires GCC 8 or later")
|
||||
endif ()
|
||||
endif ()
|
||||
if (CMAKE_GENERATOR STREQUAL "Xcode")
|
||||
@@ -72,10 +72,8 @@ if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
|
||||
"directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory.")
|
||||
endif ()
|
||||
|
||||
if ("${CMAKE_GENERATOR}" MATCHES "Visual Studio" AND
|
||||
NOT ("${CMAKE_GENERATOR}" MATCHES .*Win64.*))
|
||||
message (FATAL_ERROR
|
||||
"Visual Studio 32-bit build is not supported. Use -G\"${CMAKE_GENERATOR} Win64\"")
|
||||
if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
|
||||
message (FATAL_ERROR "Visual Studio 32-bit build is not supported.")
|
||||
endif ()
|
||||
|
||||
if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
|
||||
@@ -10,13 +10,8 @@ option (tests "Build tests" ON)
|
||||
|
||||
option (unity "Creates a build using UNITY support in cmake. This is the default" ON)
|
||||
if (unity)
|
||||
if (CMAKE_VERSION VERSION_LESS 3.16)
|
||||
message (WARNING "unity option only supported for with cmake 3.16+ (please upgrade)")
|
||||
set (unity OFF CACHE BOOL "unity only available for cmake 3.16+" FORCE)
|
||||
else ()
|
||||
if (NOT is_ci)
|
||||
set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif ()
|
||||
if (NOT is_ci)
|
||||
set (CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||
endif ()
|
||||
endif ()
|
||||
if (is_gcc OR is_clang)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF)
|
||||
|
||||
if (validator_keys AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
|
||||
if (validator_keys)
|
||||
git_branch (current_branch)
|
||||
# default to tracking VK develop branch unless we are on master/release
|
||||
if (NOT (current_branch STREQUAL "master" OR current_branch STREQUAL "release"))
|
||||
@@ -20,5 +20,3 @@ if (validator_keys AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
|
||||
endif ()
|
||||
add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys)
|
||||
endif ()
|
||||
|
||||
|
||||
|
||||
@@ -969,7 +969,7 @@ function(_Boost_COMPONENT_DEPENDENCIES component _ret)
|
||||
set(_Boost_WAVE_DEPENDENCIES filesystem serialization thread chrono date_time atomic)
|
||||
set(_Boost_WSERIALIZATION_DEPENDENCIES serialization)
|
||||
endif()
|
||||
if(NOT Boost_VERSION_STRING VERSION_LESS 1.71.0)
|
||||
if(NOT Boost_VERSION_STRING VERSION_LESS 1.77.0)
|
||||
message(WARNING "New Boost version may have incorrect or missing dependencies and imported targets")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -125,7 +125,7 @@ if (local_libarchive)
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--target archive_static
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -43,7 +43,7 @@ else()
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--target lz4_static
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -8,35 +8,19 @@
|
||||
|
||||
if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
|
||||
add_library (nudb INTERFACE)
|
||||
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
|
||||
FetchContent_Declare(
|
||||
nudb_src
|
||||
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
|
||||
GIT_TAG 2.0.5
|
||||
)
|
||||
FetchContent_GetProperties(nudb_src)
|
||||
if(NOT nudb_src_POPULATED)
|
||||
message (STATUS "Pausing to download NuDB...")
|
||||
FetchContent_Populate(nudb_src)
|
||||
endif()
|
||||
else ()
|
||||
ExternalProject_Add (nudb_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
|
||||
GIT_TAG 2.0.5
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
)
|
||||
ExternalProject_Get_Property (nudb_src SOURCE_DIR)
|
||||
set (nudb_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${nudb_src_SOURCE_DIR}/include)
|
||||
add_dependencies (nudb nudb_src)
|
||||
endif ()
|
||||
FetchContent_Declare(
|
||||
nudb_src
|
||||
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
|
||||
GIT_TAG 2.0.5
|
||||
)
|
||||
FetchContent_GetProperties(nudb_src)
|
||||
if(NOT nudb_src_POPULATED)
|
||||
message (STATUS "Pausing to download NuDB...")
|
||||
FetchContent_Populate(nudb_src)
|
||||
endif()
|
||||
|
||||
file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR)
|
||||
# specify as system includes so as to avoid warnings
|
||||
# specify as system includes so as to avoid warnings
|
||||
target_include_directories (nudb SYSTEM INTERFACE ${nudb_src_SOURCE_DIR}/include)
|
||||
target_link_libraries (nudb
|
||||
INTERFACE
|
||||
|
||||
@@ -9,9 +9,21 @@ if (static)
|
||||
set (Protobuf_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
find_package (Protobuf 3.8)
|
||||
if (local_protobuf OR NOT Protobuf_FOUND)
|
||||
if (is_multiconfig)
|
||||
set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARIES})
|
||||
else ()
|
||||
string(TOUPPER ${CMAKE_BUILD_TYPE} upper_cmake_build_type)
|
||||
set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARY_${upper_cmake_build_type}})
|
||||
endif ()
|
||||
if (local_protobuf OR NOT (Protobuf_FOUND AND Protobuf_PROTOC_EXECUTABLE AND protobuf_protoc_lib))
|
||||
include (GNUInstallDirs)
|
||||
message (STATUS "using local protobuf build.")
|
||||
set(protobuf_reqs Protobuf_PROTOC_EXECUTABLE protobuf_protoc_lib)
|
||||
foreach(lib ${protobuf_reqs})
|
||||
if(NOT ${lib})
|
||||
message(STATUS "Couldn't find ${lib}")
|
||||
endif()
|
||||
endforeach()
|
||||
if (WIN32)
|
||||
# protobuf prepends lib even on windows
|
||||
set (pbuf_lib_pre "lib")
|
||||
@@ -53,7 +65,7 @@ if (local_protobuf OR NOT Protobuf_FOUND)
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
|
||||
|
||||
@@ -8,7 +8,7 @@ set_target_properties (rocksdb_lib
|
||||
|
||||
option (local_rocksdb "use local build of rocksdb." OFF)
|
||||
if (NOT local_rocksdb)
|
||||
find_package (RocksDB 6.7 QUIET CONFIG)
|
||||
find_package (RocksDB 6.27 QUIET CONFIG)
|
||||
if (TARGET RocksDB::rocksdb)
|
||||
message (STATUS "Found RocksDB using config.")
|
||||
get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION_DEBUG)
|
||||
@@ -40,7 +40,7 @@ if (NOT local_rocksdb)
|
||||
# TBD if there is some way to extract transitive deps..then:
|
||||
#set (RocksDB_USE_STATIC ON)
|
||||
else ()
|
||||
find_package (RocksDB 6.7 MODULE)
|
||||
find_package (RocksDB 6.27 MODULE)
|
||||
if (ROCKSDB_FOUND)
|
||||
if (RocksDB_LIBRARY_DEBUG)
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_DEBUG ${RocksDB_LIBRARY_DEBUG})
|
||||
@@ -60,7 +60,7 @@ if (local_rocksdb)
|
||||
ExternalProject_Add (rocksdb
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
|
||||
GIT_TAG v6.7.3
|
||||
GIT_TAG v6.27.3
|
||||
PATCH_COMMAND
|
||||
# only used by windows build
|
||||
${CMAKE_COMMAND} -E copy_if_different
|
||||
@@ -96,9 +96,13 @@ if (local_rocksdb)
|
||||
-Dlz4_FOUND=ON
|
||||
-USNAPPY_*
|
||||
-Usnappy_*
|
||||
-USnappy_*
|
||||
-Dsnappy_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:snappy_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-Dsnappy_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-Dsnappy_FOUND=ON
|
||||
-DSnappy_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:snappy_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-DSnappy_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-DSnappy_FOUND=ON
|
||||
-DWITH_MD_LIBRARY=OFF
|
||||
-DWITH_RUNTIME_DEBUG=$<IF:$<CONFIG:Debug>,ON,OFF>
|
||||
-DFAIL_ON_WARNINGS=OFF
|
||||
@@ -132,7 +136,7 @@ if (local_rocksdb)
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -42,7 +42,7 @@ else()
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -113,7 +113,7 @@ else()
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -56,7 +56,7 @@ else()
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -112,6 +112,8 @@ if(reporting)
|
||||
-DLIBUV_LIBARY=${BINARY_DIR}/libuv_a.a
|
||||
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
|
||||
-DCASS_BUILD_STATIC=ON
|
||||
-DCASS_BUILD_SHARED=OFF
|
||||
-DOPENSSL_ROOT_DIR=/opt/local/openssl
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}cassandra_static.a
|
||||
LOG_BUILD TRUE
|
||||
|
||||
@@ -9,41 +9,10 @@
|
||||
|
||||
find_package (date QUIET)
|
||||
if (NOT TARGET date::date)
|
||||
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.14)
|
||||
FetchContent_Declare(
|
||||
hh_date_src
|
||||
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
|
||||
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
|
||||
)
|
||||
FetchContent_MakeAvailable(hh_date_src)
|
||||
else ()
|
||||
ExternalProject_Add (hh_date_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
|
||||
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
)
|
||||
ExternalProject_Get_Property (hh_date_src SOURCE_DIR)
|
||||
set (hh_date_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${hh_date_src_SOURCE_DIR}/include)
|
||||
add_library (date_interface INTERFACE)
|
||||
add_library (date::date ALIAS date_interface)
|
||||
add_dependencies (date_interface hh_date_src)
|
||||
file (TO_CMAKE_PATH "${hh_date_src_SOURCE_DIR}" hh_date_src_SOURCE_DIR)
|
||||
target_include_directories (date_interface
|
||||
SYSTEM INTERFACE
|
||||
$<BUILD_INTERFACE:${hh_date_src_SOURCE_DIR}/include>
|
||||
$<INSTALL_INTERFACE:include>)
|
||||
install (
|
||||
FILES
|
||||
${hh_date_src_SOURCE_DIR}/include/date/date.h
|
||||
DESTINATION include/date)
|
||||
install (TARGETS date_interface
|
||||
EXPORT RippleExports
|
||||
INCLUDES DESTINATION include)
|
||||
endif ()
|
||||
FetchContent_Declare(
|
||||
hh_date_src
|
||||
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
|
||||
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
|
||||
)
|
||||
FetchContent_MakeAvailable(hh_date_src)
|
||||
endif ()
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ else ()
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
|
||||
@@ -169,7 +169,7 @@ else ()
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
|
||||
@@ -237,7 +237,7 @@ else ()
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
|
||||
@@ -1,4 +1,71 @@
|
||||
#include "build_version.h"
|
||||
const char* rocksdb_build_git_sha = "rocksdb_build_git_sha: N/A";
|
||||
const char* rocksdb_build_git_date = "rocksdb_build_git_date: N/A";
|
||||
const char* rocksdb_build_compile_date = "N/A";
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "rocksdb/version.h"
|
||||
#include "util/string_util.h"
|
||||
|
||||
// The build script may replace these values with real values based
|
||||
// on whether or not GIT is available and the platform settings
|
||||
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:@GIT_SHA@";
|
||||
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:@GIT_TAG@";
|
||||
#define HAS_GIT_CHANGES @GIT_MOD@
|
||||
#if HAS_GIT_CHANGES == 0
|
||||
// If HAS_GIT_CHANGES is 0, the GIT date is used.
|
||||
// Use the time the branch/tag was last modified
|
||||
static const std::string rocksdb_build_date = "rocksdb_build_date:@GIT_DATE@";
|
||||
#else
|
||||
// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
|
||||
// Use the time the build was created.
|
||||
static const std::string rocksdb_build_date = "rocksdb_build_date:@BUILD_DATE@";
|
||||
#endif
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
|
||||
size_t colon = name.find(":");
|
||||
if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) {
|
||||
// If we found a "@:", then this property was a build-time substitution that failed. Skip it
|
||||
size_t at = name.find("@", colon);
|
||||
if (at != colon + 1) {
|
||||
// Everything before the colon is the name, after is the value
|
||||
(*props)[name.substr(0, colon)] = name.substr(colon + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static std::unordered_map<std::string, std::string>* LoadPropertiesSet() {
|
||||
auto * properties = new std::unordered_map<std::string, std::string>();
|
||||
AddProperty(properties, rocksdb_build_git_sha);
|
||||
AddProperty(properties, rocksdb_build_git_tag);
|
||||
AddProperty(properties, rocksdb_build_date);
|
||||
return properties;
|
||||
}
|
||||
|
||||
const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
|
||||
static std::unique_ptr<std::unordered_map<std::string, std::string>> props(LoadPropertiesSet());
|
||||
return *props;
|
||||
}
|
||||
|
||||
std::string GetRocksVersionAsString(bool with_patch) {
|
||||
std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
|
||||
if (with_patch) {
|
||||
return version + "." + ToString(ROCKSDB_PATCH);
|
||||
} else {
|
||||
return version;
|
||||
}
|
||||
}
|
||||
|
||||
std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
|
||||
std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true);
|
||||
if (verbose) {
|
||||
for (const auto& it : GetRocksBuildProperties()) {
|
||||
info.append("\n ");
|
||||
info.append(it.first);
|
||||
info.append(": ");
|
||||
info.append(it.second);
|
||||
}
|
||||
}
|
||||
return info;
|
||||
}
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ time cmake \
|
||||
-Dpackages_only=ON \
|
||||
-Dcontainer_label="${container_tag}" \
|
||||
-Dhave_package_container=ON \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=OFF \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON \
|
||||
-Dunity=OFF \
|
||||
-G Ninja ../..
|
||||
time cmake --build . --target ${pkgtype}
|
||||
time cmake --build . --target ${pkgtype} -- -v
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env sh
|
||||
set -ex
|
||||
set -e
|
||||
# used as a before/setup script for docker steps in gitlab-ci
|
||||
# expects to be run in standard alpine/dind image
|
||||
echo $(nproc)
|
||||
@@ -13,4 +13,3 @@ apk add \
|
||||
pip3 install awscli
|
||||
# list curdir contents to build log:
|
||||
ls -la
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
# can be overridden by project or group variables as needed.
|
||||
variables:
|
||||
# these containers are built manually using the rippled
|
||||
# cmake build (container targets) and tagged/pushed so they
|
||||
# cmake build (container targets) and tagged/pushed so they
|
||||
# can be used here
|
||||
RPM_CONTAINER_TAG: "2020-02-10"
|
||||
RPM_CONTAINER_NAME: "rippled-rpm-builder"
|
||||
@@ -184,44 +184,36 @@ centos_7_smoketest:
|
||||
name: artifactory.ops.ripple.com/centos:7
|
||||
<<: *run_local_smoketest
|
||||
|
||||
fedora_29_smoketest:
|
||||
# TODO: Remove "allow_failure" when tests fixed
|
||||
rocky_8_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:29
|
||||
name: rockylinux/rockylinux:8
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_28_smoketest:
|
||||
fedora_34_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:28
|
||||
name: artifactory.ops.ripple.com/fedora:34
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_27_smoketest:
|
||||
fedora_35_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:27
|
||||
<<: *run_local_smoketest
|
||||
|
||||
## this one is not LTS, but we
|
||||
## get some extra coverage by including it
|
||||
## consider dropping it when 20.04 is ready
|
||||
ubuntu_20_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:20.04
|
||||
name: artifactory.ops.ripple.com/fedora:35
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
ubuntu_18_smoketest:
|
||||
stage: smoketest
|
||||
@@ -232,15 +224,26 @@ ubuntu_18_smoketest:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
<<: *run_local_smoketest
|
||||
|
||||
ubuntu_16_smoketest:
|
||||
ubuntu_20_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:16.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:20.04
|
||||
<<: *run_local_smoketest
|
||||
|
||||
# TODO: remove "allow_failure" when 22.04 released in 4/2022...
|
||||
ubuntu_22_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:22.04
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
debian_9_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
@@ -250,6 +253,24 @@ debian_9_smoketest:
|
||||
name: artifactory.ops.ripple.com/debian:9
|
||||
<<: *run_local_smoketest
|
||||
|
||||
debian_10_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:10
|
||||
<<: *run_local_smoketest
|
||||
|
||||
debian_11_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:11
|
||||
<<: *run_local_smoketest
|
||||
|
||||
#########################################################################
|
||||
## ##
|
||||
## stage: verify_sig ##
|
||||
@@ -346,38 +367,53 @@ centos_7_verify_repo_test:
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
fedora_29_verify_repo_test:
|
||||
rocky_8_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:29
|
||||
name: rockylinux/rockylinux:8
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_28_verify_repo_test:
|
||||
fedora_34_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:28
|
||||
name: artifactory.ops.ripple.com/fedora:34
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_27_verify_repo_test:
|
||||
fedora_35_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:27
|
||||
name: artifactory.ops.ripple.com/fedora:35
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
ubuntu_18_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_20_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
@@ -391,29 +427,19 @@ ubuntu_20_verify_repo_test:
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_18_verify_repo_test:
|
||||
# TODO: remove "allow_failure" when 22.04 released in 4/2022...
|
||||
ubuntu_22_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DISTRO: "jammy"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_16_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "xenial"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:16.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:22.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
debian_9_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
@@ -427,6 +453,30 @@ debian_9_verify_repo_test:
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_10_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "buster"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:10
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_11_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "bullseye"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:11
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
#########################################################################
|
||||
## ##
|
||||
## stage: wait_approval_prod ##
|
||||
@@ -492,38 +542,53 @@ centos_7_verify_repo_prod:
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
fedora_29_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
rocky_8_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:29
|
||||
name: rockylinux/rockylinux:8
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_28_verify_repo_prod:
|
||||
fedora_34_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:28
|
||||
name: artifactory.ops.ripple.com/fedora:34
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_27_verify_repo_prod:
|
||||
fedora_35_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/fedora:27
|
||||
name: artifactory.ops.ripple.com/fedora:35
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
ubuntu_18_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_20_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
@@ -537,29 +602,19 @@ ubuntu_20_verify_repo_prod:
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_18_verify_repo_prod:
|
||||
# TODO: remove "allow_failure" when 22.04 released in 4/2022...
|
||||
ubuntu_22_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DISTRO: "jammy"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_16_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "xenial"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:16.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:22.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
debian_9_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
@@ -573,6 +628,30 @@ debian_9_verify_repo_prod:
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_10_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "buster"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:10
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_11_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "bullseye"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:11
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
#########################################################################
|
||||
## ##
|
||||
## stage: get_final_hashes ##
|
||||
@@ -622,5 +701,3 @@ build_ubuntu_container:
|
||||
script:
|
||||
- . ./Builds/containers/gitlab-ci/build_container.sh dpkg
|
||||
allow_failure: true
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env sh
|
||||
set -ex
|
||||
set -e
|
||||
action=$1
|
||||
filter=$2
|
||||
|
||||
@@ -15,15 +15,17 @@ cd build/dpkg/packages
|
||||
CURLARGS="-sk -X${action} -urippled:${ARTIFACTORY_DEPLOY_KEY_RIPPLED}"
|
||||
RIPPLED_PKG=$(ls rippled_*.deb)
|
||||
RIPPLED_DEV_PKG=$(ls rippled-dev_*.deb)
|
||||
RIPPLED_REPORTING_PKG=$(ls rippled-reporting_*.deb)
|
||||
RIPPLED_DBG_PKG=$(ls rippled-dbgsym_*.deb)
|
||||
RIPPLED_REPORTING_DBG_PKG=$(ls rippled-reporting-dbgsym_*.deb)
|
||||
# TODO - where to upload src tgz?
|
||||
RIPPLED_SRC=$(ls rippled_*.orig.tar.gz)
|
||||
DEB_MATRIX=";deb.component=${COMPONENT};deb.architecture=amd64"
|
||||
for dist in stretch buster xenial bionic disco focal ; do
|
||||
for dist in stretch buster bullseye bionic focal jammy; do
|
||||
DEB_MATRIX="${DEB_MATRIX};deb.distribution=${dist}"
|
||||
done
|
||||
echo "{ \"debs\": {" > "${TOPDIR}/files.info"
|
||||
for deb in ${RIPPLED_PKG} ${RIPPLED_DEV_PKG} ${RIPPLED_DBG_PKG} ; do
|
||||
for deb in ${RIPPLED_PKG} ${RIPPLED_DEV_PKG} ${RIPPLED_DBG_PKG} ${RIPPLED_REPORTING_PKG} ${RIPPLED_REPORTING_DBG_PKG}; do
|
||||
# first item doesn't get a comma separator
|
||||
if [ $deb != $RIPPLED_PKG ] ; then
|
||||
echo "," >> "${TOPDIR}/files.info"
|
||||
@@ -48,10 +50,11 @@ cd build/rpm/packages
|
||||
RIPPLED_PKG=$(ls rippled-[0-9]*.x86_64.rpm)
|
||||
RIPPLED_DEV_PKG=$(ls rippled-devel*.rpm)
|
||||
RIPPLED_DBG_PKG=$(ls rippled-debuginfo*.rpm)
|
||||
RIPPLED_REPORTING_PKG=$(ls rippled-reporting*.rpm)
|
||||
# TODO - where to upload src rpm ?
|
||||
RIPPLED_SRC=$(ls rippled-[0-9]*.src.rpm)
|
||||
echo "\"rpms\": {" >> "${TOPDIR}/files.info"
|
||||
for rpm in ${RIPPLED_PKG} ${RIPPLED_DEV_PKG} ${RIPPLED_DBG_PKG} ; do
|
||||
for rpm in ${RIPPLED_PKG} ${RIPPLED_DEV_PKG} ${RIPPLED_DBG_PKG} ${RIPPLED_REPORTING_PKG}; do
|
||||
# first item doesn't get a comma separator
|
||||
if [ $rpm != $RIPPLED_PKG ] ; then
|
||||
echo "," >> "${TOPDIR}/files.info"
|
||||
@@ -88,4 +91,3 @@ JSON
|
||||
)
|
||||
curl ${SLACK_NOTIFY_URL} --data-urlencode "${CONTENT}"
|
||||
fi
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env sh
|
||||
set -ex
|
||||
set -e
|
||||
install_from=$1
|
||||
use_private=${2:-0} # this option not currently needed by any CI scripts,
|
||||
# reserved for possible future use
|
||||
@@ -16,7 +16,7 @@ case ${ID} in
|
||||
ubuntu|debian)
|
||||
pkgtype="dpkg"
|
||||
;;
|
||||
fedora|centos|rhel|scientific)
|
||||
fedora|centos|rhel|scientific|rocky)
|
||||
pkgtype="rpm"
|
||||
;;
|
||||
*)
|
||||
@@ -51,7 +51,7 @@ if [ "${pkgtype}" = "dpkg" ] ; then
|
||||
elif [ "${install_from}" = "local" ] ; then
|
||||
# cached pkg install
|
||||
updateWithRetry
|
||||
apt-get -y install libprotobuf-dev libssl-dev
|
||||
apt-get -y install libprotobuf-dev libprotoc-dev protobuf-compiler libssl-dev
|
||||
rm -f build/dpkg/packages/rippled-dbgsym*.*
|
||||
dpkg --no-debsig -i build/dpkg/packages/*.deb
|
||||
else
|
||||
@@ -61,7 +61,11 @@ if [ "${pkgtype}" = "dpkg" ] ; then
|
||||
else
|
||||
yum -y update
|
||||
if [ "${install_from}" = "repo" ] ; then
|
||||
yum -y install yum-utils coreutils util-linux
|
||||
pkgs=("yum-utils coreutils util-linux")
|
||||
if [ "$ID" = "rocky" ]; then
|
||||
pkgs="${pkgs[@]/coreutils}"
|
||||
fi
|
||||
yum install -y $pkgs
|
||||
REPOFILE="/etc/yum.repos.d/artifactory.repo"
|
||||
echo "[Artifactory]" > ${REPOFILE}
|
||||
echo "name=Artifactory" >> ${REPOFILE}
|
||||
@@ -76,7 +80,12 @@ else
|
||||
yum -y install ${rpm_version_release}
|
||||
elif [ "${install_from}" = "local" ] ; then
|
||||
# cached pkg install
|
||||
yum install -y yum-utils openssl-static zlib-static
|
||||
pkgs=("yum-utils openssl-static zlib-static")
|
||||
if [ "$ID" = "rocky" ]; then
|
||||
sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/Rocky-PowerTools.repo
|
||||
pkgs="${pkgs[@]/openssl-static}"
|
||||
fi
|
||||
yum install -y $pkgs
|
||||
rm -f build/rpm/packages/rippled-debug*.rpm
|
||||
rm -f build/rpm/packages/*.src.rpm
|
||||
rpm -i build/rpm/packages/*.rpm
|
||||
@@ -95,5 +104,3 @@ fi
|
||||
# run unit tests
|
||||
/opt/ripple/bin/rippled --unittest --unittest-jobs $(nproc)
|
||||
/opt/ripple/bin/validator-keys --unittest
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env sh
|
||||
set -ex
|
||||
set -e
|
||||
docker login -u rippled \
|
||||
-p ${ARTIFACTORY_DEPLOY_KEY_RIPPLED} "${ARTIFACTORY_HUB}"
|
||||
# this gives us rippled_version :
|
||||
@@ -19,4 +19,3 @@ for label in ${rippled_version} latest ; do
|
||||
docker push \
|
||||
"${ARTIFACTORY_HUB}/${DPKG_CONTAINER_NAME}:${label}_${CI_COMMIT_REF_SLUG}"
|
||||
done
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ set -ex
|
||||
# make sure pkg source files are up to date with repo
|
||||
cd /opt/rippled_bld/pkg
|
||||
cp -fpru rippled/Builds/containers/packaging/dpkg/debian/. debian/
|
||||
cp -fpu rippled/Builds/containers/shared/rippled.service debian/
|
||||
cp -fpu rippled/Builds/containers/shared/rippled*.service debian/
|
||||
cp -fpu rippled/Builds/containers/shared/update_sources.sh .
|
||||
source update_sources.sh
|
||||
|
||||
@@ -52,14 +52,15 @@ rc=$?; if [[ $rc != 0 ]]; then
|
||||
error "error building dpkg"
|
||||
fi
|
||||
cd ..
|
||||
ls -latr
|
||||
|
||||
# copy artifacts
|
||||
cp rippled-dev_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR}
|
||||
cp rippled-reporting_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR}
|
||||
cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR}
|
||||
cp rippled_${RIPPLED_DPKG_FULL_VERSION}.dsc ${PKG_OUTDIR}
|
||||
# dbgsym suffix is ddeb under newer debuild, but just deb under earlier
|
||||
cp rippled-dbgsym_${RIPPLED_DPKG_FULL_VERSION}_amd64.* ${PKG_OUTDIR}
|
||||
cp rippled-reporting-dbgsym_${RIPPLED_DPKG_FULL_VERSION}_amd64.* ${PKG_OUTDIR}
|
||||
cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.changes ${PKG_OUTDIR}
|
||||
cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.build ${PKG_OUTDIR}
|
||||
cp rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz ${PKG_OUTDIR}
|
||||
@@ -81,15 +82,20 @@ DEB_SHA256=$(cat shasums | \
|
||||
grep "rippled_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1)
|
||||
DBG_SHA256=$(cat shasums | \
|
||||
grep "rippled-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1)
|
||||
REPORTING_DBG_SHA256=$(cat shasums | \
|
||||
grep "rippled-reporting-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1)
|
||||
DEV_SHA256=$(cat shasums | \
|
||||
grep "rippled-dev_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1)
|
||||
REPORTING_SHA256=$(cat shasums | \
|
||||
grep "rippled-reporting_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1)
|
||||
SRC_SHA256=$(cat shasums | \
|
||||
grep "rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz" | cut -d " " -f 1)
|
||||
echo "deb_sha256=${DEB_SHA256}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dbg_sha256=${DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dev_sha256=${DEV_SHA256}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "reporting_sha256=${REPORTING_SHA256}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "reporting_dbg_sha256=${REPORTING_DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "src_sha256=${SRC_SHA256}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "rippled_version=${RIPPLED_VERSION}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dpkg_version=${RIPPLED_DPKG_VERSION}" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dpkg_full_version=${RIPPLED_DPKG_FULL_VERSION}" >> ${PKG_OUTDIR}/build_vars
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
9
|
||||
10
|
||||
|
||||
@@ -12,10 +12,16 @@ Multi-Arch: foreign
|
||||
Depends: ${misc:Depends}, ${shlibs:Depends}
|
||||
Description: rippled daemon
|
||||
|
||||
Package: rippled-reporting
|
||||
Architecture: any
|
||||
Multi-Arch: foreign
|
||||
Depends: ${misc:Depends}, ${shlibs:Depends}
|
||||
Description: rippled reporting daemon
|
||||
|
||||
Package: rippled-dev
|
||||
Section: devel
|
||||
Recommends: rippled (= ${binary:Version})
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends: ${misc:Depends}, ${shlibs:Depends}, libprotobuf-dev, libssl-dev
|
||||
Depends: ${misc:Depends}, ${shlibs:Depends}, libprotobuf-dev, libprotoc-dev, protobuf-compiler
|
||||
Description: development files for applications using xrpl core library (serialize + sign)
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
/var/log/rippled-reporting/
|
||||
/var/lib/rippled-reporting/
|
||||
/etc/systemd/system/rippled-reporting.service.d/
|
||||
@@ -0,0 +1,8 @@
|
||||
bld/rippled-reporting/rippled-reporting opt/rippled-reporting/bin
|
||||
cfg/rippled-reporting.cfg opt/rippled-reporting/etc
|
||||
debian/tmp/opt/rippled-reporting/etc/validators.txt opt/rippled-reporting/etc
|
||||
|
||||
opt/rippled-reporting/bin/update-rippled-reporting.sh
|
||||
opt/rippled-reporting/bin/getRippledReportingInfo
|
||||
opt/rippled-reporting/etc/update-rippled-reporting-cron
|
||||
etc/logrotate.d/rippled-reporting
|
||||
@@ -0,0 +1,3 @@
|
||||
opt/rippled-reporting/etc/rippled-reporting.cfg etc/opt/rippled-reporting/rippled-reporting.cfg
|
||||
opt/rippled-reporting/etc/validators.txt etc/opt/rippled-reporting/validators.txt
|
||||
opt/rippled-reporting/bin/rippled-reporting usr/local/bin/rippled-reporting
|
||||
@@ -0,0 +1,33 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
USER_NAME=rippled-reporting
|
||||
GROUP_NAME=rippled-reporting
|
||||
case "$1" in
|
||||
configure)
|
||||
id -u $USER_NAME >/dev/null 2>&1 || \
|
||||
adduser --system --quiet \
|
||||
--home /nonexistent --no-create-home \
|
||||
--disabled-password \
|
||||
--group "$GROUP_NAME"
|
||||
chown -R $USER_NAME:$GROUP_NAME /var/log/rippled-reporting/
|
||||
chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled-reporting/
|
||||
chmod 755 /var/log/rippled-reporting/
|
||||
chmod 755 /var/lib/rippled-reporting/
|
||||
chown -R $USER_NAME:$GROUP_NAME /opt/rippled-reporting
|
||||
|
||||
;;
|
||||
|
||||
abort-upgrade|abort-remove|abort-deconfigure)
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "postinst called with unknown argument \`$1'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
#DEBHELPER#
|
||||
|
||||
exit 0
|
||||
@@ -1,3 +1,2 @@
|
||||
/opt/ripple/etc/rippled.cfg
|
||||
/opt/ripple/etc/validators.txt
|
||||
/etc/logrotate.d/rippled
|
||||
@@ -5,4 +5,4 @@ opt/ripple/bin/getRippledInfo
|
||||
opt/ripple/etc/rippled.cfg
|
||||
opt/ripple/etc/validators.txt
|
||||
opt/ripple/etc/update-rippled-cron
|
||||
etc/logrotate.d/rippled
|
||||
etc/logrotate.d/rippled
|
||||
@@ -16,28 +16,46 @@ override_dh_systemd_start:
|
||||
|
||||
override_dh_auto_configure:
|
||||
env
|
||||
rm -rf bld
|
||||
mkdir -p bld
|
||||
cd bld && \
|
||||
cmake .. -G Ninja \
|
||||
rm -rf bld && mkdir -p bld/rippled
|
||||
cd bld/rippled && \
|
||||
cmake ../.. -G Ninja \
|
||||
-DCMAKE_INSTALL_PREFIX=/opt/ripple \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-Dstatic=ON \
|
||||
-Dunity=OFF \
|
||||
-Dvalidator_keys=ON \
|
||||
-Dunity=OFF \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=OFF
|
||||
|
||||
|
||||
cmake -S . \
|
||||
-B bld/rippled-reporting \
|
||||
-G Ninja \
|
||||
-DCMAKE_INSTALL_PREFIX=/opt/rippled-reporting \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-Dstatic=ON \
|
||||
-Dunity=OFF \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=OFF \
|
||||
-Dreporting=ON
|
||||
|
||||
override_dh_auto_build:
|
||||
cd bld && \
|
||||
cmake --build . --target rippled --target validator-keys --parallel
|
||||
cmake --build bld/rippled --target rippled --target validator-keys --parallel
|
||||
cmake --build bld/rippled-reporting --target rippled --parallel
|
||||
|
||||
override_dh_auto_install:
|
||||
cd bld && DESTDIR=../debian/tmp cmake --build . --target install
|
||||
install -D bld/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
|
||||
cmake --install bld/rippled --prefix debian/tmp/opt/ripple
|
||||
install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
|
||||
install -D Builds/containers/shared/update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh
|
||||
install -D bin/getRippledInfo debian/tmp/opt/ripple/bin/getRippledInfo
|
||||
install -D Builds/containers/shared/update-rippled-cron debian/tmp/opt/ripple/etc/update-rippled-cron
|
||||
install -D Builds/containers/shared/rippled-logrotate debian/tmp/etc/logrotate.d/rippled
|
||||
rm -rf debian/tmp/opt/ripple/lib64/cmake/date
|
||||
rm -rf bld
|
||||
rm -rf bld_vl
|
||||
|
||||
mkdir -p debian/tmp/opt/rippled-reporting/etc
|
||||
cp cfg/validators-example.txt debian/tmp/opt/rippled-reporting/etc/validators.txt
|
||||
install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/rippled-reporting/bin/validator-keys
|
||||
|
||||
sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled.sh > debian/tmp/opt/rippled-reporting/bin/update-rippled-reporting.sh
|
||||
sed -E 's/rippled?/rippled-reporting/g' bin/getRippledInfo > debian/tmp/opt/rippled-reporting/bin/getRippledReportingInfo
|
||||
sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled-cron > debian/tmp/opt/rippled-reporting/etc/update-rippled-reporting-cron
|
||||
sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/rippled-logrotate > debian/tmp/etc/logrotate.d/rippled-reporting
|
||||
@@ -0,0 +1 @@
|
||||
enable rippled-reporting.service
|
||||
@@ -30,8 +30,8 @@ fi
|
||||
|
||||
cd /opt/rippled_bld/pkg/rippled
|
||||
if [[ -n $(git status --porcelain) ]]; then
|
||||
git status
|
||||
error "Unstaged changes in this repo - please commit first"
|
||||
git status
|
||||
error "Unstaged changes in this repo - please commit first"
|
||||
fi
|
||||
git archive --format tar.gz --prefix rippled/ -o ../rpmbuild/SOURCES/rippled.tar.gz HEAD
|
||||
# TODO include validator-keys sources
|
||||
@@ -54,18 +54,22 @@ cp ./rpmbuild/SRPMS/* ${PKG_OUTDIR}
|
||||
RPM_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm 2>/dev/null)
|
||||
DBG_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-debuginfo*.rpm 2>/dev/null)
|
||||
DEV_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-devel*.rpm 2>/dev/null)
|
||||
REP_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-reporting*.rpm 2>/dev/null)
|
||||
SRC_MD5SUM=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/SRPMS/*.rpm 2>/dev/null)
|
||||
|
||||
RPM_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm | awk '{ print $1}')"
|
||||
DBG_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-debuginfo*.rpm | awk '{ print $1}')"
|
||||
REP_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-reporting*.rpm | awk '{ print $1}')"
|
||||
DEV_SHA256="$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-devel*.rpm | awk '{ print $1}')"
|
||||
SRC_SHA256="$(sha256sum ./rpmbuild/SRPMS/*.rpm | awk '{ print $1}')"
|
||||
|
||||
echo "rpm_md5sum=$RPM_MD5SUM" > ${PKG_OUTDIR}/build_vars
|
||||
echo "rep_md5sum=$REP_MD5SUM" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dbg_md5sum=$DBG_MD5SUM" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dev_md5sum=$DEV_MD5SUM" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "src_md5sum=$SRC_MD5SUM" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "rpm_sha256=$RPM_SHA256" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "rep_sha256=$REP_SHA256" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dbg_sha256=$DBG_SHA256" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "dev_sha256=$DEV_SHA256" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "src_sha256=$SRC_SHA256" >> ${PKG_OUTDIR}/build_vars
|
||||
@@ -73,4 +77,3 @@ echo "rippled_version=$RIPPLED_VERSION" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "rpm_version=$RIPPLED_RPM_VERSION" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "rpm_file_name=$tar_file" >> ${PKG_OUTDIR}/build_vars
|
||||
echo "rpm_version_release=$RPM_VERSION_RELEASE" >> ${PKG_OUTDIR}/build_vars
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
%define rpm_release %(echo $RPM_RELEASE)
|
||||
%define rpm_patch %(echo $RPM_PATCH)
|
||||
%define _prefix /opt/ripple
|
||||
|
||||
Name: rippled
|
||||
# Dashes in Version extensions must be converted to underscores
|
||||
Version: %{rippled_version}
|
||||
@@ -20,34 +21,46 @@ rippled
|
||||
%package devel
|
||||
Summary: Files for development of applications using xrpl core library
|
||||
Group: Development/Libraries
|
||||
Requires: openssl-static, zlib-static
|
||||
Requires: zlib-static
|
||||
|
||||
%description devel
|
||||
core library for development of standalone applications that sign transactions.
|
||||
|
||||
%package reporting
|
||||
Summary: Reporting Server for rippled
|
||||
|
||||
%description reporting
|
||||
History server for XRP Ledger
|
||||
|
||||
%prep
|
||||
%setup -c -n rippled
|
||||
|
||||
%build
|
||||
cd rippled
|
||||
mkdir -p bld.release
|
||||
cd bld.release
|
||||
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dstatic=true -Dunity=OFF -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON
|
||||
cmake --build . --parallel --target rippled --target validator-keys
|
||||
mkdir -p bld.rippled
|
||||
pushd bld.rippled
|
||||
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON
|
||||
cmake --build . --parallel $(nproc) --target rippled --target validator-keys
|
||||
popd
|
||||
|
||||
mkdir -p bld.rippled-reporting
|
||||
cd bld.rippled-reporting
|
||||
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix}-reporting -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dreporting=ON
|
||||
cmake --build . --parallel $(nproc) --target rippled
|
||||
|
||||
%pre
|
||||
test -e /etc/pki/tls || { mkdir -p /etc/pki; ln -s /usr/lib/ssl /etc/pki/tls; }
|
||||
|
||||
%install
|
||||
rm -rf $RPM_BUILD_ROOT
|
||||
DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install
|
||||
DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.rippled --target install -- -v
|
||||
rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/cmake/date
|
||||
install -d ${RPM_BUILD_ROOT}/etc/opt/ripple
|
||||
install -d ${RPM_BUILD_ROOT}/usr/local/bin
|
||||
ln -s %{_prefix}/etc/rippled.cfg ${RPM_BUILD_ROOT}/etc/opt/ripple/rippled.cfg
|
||||
ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/ripple/validators.txt
|
||||
ln -s %{_prefix}/bin/rippled ${RPM_BUILD_ROOT}/usr/local/bin/rippled
|
||||
install -D rippled/bld.release/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys
|
||||
install -D rippled/bld.rippled/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys
|
||||
install -D ./rippled/Builds/containers/shared/rippled.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled.service
|
||||
install -D ./rippled/Builds/containers/packaging/rpm/50-rippled.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled.preset
|
||||
install -D ./rippled/Builds/containers/shared/update-rippled.sh ${RPM_BUILD_ROOT}%{_bindir}/update-rippled.sh
|
||||
@@ -57,7 +70,27 @@ install -D ./rippled/Builds/containers/shared/rippled-logrotate ${RPM_BUILD_ROOT
|
||||
install -d $RPM_BUILD_ROOT/var/log/rippled
|
||||
install -d $RPM_BUILD_ROOT/var/lib/rippled
|
||||
|
||||
# reporting mode
|
||||
%define _prefix /opt/rippled-reporting
|
||||
mkdir -p ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/
|
||||
install -D rippled/bld.rippled-reporting/rippled-reporting ${RPM_BUILD_ROOT}%{_bindir}/rippled-reporting
|
||||
install -D ./rippled/cfg/rippled-reporting.cfg ${RPM_BUILD_ROOT}%{_prefix}/etc/rippled-reporting.cfg
|
||||
install -D ./rippled/cfg/validators-example.txt ${RPM_BUILD_ROOT}%{_prefix}/etc/validators.txt
|
||||
install -D ./rippled/Builds/containers/packaging/rpm/50-rippled-reporting.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled-reporting.preset
|
||||
ln -s %{_prefix}/bin/rippled-reporting ${RPM_BUILD_ROOT}/usr/local/bin/rippled-reporting
|
||||
ln -s %{_prefix}/etc/rippled-reporting.cfg ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/rippled-reporting.cfg
|
||||
ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/rippled-reporting/validators.txt
|
||||
install -d $RPM_BUILD_ROOT/var/log/rippled-reporting
|
||||
install -d $RPM_BUILD_ROOT/var/lib/rippled-reporting
|
||||
install -D ./rippled/Builds/containers/shared/rippled-reporting.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled-reporting.service
|
||||
sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/update-rippled.sh > ${RPM_BUILD_ROOT}%{_bindir}/update-rippled-reporting.sh
|
||||
sed -E 's/rippled?/rippled-reporting/g' ./rippled/bin/getRippledInfo > ${RPM_BUILD_ROOT}%{_bindir}/getRippledReportingInfo
|
||||
sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/update-rippled-cron > ${RPM_BUILD_ROOT}%{_prefix}/etc/update-rippled-reporting-cron
|
||||
sed -E 's/rippled?/rippled-reporting/g' ./rippled/Builds/containers/shared/rippled-logrotate > ${RPM_BUILD_ROOT}/etc/logrotate.d/rippled-reporting
|
||||
|
||||
|
||||
%post
|
||||
%define _prefix /opt/ripple
|
||||
USER_NAME=rippled
|
||||
GROUP_NAME=rippled
|
||||
|
||||
@@ -75,7 +108,25 @@ chmod 644 %{_prefix}/etc/update-rippled-cron
|
||||
chmod 644 /etc/logrotate.d/rippled
|
||||
chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron
|
||||
|
||||
%post reporting
|
||||
%define _prefix /opt/rippled-reporting
|
||||
USER_NAME=rippled-reporting
|
||||
GROUP_NAME=rippled-reporting
|
||||
|
||||
getent passwd $USER_NAME &>/dev/null || useradd -r $USER_NAME
|
||||
getent group $GROUP_NAME &>/dev/null || groupadd $GROUP_NAME
|
||||
|
||||
chown -R $USER_NAME:$GROUP_NAME /var/log/rippled-reporting/
|
||||
chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled-reporting/
|
||||
chown -R $USER_NAME:$GROUP_NAME %{_prefix}/
|
||||
|
||||
chmod 755 /var/log/rippled-reporting/
|
||||
chmod 755 /var/lib/rippled-reporting/
|
||||
chmod -x /usr/lib/systemd/system/rippled-reporting.service
|
||||
|
||||
|
||||
%files
|
||||
%define _prefix /opt/ripple
|
||||
%doc rippled/README.md rippled/LICENSE.md
|
||||
%{_bindir}/rippled
|
||||
/usr/local/bin/rippled
|
||||
@@ -98,6 +149,25 @@ chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron
|
||||
%{_prefix}/lib/*.a
|
||||
%{_prefix}/lib/cmake/ripple
|
||||
|
||||
%files reporting
|
||||
%define _prefix /opt/rippled-reporting
|
||||
%doc rippled/README.md rippled/LICENSE.md
|
||||
|
||||
%{_bindir}/rippled-reporting
|
||||
/usr/local/bin/rippled-reporting
|
||||
%config(noreplace) /etc/opt/rippled-reporting/rippled-reporting.cfg
|
||||
%config(noreplace) %{_prefix}/etc/rippled-reporting.cfg
|
||||
%config(noreplace) %{_prefix}/etc/validators.txt
|
||||
%config(noreplace) /etc/opt/rippled-reporting/validators.txt
|
||||
%config(noreplace) /usr/lib/systemd/system/rippled-reporting.service
|
||||
%config(noreplace) /usr/lib/systemd/system-preset/50-rippled-reporting.preset
|
||||
%dir /var/log/rippled-reporting/
|
||||
%dir /var/lib/rippled-reporting/
|
||||
%{_bindir}/update-rippled-reporting.sh
|
||||
%{_bindir}/getRippledReportingInfo
|
||||
%{_prefix}/etc/update-rippled-reporting-cron
|
||||
%config(noreplace) /etc/logrotate.d/rippled-reporting
|
||||
|
||||
%changelog
|
||||
* Wed Aug 28 2019 Mike Ellery <mellery451@gmail.com>
|
||||
- Switch to subproject build for validator-keys
|
||||
|
||||
@@ -30,7 +30,7 @@ cd openssl-${OPENSSL_VER}
|
||||
SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\")
|
||||
./config -fPIC --prefix=/opt/local/openssl --openssldir=${SSLDIR} zlib shared
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
make install >> make_output.txt 2>&1
|
||||
cd ..
|
||||
rm -f openssl-${OPENSSL_VER}.tar.gz
|
||||
rm -rf openssl-${OPENSSL_VER}
|
||||
@@ -43,7 +43,7 @@ cd libarchive-3.4.1
|
||||
mkdir _bld && cd _bld
|
||||
cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
make install >> make_output.txt 2>&1
|
||||
cd ../..
|
||||
rm -f libarchive-3.4.1.tar.gz
|
||||
rm -rf libarchive-3.4.1
|
||||
@@ -55,7 +55,7 @@ cd protobuf-3.10.1
|
||||
./autogen.sh
|
||||
./configure
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
make install >> make_output.txt 2>&1
|
||||
ldconfig
|
||||
cd ..
|
||||
rm -f protobuf-all-3.10.1.tar.gz
|
||||
@@ -78,7 +78,7 @@ cmake \
|
||||
-DCARES_BUILD_CONTAINER_TESTS=OFF \
|
||||
..
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
make install >> make_output.txt 2>&1
|
||||
cd ../..
|
||||
rm -f c-ares-1.15.0.tar.gz
|
||||
rm -rf c-ares-1.15.0
|
||||
@@ -98,7 +98,7 @@ cmake \
|
||||
-DProtobuf_USE_STATIC_LIBS=ON \
|
||||
..
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
make install >> make_output.txt 2>&1
|
||||
cd ../..
|
||||
rm -f xf v1.25.0.tar.gz
|
||||
rm -rf grpc-1.25.0
|
||||
@@ -115,7 +115,7 @@ if [ "${CI_USE}" = true ] ; then
|
||||
cd build
|
||||
cmake -G "Unix Makefiles" ..
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
make install >> make_output.txt 2>&1
|
||||
cd ../..
|
||||
rm -f Release_1_8_16.tar.gz
|
||||
rm -rf doxygen-Release_1_8_16
|
||||
@@ -136,8 +136,8 @@ if [ "${CI_USE}" = true ] ; then
|
||||
tar xf ccache-3.7.6.tar.gz
|
||||
cd ccache-3.7.6
|
||||
./configure --prefix=/usr/local
|
||||
make
|
||||
make install
|
||||
make >> make_output.txt 2>&1
|
||||
make install >> make_output.txt 2>&1
|
||||
cd ..
|
||||
rm -f ccache-3.7.6.tar.gz
|
||||
rm -rf ccache-3.7.6
|
||||
|
||||
15
Builds/containers/shared/rippled-reporting.service
Normal file
15
Builds/containers/shared/rippled-reporting.service
Normal file
@@ -0,0 +1,15 @@
|
||||
[Unit]
|
||||
Description=Ripple Daemon
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/opt/rippled-reporting/bin/rippled-reporting --silent --conf /etc/opt/rippled-reporting/rippled-reporting.cfg
|
||||
Restart=on-failure
|
||||
User=rippled-reporting
|
||||
Group=rippled-reporting
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -36,12 +36,13 @@ listed later.
|
||||
| 07 | ripple/shamap ripple/overlay
|
||||
| 08 | ripple/app
|
||||
| 09 | ripple/rpc
|
||||
| 10 | test/jtx test/beast test/csf
|
||||
| 11 | test/unit_test
|
||||
| 12 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay
|
||||
| 13 | test
|
||||
| 14 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore
|
||||
| 15 | test/rpc test/app
|
||||
| 10 | ripple/perflog
|
||||
| 11 | test/jtx test/beast test/csf
|
||||
| 12 | test/unit_test
|
||||
| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay
|
||||
| 14 | test
|
||||
| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore
|
||||
| 16 | test/rpc test/app
|
||||
|
||||
(Note that `test` levelization is *much* less important and *much* less
|
||||
strictly enforced than `ripple` levelization, other than the requirement
|
||||
|
||||
@@ -14,7 +14,7 @@ Loop: ripple.app ripple.overlay
|
||||
ripple.overlay ~= ripple.app
|
||||
|
||||
Loop: ripple.app ripple.peerfinder
|
||||
ripple.peerfinder ~= ripple.app
|
||||
ripple.app > ripple.peerfinder
|
||||
|
||||
Loop: ripple.app ripple.rpc
|
||||
ripple.rpc > ripple.app
|
||||
@@ -28,15 +28,9 @@ Loop: ripple.basics ripple.core
|
||||
Loop: ripple.basics ripple.json
|
||||
ripple.json ~= ripple.basics
|
||||
|
||||
Loop: ripple.basics ripple.nodestore
|
||||
ripple.nodestore > ripple.basics
|
||||
|
||||
Loop: ripple.basics ripple.protocol
|
||||
ripple.protocol > ripple.basics
|
||||
|
||||
Loop: ripple.basics ripple.rpc
|
||||
ripple.rpc > ripple.basics
|
||||
|
||||
Loop: ripple.core ripple.net
|
||||
ripple.net > ripple.core
|
||||
|
||||
|
||||
@@ -29,6 +29,7 @@ ripple.net > ripple.beast
|
||||
ripple.net > ripple.json
|
||||
ripple.net > ripple.protocol
|
||||
ripple.net > ripple.resource
|
||||
ripple.nodestore > ripple.basics
|
||||
ripple.nodestore > ripple.beast
|
||||
ripple.nodestore > ripple.core
|
||||
ripple.nodestore > ripple.json
|
||||
@@ -46,6 +47,13 @@ ripple.peerfinder > ripple.basics
|
||||
ripple.peerfinder > ripple.beast
|
||||
ripple.peerfinder > ripple.core
|
||||
ripple.peerfinder > ripple.protocol
|
||||
ripple.perflog > ripple.basics
|
||||
ripple.perflog > ripple.beast
|
||||
ripple.perflog > ripple.core
|
||||
ripple.perflog > ripple.json
|
||||
ripple.perflog > ripple.nodestore
|
||||
ripple.perflog > ripple.protocol
|
||||
ripple.perflog > ripple.rpc
|
||||
ripple.protocol > ripple.beast
|
||||
ripple.protocol > ripple.crypto
|
||||
ripple.protocol > ripple.json
|
||||
@@ -53,6 +61,7 @@ ripple.resource > ripple.basics
|
||||
ripple.resource > ripple.beast
|
||||
ripple.resource > ripple.json
|
||||
ripple.resource > ripple.protocol
|
||||
ripple.rpc > ripple.basics
|
||||
ripple.rpc > ripple.beast
|
||||
ripple.rpc > ripple.core
|
||||
ripple.rpc > ripple.crypto
|
||||
|
||||
@@ -19,7 +19,7 @@ Use `apt-get` to install the dependencies provided by the distribution
|
||||
|
||||
```
|
||||
$ apt-get update
|
||||
$ apt-get install -y gcc g++ wget git cmake pkg-config protobuf-compiler libprotobuf-dev libssl-dev
|
||||
$ apt-get install -y gcc g++ wget git cmake pkg-config libprotoc-dev protobuf-compiler libprotobuf-dev libssl-dev
|
||||
```
|
||||
|
||||
To build the software in reporting mode, install these additional dependencies:
|
||||
@@ -239,3 +239,32 @@ change the `/opt/local` module path above to match your chosen installation pref
|
||||
`rippled` builds a set of unit tests into the server executable. To run these unit
|
||||
tests after building, pass the `--unittest` option to the compiled `rippled`
|
||||
executable. The executable will exit with summary info after running the unit tests.
|
||||
|
||||
## Workaround for a compile error in soci
|
||||
|
||||
Compilation errors have been observed with Apple Clang 13.1.6+ and soci v4.x. soci compiles with the `-Werror` flag which causes warnings to be treated as errors. These warnings pertain to style (not correctness). However, they cause the cmake process to fail.
|
||||
|
||||
Here's an example of how this looks:
|
||||
```
|
||||
.../rippled/.nih_c/unix_makefiles/AppleClang_13.1.6.13160021/Debug/src/soci/src/core/session.cpp:450:66: note: in instantiation of function template specialization 'soci::use<std::string>' requested here
|
||||
return prepare << backEnd_->get_column_descriptions_query(), use(table_name, "t");
|
||||
^
|
||||
1 error generated.
|
||||
```
|
||||
|
||||
Please apply the below patch (courtesy of Scott Determan) to remove these errors. `.nih_c/unix_makefiles/AppleClang_13.1.6.13160021/Debug/src/soci/cmake/SociConfig.cmake` file needs to be edited. This file is an example for Mac OS and it might be slightly different for other OS/Architectures.
|
||||
|
||||
```
|
||||
diff --git a/cmake/SociConfig.cmake b/cmake/SociConfig.cmake
|
||||
index 97d907e4..11bcd1f3 100644
|
||||
--- a/cmake/SociConfig.cmake
|
||||
+++ b/cmake/SociConfig.cmake
|
||||
@@ -58,8 +58,8 @@ if (MSVC)
|
||||
|
||||
else()
|
||||
|
||||
- set(SOCI_GCC_CLANG_COMMON_FLAGS
|
||||
- "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long")
|
||||
+ set(SOCI_GCC_CLANG_COMMON_FLAGS "")
|
||||
+ # "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long")
|
||||
```
|
||||
|
||||
@@ -1,30 +1,37 @@
|
||||
cmake_minimum_required (VERSION 3.9.0)
|
||||
cmake_minimum_required (VERSION 3.16)
|
||||
|
||||
if (POLICY CMP0074)
|
||||
cmake_policy(SET CMP0074 NEW)
|
||||
endif ()
|
||||
|
||||
project (rippled)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
|
||||
# make GIT_COMMIT_HASH define available to all sources
|
||||
find_package(Git)
|
||||
if(Git_FOUND)
|
||||
execute_process(COMMAND ${GIT_EXECUTABLE} describe --always --abbrev=40
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE GIT_COMMIT_HASH)
|
||||
message(STATUS gch: ${GIT_COMMIT_HASH})
|
||||
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
|
||||
if(gch)
|
||||
set(GIT_COMMIT_HASH "${gch}")
|
||||
message(STATUS gch: ${GIT_COMMIT_HASH})
|
||||
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
|
||||
endif()
|
||||
endif() #git
|
||||
|
||||
if (thread_safety_analysis)
|
||||
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
|
||||
add_compile_options("-stdlib=libc++")
|
||||
add_link_options("-stdlib=libc++")
|
||||
endif()
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake")
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps")
|
||||
|
||||
include (CheckCXXCompilerFlag)
|
||||
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.11)
|
||||
include (FetchContent)
|
||||
endif ()
|
||||
if (MSVC AND CMAKE_VERSION VERSION_LESS 3.12)
|
||||
message (FATAL_ERROR "MSVC requires cmake 3.12 or greater for proper boost support")
|
||||
endif ()
|
||||
include (FetchContent)
|
||||
include (ExternalProject)
|
||||
include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
|
||||
include (ProcessorCount)
|
||||
|
||||
67
CONTRIBUTING.md
Normal file
67
CONTRIBUTING.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Contributing
|
||||
The XRP Ledger has many and diverse stakeholders, and everyone deserves a chance to contribute meaningful changes to the code that runs the XRPL.
|
||||
To contribute, please:
|
||||
1. Fork the repository under your own user.
|
||||
2. Create a new branch on which to write your changes. Please note that changes which alter transaction processing must be composed via and guarded using [Amendments](https://xrpl.org/amendments.html). Changes which are _read only_ i.e. RPC, or changes which are only refactors and maintain the existing behaviour do not need to be made through an Amendment.
|
||||
3. Write and test your code.
|
||||
4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate.
|
||||
5. Write test cases for your code and include those in `src/test` such that they are runnable from the command line using `./rippled -u`. (Some changes will not be able to be tested this way.)
|
||||
6. Ensure your code passes automated checks (e.g. clang-format and levelization.)
|
||||
7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change.)
|
||||
8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.
|
||||
|
||||
# Major Changes
|
||||
If your code change is a major feature, a breaking change or in some other way makes a significant alteration to the way the XRPL will operate, then you must first write an XLS document (XRP Ledger Standard) describing your change.
|
||||
To do this:
|
||||
1. Go to [XLS Standards](https://github.com/XRPLF/XRPL-Standards/discussions).
|
||||
2. Choose the next available standard number.
|
||||
3. Open a discussion with the appropriate title to propose your draft standard.
|
||||
4. Link your XLS in your PR.
|
||||
|
||||
# Style guide
|
||||
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments.
|
||||
|
||||
## Formatting
|
||||
All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
|
||||
To change your code to conform use `clang-format -i <your changed files>`.
|
||||
|
||||
## Avoid
|
||||
1. Proliferation of nearly identical code.
|
||||
2. Proliferation of new files and classes.
|
||||
3. Complex inheritance and complex OOP patterns.
|
||||
4. Unmanaged memory allocation and raw pointers.
|
||||
5. Macros and non-trivial templates (unless they add significant value.)
|
||||
6. Lambda patterns (unless these add significant value.)
|
||||
7. CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments.
|
||||
8. Importing new libraries unless there is a very good reason to do so.
|
||||
|
||||
## Seek to
|
||||
9. Extend functionality of existing code rather than creating new code.
|
||||
10. Prefer readability over terseness where important logic is concerned.
|
||||
11. Inline functions that are not used or are not likely to be used elsewhere in the codebase.
|
||||
12. Use clear and self-explanatory names for functions, variables, structs and classes.
|
||||
13. Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders.
|
||||
14. Provide as many comments as you feel that a competent programmer would need to understand what your code does.
|
||||
|
||||
# Maintainers
|
||||
Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.
|
||||
|
||||
## Code Review
|
||||
New contributors' PRs must be reviewed by at least two of the maintainers. Well established prior contributors can be reviewed by a single maintainer.
|
||||
|
||||
## Adding and Removing
|
||||
New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected.
|
||||
|
||||
Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote.
|
||||
|
||||
## Existing Maintainers
|
||||
* [JoelKatz](https://github.com/JoelKatz) (Ripple)
|
||||
* [Manojsdoshi](https://github.com/manojsdoshi) (Ripple)
|
||||
* [N3tc4t](https://github.com/n3tc4t) (XRPL Labs)
|
||||
* [Nikolaos D Bougalis](https://github.com/nbougalis) (Ripple)
|
||||
* [Nixer89](https://github.com/nixer89) (XRP Ledger Foundation)
|
||||
* [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation)
|
||||
* [Seelabs](https://github.com/seelabs) (Ripple)
|
||||
* [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation)
|
||||
* [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation)
|
||||
* [Ximinez](https://github.com/ximinez) (Ripple)
|
||||
@@ -56,3 +56,4 @@ git-subtree. See those directories' README files for more details.
|
||||
* [XRP Ledger Dev Portal](https://xrpl.org/)
|
||||
* [Setup and Installation](https://xrpl.org/install-rippled.html)
|
||||
* [Source Documentation (Doxygen)](https://ripple.github.io/rippled)
|
||||
* [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi)
|
||||
|
||||
230
RELEASENOTES.md
230
RELEASENOTES.md
@@ -5,7 +5,148 @@
|
||||
This document contains the release notes for `rippled`, the reference server implementation of the XRP Ledger protocol. To learn more about how to build, run or update a `rippled` server, visit https://xrpl.org/install-rippled.html
|
||||
|
||||
|
||||
Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/ripple/rippled/issues/new/choose)
|
||||
Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/xrplf/rippled/issues/new/choose)
|
||||
|
||||
# Introducing XRP Ledger version 1.9.2
|
||||
|
||||
Version 1.9.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes several fixes and improvements, including a second new fix amendment to correct a bug in Non-Fungible Tokens (NFTs) code, a new API method for order book changes, less noisy logging, and other small fixes.
|
||||
|
||||
<!-- BREAK -->
|
||||
|
||||
|
||||
## Action Required
|
||||
|
||||
This release introduces two new amendments to the XRP Ledger protocol. The first, **fixNFTokenNegOffer**, fixes a bug in code associated with the **NonFungibleTokensV1** amendment, originally introduced in [version 1.9.0](https://xrpl.org/blog/2022/rippled-1.9.0.html). The second, **NonFungibleTokensV1_1**, is a "roll-up" amendment that enables the **NonFungibleTokensV1** feature plus the two fix amendments associated with it, **fixNFTokenDirV1** and **fixNFTokenNegOffer**.
|
||||
|
||||
If you want to enable NFT code on the XRP Ledger Mainnet, you can vote in favor of only the **NonFungibleTokensV1_1** amendment to support enabling the feature and fixes together, without risk that the unfixed NFT code may become enabled first.
|
||||
|
||||
These amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators.
|
||||
|
||||
If you operate an XRP Ledger server, then you should upgrade to version 1.9.2 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network.
|
||||
|
||||
For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html).
|
||||
|
||||
## Install / Upgrade
|
||||
|
||||
On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html).
|
||||
|
||||
## Changelog
|
||||
|
||||
This release contains the following features and improvements.
|
||||
|
||||
- **Introduce fixNFTokenNegOffer amendment.** This amendment fixes a bug in the Non-Fungible Tokens (NFTs) functionality provided by the NonFungibleTokensV1 amendment (not currently enabled on Mainnet). The bug allowed users to place offers to buy tokens for negative amounts of money when using Brokered Mode. Anyone who accepted such an offer would transfer the token _and_ pay money. This amendment explicitly disallows offers to buy or sell NFTs for negative amounts of money, and returns an appropriate error code. This also corrects the error code returned when placing offers to buy or sell NFTs for negative amounts in Direct Mode. ([8266d9d](https://github.com/XRPLF/rippled/commit/8266d9d598d19f05e1155956b30ca443c27e119e))
|
||||
- **Introduce `NonFungibleTokensV1_1` amendment.** This amendment encompasses three NFT-related amendments: the original NonFungibleTokensV1 amendment (from version 1.9.0), the fixNFTokenDirV1 amendment (from version 1.9.1), and the new fixNFTokenNegOffer amendment from this release. This amendment contains no changes other than enabling those three amendments together; this allows validators to vote in favor of _only_ enabling the feature and fixes at the same time. ([59326bb](https://github.com/XRPLF/rippled/commit/59326bbbc552287e44b3a0d7b8afbb1ddddb3e3b))
|
||||
- **Handle invalid port numbers.** If the user specifies a URL with an invalid port number, the server would silently attempt to use port 0 instead. Now it raises an error instead. This affects admin API methods and config file parameters for downloading history shards and specifying validator list sites. ([#4213](https://github.com/XRPLF/rippled/pull/4213))
|
||||
- **Reduce log noisiness.** Decreased the severity of benign log messages in several places: "addPathsForType" messages during regular operation, expected errors during unit tests, and missing optional documentation components when compiling from source. ([#4178](https://github.com/XRPLF/rippled/pull/4178), [#4166](https://github.com/XRPLF/rippled/pull/4166), [#4180](https://github.com/XRPLF/rippled/pull/4180))
|
||||
- **Fix race condition in history shard implementation and support clang's ThreadSafetyAnalysis tool.** Added build settings so that developers can use this feature of the clang compiler to analyze the code for correctness, and fix an error found by this tool, which was the source of rare crashes in unit tests. ([#4188](https://github.com/XRPLF/rippled/pull/4188))
|
||||
- **Prevent crash when rotating a database with missing data.** When rotating databases, a missing entry could cause the server to crash. While there should never be a missing database entry, this change keeps the server running by aborting database rotation. ([#4182](https://github.com/XRPLF/rippled/pull/4182))
|
||||
- **Fix bitwise comparison in OfferCreate.** Fixed an expression that incorrectly used a bitwise comparison for two boolean values rather than a true boolean comparison. The outcome of the two comparisons is equivalent, so this is not a transaction processing change, but the bitwise comparison relied on compilers to implicitly fix the expression. ([#4183](https://github.com/XRPLF/rippled/pull/4183))
|
||||
- **Disable cluster timer when not in a cluster.** Disabled a timer that was unused on servers not running in clustered mode. The functionality of clustered servers is unchanged. ([#4173](https://github.com/XRPLF/rippled/pull/4173))
|
||||
- **Limit how often to process peer discovery messages.** In the peer-to-peer network, servers periodically share IP addresses of their peers with each other to facilitate peer discovery. It is not necessary to process these types of messages too often; previously, the code tracked whether it needed to process new messages of this type but always processed them anyway. With this change, the server no longer processes peer discovery messages if it has done so recently. ([#4202](https://github.com/XRPLF/rippled/pull/4202))
|
||||
- **Improve STVector256 deserialization.** Optimized the processing of this data type in protocol messages. This data type is used in several types of ledger entry that are important for bookkeeping, including directory pages that track other ledger types, amendments tracking, and the ledger hashes history. ([#4204](https://github.com/XRPLF/rippled/pull/4204))
|
||||
- **Fix and refactor spinlock code.** The spinlock code, which protects the `SHAMapInnerNode` child lists, had a mistake that allowed the same child to be repeatedly locked under some circumstances. Fixed this bug and improved the spinlock code to make it easier to use correctly and easier to verify that the code works correctly. ([#4201](https://github.com/XRPLF/rippled/pull/4201))
|
||||
- **Improve comments and contributor documentation.** Various minor documentation changes including some to reflect the fact that the source code repository is now owned by the XRP Ledger Foundation. ([#4214](https://github.com/XRPLF/rippled/pull/4214), [#4179](https://github.com/XRPLF/rippled/pull/4179), [#4222](https://github.com/XRPLF/rippled/pull/4222))
|
||||
- **Introduces a new API book_changes to provide information in a format that is useful for building charts that highlight DEX activity at a per-ledger level.** ([#4212](https://github.com/XRPLF/rippled/pull/4212))
|
||||
|
||||
## Contributions
|
||||
|
||||
### GitHub
|
||||
|
||||
The public source code repository for `rippled` is hosted on GitHub at <https://github.com/XRPLF/rippled>.
|
||||
|
||||
We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value.
|
||||
|
||||
### Credits
|
||||
|
||||
The following people contributed directly to this release:
|
||||
|
||||
- Chenna Keshava B S <ckbs.keshava56@gmail.com>
|
||||
- Ed Hennis <ed@ripple.com>
|
||||
- Ikko Ashimine <eltociear@gmail.com>
|
||||
- Nik Bougalis <nikb@bougalis.net>
|
||||
- Richard Holland <richard.holland@starstone.co.nz>
|
||||
- Scott Schurr <scott@ripple.com>
|
||||
- Scott Determan <scott.determan@yahoo.com>
|
||||
|
||||
For a real-time view of all lifetime contributors, including links to the commits made by each, please visit the "Contributors" section of the GitHub repository: <https://github.com/XRPLF/rippled/graphs/contributors>.
|
||||
|
||||
# Introducing XRP Ledger version 1.9.1
|
||||
|
||||
Version 1.9.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes several important fixes, including a fix for a syncing issue from 1.9.0, a new fix amendment to correct a bug in the new Non-Fungible Tokens (NFTs) code, and a new amendment to allow multi-signing by up to 32 signers.
|
||||
|
||||
<!-- BREAK -->
|
||||
|
||||
|
||||
## Action Required
|
||||
|
||||
This release introduces two new amendments to the XRP Ledger protocol. These amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators.
|
||||
|
||||
If you operate an XRP Ledger server, then you should upgrade to version 1.9.1 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network.
|
||||
|
||||
The **fixNFTokenDirV1** amendment fixes a bug in code associated with the **NonFungibleTokensV1** amendment, so the fixNFTokenDirV1 amendment should be enabled first. All validator operators are encouraged to [configure amendment voting](https://xrpl.org/configure-amendment-voting.html) to oppose the NonFungibleTokensV1 amendment until _after_ the fixNFTokenDirV1 amendment has become enabled. For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html).
|
||||
|
||||
The **ExpandedSignerList** amendment extends the ledger's built-in multi-signing functionality so that each list can contain up to 32 entries instead of the current limit of 8. Additionally, this amendment allows each signer to have an arbitrary 256-bit data field associated with it. This data can be used to identify the signer or provide other metadata that is useful for organizations, smart contracts, or other purposes.
|
||||
|
||||
## Install / Upgrade
|
||||
|
||||
On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html).
|
||||
|
||||
## Changelog
|
||||
|
||||
This release contains the following features and improvements.
|
||||
|
||||
## New Features and Amendments
|
||||
|
||||
- **Introduce fixNFTokenDirV1 Amendment** - This amendment fixes an off-by-one error that occurred in some corner cases when determining which `NFTokenPage` an `NFToken` object belongs on. It also adjusts the constraints of `NFTokenPage` invariant checks, so that certain error cases fail with a suitable error code such as `tecNO_SUITABLE_TOKEN_PAGE` instead of failing with a `tecINVARIANT_FAILED` error code. ([#4155](https://github.com/ripple/rippled/pull/4155))
|
||||
|
||||
- **Introduce ExpandedSignerList Amendment** - This amendment expands the maximum signer list size to 32 entries and allows each signer to have an optional 256-bit `WalletLocator` field containing arbitrary data. ([#4097](https://github.com/ripple/rippled/pull/4097))
|
||||
|
||||
- **Pause online deletion rather than canceling it if the server fails health check** - The server stops performing online deletion of old ledger history if the server fails its internal health check during this time. Online deletion can now resume after the server recovers, rather than having to start over. ([#4139](https://github.com/ripple/rippled/pull/4139))
|
||||
|
||||
|
||||
## Bug Fixes and Performance Improvements
|
||||
|
||||
- **Fix performance issues introduced in 1.9.0** - Readjusts some parameters of the ledger acquisition engine to revert some changes introduced in 1.9.0 that had adverse effects on some systems, including causing some systems to fail to sync to the network. ([#4152](https://github.com/ripple/rippled/pull/4152))
|
||||
|
||||
- **Improve Memory Efficiency of Path Finding** - Finding paths for cross-currency payments is a resource-intensive operation. While that remains true, this fix improves memory usage of pathfinding by discarding trust line results that cannot be used before those results are fully loaded or cached. ([#4111](https://github.com/ripple/rippled/pull/4111))
|
||||
|
||||
- **Fix incorrect CMake behavior on Windows when platform is unspecified or x64** - Fixes handling of platform selection when using the cmake-gui tool to build on Windows. The generator expects `Win64` but the GUI only provides `x64` as an option, which raises an error. This fix only raises an error if the platform is `Win32` instead, allowing the generation of solution files to succeed. ([#4150](https://github.com/ripple/rippled/pull/4150))
|
||||
|
||||
- **Fix test failures with newer MSVC compilers on Windows** - Fixes some cases where the API handler code used string pointer comparisons, which may not work correctly with some versions of the MSVC compiler. ([#4149](https://github.com/ripple/rippled/pull/4149))
|
||||
|
||||
- **Update minimum Boost version to 1.71.0** - This release is compatible with Boost library versions 1.71.0 through 1.77.0. The build configuration and documentation have been updated to reflect this. ([#4134](https://github.com/ripple/rippled/pull/4134))
|
||||
|
||||
- **Fix unit test failures for DatabaseDownloader** - Increases a timeout in the `DatabaseDownloader` code and adjusts unit tests so that the code does not return spurious failures, and more data is logged if it does fail. ([#4021](https://github.com/ripple/rippled/pull/4021))
|
||||
|
||||
- **Refactor relational database interface** - Improves code comments, naming, and organization of the module that interfaces with relational databases (such as the SQLite database used for tracking transaction history). ([#3965](https://github.com/ripple/rippled/pull/3965))
|
||||
|
||||
|
||||
## Contributions
|
||||
|
||||
### GitHub
|
||||
|
||||
The public source code repository for `rippled` is hosted on GitHub at <https://github.com/ripple/rippled>.
|
||||
|
||||
We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value.
|
||||
|
||||
|
||||
### Credits
|
||||
|
||||
The following people contributed directly to this release:
|
||||
|
||||
- Devon White <dwhite@ripple.com>
|
||||
- Ed Hennis <ed@ripple.com>
|
||||
- Gregory Popovitch <greg7mdp@gmail.com>
|
||||
- Mark Travis <mtravis@ripple.com>
|
||||
- Manoj Doshi <mdoshi@ripple.com>
|
||||
- Nik Bougalis <nikb@bougalis.net>
|
||||
- Richard Holland <richard.holland@starstone.co.nz>
|
||||
- Scott Schurr <scott@ripple.com>
|
||||
|
||||
For a real-time view of all lifetime contributors, including links to the commits made by each, please visit the "Contributors" section of the GitHub repository: <https://github.com/ripple/rippled/graphs/contributors>.
|
||||
|
||||
We welcome external contributions and are excited to see the broader XRP Ledger community continue to grow and thrive.
|
||||
|
||||
|
||||
# Change log
|
||||
|
||||
@@ -13,13 +154,98 @@ Have new ideas? Need help with setting up your node? Come visit us [here](https:
|
||||
|
||||
# Releases
|
||||
|
||||
## Version 1.9.0
|
||||
This is the 1.9.0 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release brings several features and improvements.
|
||||
|
||||
### New and Improved Features
|
||||
- **Introduce NFT support (XLS020):** This release introduces support for non-fungible tokens, currently available to the developer community for broader review and testing. Developers can create applications that allow users to mint, transfer, and ultimately burn (if desired) NFTs on the XRP Ledger. You can try out the new NFT transactions using the [nft-devnet](https://xrpl.org/xrp-testnet-faucet.html). Note that some fields and error codes from earlier releases of the supporting code have been refactored for this release, shown in the Code Refactoring section, below. [70779f](https://github.com/ripple/rippled/commit/70779f6850b5f33cdbb9cf4129bc1c259af0013e)
|
||||
|
||||
- **Simplify the Job Queue:** This is a refactor aimed at cleaning up and simplifying the existing job queue. Currently, all jobs are canceled at the same time and in the same way, so this commit removes the unnecessary per-job cancellation token. [#3656](https://github.com/ripple/rippled/pull/3656)
|
||||
|
||||
- **Optimize trust line caching:** The existing trust line caching code was suboptimal in that it stored redundant information, pinned SLEs into memory, and required multiple memory allocations per cached object. This commit eliminates redundant data, reduces the size of cached objects, unpins SLEs from memory, and uses value types to avoid the need for `std::shared_ptr`. As a result of these changes, the effective size of a cached object, including the overhead of the memory allocator and the `std::shared_ptr`, should be reduced by at least 64 bytes. This is significant, as there can easily be tens of millions of these objects. [4d5459](https://github.com/ripple/rippled/commit/4d5459d041da8f5a349c5f458d664e5865e1f1b5)
|
||||
|
||||
- **Incremental improvements to pathfinding memory usage:** This commit aborts background pathfinding when closed or disconnected, exits the pathfinding job thread if there are no requests left, does not create the pathfinding job if there are no requests, and refactors to remove the circular dependency between InfoSub and PathRequest. [#4111](https://github.com/ripple/rippled/pull/4111)
|
||||
|
||||
- **Improve deterministic transaction sorting in TxQ:** This commit ensures that transactions with the same fee level are sorted by TxID XORed with the parent ledger hash, the TxQ is re-sorted after every ledger, and attempts to future-proof the TxQ tie-breaking test. [#4077](https://github.com/ripple/rippled/pull/4077)
|
||||
|
||||
- **Improve stop signaling for Application:** [34ca45](https://github.com/ripple/rippled/commit/34ca45713244d0defc39549dd43821784b2a5c1d)
|
||||
|
||||
- **Eliminate SHAMapInnerNode lock contention:** The `SHAMapInnerNode` class had a global mutex to protect the array of node children. Profiling suggested that around 4% of all attempts to lock the global would block. This commit removes that global mutex, and replaces it with a new per-node 16-way spinlock (implemented so as not to affect the size of an inner node object), effectively eliminating the lock contention. [1b9387](https://github.com/ripple/rippled/commit/1b9387eddc1f52165d3243d2ace9be0c62495eea)
|
||||
|
||||
- **Improve ledger-fetching logic:** When fetching ledgers, the existing code would isolate the peer that sent the most useful responses, and issue follow-up queries only to that peer. This commit increases the query aggressiveness, and changes the mechanism used to select which peers to issue follow-up queries to so as to more evenly spread the load among those peers that provided useful responses. [48803a](https://github.com/ripple/rippled/commit/48803a48afc3bede55d71618c2ee38fd9dbfd3b0)
|
||||
|
||||
- **Simplify and improve order book tracking:** The order book tracking code would use `std::shared_ptr` to track the lifetime of objects. This commit changes the logic to eliminate the overhead of `std::shared_ptr` by using value types, resulting in significant memory savings. [b9903b](https://github.com/ripple/rippled/commit/b9903bbcc483a384decf8d2665f559d123baaba2)
|
||||
|
||||
- **Negative cache support for node store:** This commit allows the cache to service requests for nodes that were previously looked up but not found, reducing the need to perform I/O in several common scenarios. [3eb8aa](https://github.com/ripple/rippled/commit/3eb8aa8b80bd818f04c99cee2cfc243192709667)
|
||||
|
||||
- **Improve asynchronous database handlers:** This commit optimizes the way asynchronous node store operations are processed, both by reducing the number of times locks are held and by minimizing the number of memory allocations and data copying. [6faaa9](https://github.com/ripple/rippled/commit/6faaa91850d6b2eb9fbf16c1256bf7ef11ac4646)
|
||||
|
||||
- **Cleanup AcceptedLedger and AcceptedLedgerTx:** This commit modernizes the `AcceptedLedger` and `AcceptedLedgerTx` classes, reduces their memory footprint, and reduces unnecessary dynamic memory allocations. [8f5868](https://github.com/ripple/rippled/commit/8f586870917818133924bf2e11acab5321c2b588)
|
||||
|
||||
### Code Refactoring
|
||||
|
||||
This release includes name changes in the NFToken API for SFields, RPC return labels, and error codes for clarity and consistency. To refactor your code, migrate the names of these items to the new names as listed below.
|
||||
|
||||
#### `SField` name changes:
|
||||
* `TokenTaxon -> NFTokenTaxon`
|
||||
* `MintedTokens -> MintedNFTokens`
|
||||
* `BurnedTokens -> BurnedNFTokens`
|
||||
* `TokenID -> NFTokenID`
|
||||
* `TokenOffers -> NFTokenOffers`
|
||||
* `BrokerFee -> NFTokenBrokerFee`
|
||||
* `Minter -> NFTokenMinter`
|
||||
* `NonFungibleToken -> NFToken`
|
||||
* `NonFungibleTokens -> NFTokens`
|
||||
* `BuyOffer -> NFTokenBuyOffer`
|
||||
* `SellOffer -> NFTokenSellOffer`
|
||||
* `OfferNode -> NFTokenOfferNode`
|
||||
|
||||
#### RPC return labels
|
||||
* `tokenid -> nft_id`
|
||||
* `index -> nft_offer_index`
|
||||
|
||||
#### Error codes
|
||||
* `temBAD_TRANSFER_FEE -> temBAD_NFTOKEN_TRANSFER_FEE`
|
||||
* `tefTOKEN_IS_NOT_TRANSFERABLE -> tefNFTOKEN_IS_NOT_TRANSFERABLE`
|
||||
* `tecNO_SUITABLE_PAGE -> tecNO_SUITABLE_NFTOKEN_PAGE`
|
||||
* `tecBUY_SELL_MISMATCH -> tecNFTOKEN_BUY_SELL_MISMATCH`
|
||||
* `tecOFFER_TYPE_MISMATCH -> tecNFTOKEN_OFFER_TYPE_MISMATCH`
|
||||
* `tecCANT_ACCEPT_OWN_OFFER -> tecCANT_ACCEPT_OWN_NFTOKEN_OFFER`
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
- **Fix deletion of orphan node store directories:** Orphaned node store directories should only be deleted if the proper node store directories are confirmed to exist. [06e87e](https://github.com/ripple/rippled/commit/06e87e0f6add5b880d647e14ab3d950decfcf416)
|
||||
|
||||
## Version 1.8.5
|
||||
This is the 1.8.5 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release includes fixes and updates for stability and security, and improvements to build scripts. There are no user-facing API or protocol changes in this release.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
This release contains the following bug fixes and under-the-hood improvements:
|
||||
|
||||
- **Correct TaggedPointer move constructor:** Fixes a bug in unused code for the TaggedPointer class. The old code would fail if a caller explicitly tried to remove a child that is not actually part of the node. (227a12d)
|
||||
|
||||
- **Ensure protocol buffer prerequisites are present:** The build scripts and packages now properly handle Protobuf packages and various prerequisites. Prior to this change, building on Ubuntu 21.10 Impish Indri would fail unless the `libprotoc-dev` package was installed. (e06465f)
|
||||
|
||||
- **Improve handling of endpoints during peer discovery.** This hardens and improves handling of incoming messages on the peer protocol. (289bc0a)
|
||||
|
||||
- **Run tests on updated linux distros:** Test builds now run on Rocky Linux 8, Fedora 34 and 35, Ubuntu 18, 20, and 22, and Debian 9, 10, and 11. (a9ee802)
|
||||
|
||||
- **Avoid dereferencing empty optional in ReportingETL:** Fixes a bug in Reporting Mode that could dereference an empty optional value when throwing an error. (cdc215d)
|
||||
|
||||
- **Correctly add GIT_COMMIT_HASH into version string:** When building the server from a non-tagged release, the build files now add the commit ID in a way that follows the semantic-versioning standard, and correctly handle the case where the commit hash ID cannot be retrieved. (d23d37f)
|
||||
|
||||
- **Update RocksDB to version 6.27.3:** Updates the version of RocksDB included in the server from 6.7.3 (which was released on 2020-03-18) to 6.27.3 (released 2021-12-10).
|
||||
|
||||
|
||||
|
||||
## Version 1.8.4
|
||||
This is the 1.8.4 release of `rippled`, the reference implementation of the XRP Ledger protocol.
|
||||
|
||||
This release corrects a technical flaw introduced with 1.8.3 that may result in failures if the newly-introduced 'fast loading' is enabled. The release also adjusts default parameters used to configure the pathfinding engine to reduce resource usage.
|
||||
|
||||
### Bug Fixes
|
||||
- **Adjust mutex scope in `walkMapParallel`**: This commit corrects a technical flaw introduced with commit 7c12f0135897361398917ad2c8cda888249d42ae that would result in undefined behavior if the server operator configured their server to use the 'fast loading' mechanism introduced with 1.8.3.
|
||||
- **Adjust mutex scope in `walkMapParallel`**: This commit corrects a technical flaw introduced with commit [7c12f0135897361398917ad2c8cda888249d42ae] that would result in undefined behavior if the server operator configured their server to use the 'fast loading' mechanism introduced with 1.8.3.
|
||||
|
||||
- **Adjust pathfinding configuration defaults**: This commit adjusts the default configuration of the pathfinding engine, to account for the size of the XRP Ledger mainnet. Unless explicitly overridden, the changes mean that pathfinding operations will return fewer, shallower paths than previous releases.
|
||||
|
||||
|
||||
@@ -200,9 +200,19 @@
|
||||
#
|
||||
# admin = [ IP, IP, IP, ... ]
|
||||
#
|
||||
# A comma-separated list of IP addresses.
|
||||
# A comma-separated list of IP addresses or subnets. Subnets
|
||||
# should be represented in "slash" notation, such as:
|
||||
# 10.0.0.0/8
|
||||
# 172.16.0.0/12
|
||||
# 192.168.0.0/16
|
||||
# Those examples are ipv4, but ipv6 is also supported.
|
||||
# When configuring subnets, the address must match the
|
||||
# underlying network address. Otherwise, the desired IP range is
|
||||
# ambiguous. For example, 10.1.2.3/24 has a network address of
|
||||
# 10.1.2.0. Therefore, that subnet should be configured as
|
||||
# 10.1.2.0/24.
|
||||
#
|
||||
# When set, grants administrative command access to the specified IP
|
||||
# When set, grants administrative command access to the specified
|
||||
# addresses. These commands may be issued over http, https, ws, or wss
|
||||
# if configured on the port. If not provided, the default is to not allow
|
||||
# administrative commands.
|
||||
@@ -233,9 +243,10 @@
|
||||
#
|
||||
# secure_gateway = [ IP, IP, IP, ... ]
|
||||
#
|
||||
# A comma-separated list of IP addresses.
|
||||
# A comma-separated list of IP addresses or subnets. See the
|
||||
# details for the "admin" option above.
|
||||
#
|
||||
# When set, allows the specified IP addresses to pass HTTP headers
|
||||
# When set, allows the specified addresses to pass HTTP headers
|
||||
# containing username and remote IP address for each session. If a
|
||||
# non-empty username is passed in this way, then resource controls
|
||||
# such as those often resulting in "tooBusy" errors will be lifted. However,
|
||||
@@ -250,9 +261,9 @@
|
||||
# proxies. Since rippled trusts these hosts, they must be
|
||||
# responsible for properly authenticating the remote user.
|
||||
#
|
||||
# The same IP address cannot be used in both "admin" and "secure_gateway"
|
||||
# lists for the same port. In this case, rippled will abort with an error
|
||||
# message to the console shortly after startup
|
||||
# If some IP addresses are included for both "admin" and
|
||||
# "secure_gateway" connections, then they will be treated as
|
||||
# "admin" addresses.
|
||||
#
|
||||
# ssl_key = <filename>
|
||||
# ssl_cert = <filename>
|
||||
@@ -1129,17 +1140,10 @@
|
||||
# The online delete process checks periodically
|
||||
# that rippled is still in sync with the network,
|
||||
# and that the validated ledger is less than
|
||||
# 'age_threshold_seconds' old. By default, if it
|
||||
# is not the online delete process aborts and
|
||||
# tries again later. If 'recovery_wait_seconds'
|
||||
# is set and rippled is out of sync, but likely to
|
||||
# recover quickly, then online delete will wait
|
||||
# this number of seconds for rippled to get back
|
||||
# into sync before it aborts.
|
||||
# Set this value if the node is otherwise staying
|
||||
# in sync, or recovering quickly, but the online
|
||||
# delete process is unable to finish.
|
||||
# Default is unset.
|
||||
# 'age_threshold_seconds' old. If not, then continue
|
||||
# sleeping for this number of seconds and
|
||||
# checking until healthy.
|
||||
# Default is 5.
|
||||
#
|
||||
# Optional keys for Cassandra:
|
||||
#
|
||||
|
||||
1703
cfg/rippled-reporting.cfg
Normal file
1703
cfg/rippled-reporting.cfg
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,3 +0,0 @@
|
||||
# Extras
|
||||
|
||||
These are not part of the official public Beast interface but they are used by the tests and some third party programs.
|
||||
@@ -1,170 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
|
||||
#ifndef BEAST_DOC_DEBUG_HPP
|
||||
#define BEAST_DOC_DEBUG_HPP
|
||||
|
||||
namespace beast {
|
||||
|
||||
#if BEAST_DOXYGEN
|
||||
|
||||
/// doc type (documentation debug helper)
|
||||
using doc_type = int;
|
||||
|
||||
/// doc enum (documentation debug helper)
|
||||
enum doc_enum
|
||||
{
|
||||
/// One (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// Two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc enum class (documentation debug helper)
|
||||
enum class doc_enum_class : unsigned
|
||||
{
|
||||
/// one (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc func (documentation debug helper)
|
||||
void doc_func();
|
||||
|
||||
/// doc class (documentation debug helper)
|
||||
struct doc_class
|
||||
{
|
||||
/// doc class member func (documentation debug helper)
|
||||
void func();
|
||||
};
|
||||
|
||||
/// (documentation debug helper)
|
||||
namespace nested {
|
||||
|
||||
/// doc type (documentation debug helper)
|
||||
using nested_doc_type = int;
|
||||
|
||||
/// doc enum (documentation debug helper)
|
||||
enum nested_doc_enum
|
||||
{
|
||||
/// One (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// Two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc enum class (documentation debug helper)
|
||||
enum class nested_doc_enum_class : unsigned
|
||||
{
|
||||
/// one (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc func (documentation debug helper)
|
||||
void nested_doc_func();
|
||||
|
||||
/// doc class (documentation debug helper)
|
||||
struct nested_doc_class
|
||||
{
|
||||
/// doc class member func (documentation debug helper)
|
||||
void func();
|
||||
};
|
||||
|
||||
} // nested
|
||||
|
||||
/** This is here to help troubleshoot doc/reference.xsl problems
|
||||
|
||||
Embedded references:
|
||||
|
||||
@li type @ref doc_type
|
||||
|
||||
@li enum @ref doc_enum
|
||||
|
||||
@li enum item @ref doc_enum::one
|
||||
|
||||
@li enum_class @ref doc_enum_class
|
||||
|
||||
@li enum_class item @ref doc_enum_class::one
|
||||
|
||||
@li func @ref doc_func
|
||||
|
||||
@li class @ref doc_class
|
||||
|
||||
@li class func @ref doc_class::func
|
||||
|
||||
@li nested type @ref nested::nested_doc_type
|
||||
|
||||
@li nested enum @ref nested::nested_doc_enum
|
||||
|
||||
@li nested enum item @ref nested::nested_doc_enum::one
|
||||
|
||||
@li nested enum_class @ref nested::nested_doc_enum_class
|
||||
|
||||
@li nested enum_class item @ref nested::nested_doc_enum_class::one
|
||||
|
||||
@li nested func @ref nested::nested_doc_func
|
||||
|
||||
@li nested class @ref nested::nested_doc_class
|
||||
|
||||
@li nested class func @ref nested::nested_doc_class::func
|
||||
*/
|
||||
void doc_debug();
|
||||
|
||||
namespace nested {
|
||||
|
||||
/** This is here to help troubleshoot doc/reference.xsl problems
|
||||
|
||||
Embedded references:
|
||||
|
||||
@li type @ref doc_type
|
||||
|
||||
@li enum @ref doc_enum
|
||||
|
||||
@li enum item @ref doc_enum::one
|
||||
|
||||
@li enum_class @ref doc_enum_class
|
||||
|
||||
@li enum_class item @ref doc_enum_class::one
|
||||
|
||||
@li func @ref doc_func
|
||||
|
||||
@li class @ref doc_class
|
||||
|
||||
@li class func @ref doc_class::func
|
||||
|
||||
@li nested type @ref nested_doc_type
|
||||
|
||||
@li nested enum @ref nested_doc_enum
|
||||
|
||||
@li nested enum item @ref nested_doc_enum::one
|
||||
|
||||
@li nested enum_class @ref nested_doc_enum_class
|
||||
|
||||
@li nested enum_class item @ref nested_doc_enum_class::one
|
||||
|
||||
@li nested func @ref nested_doc_func
|
||||
|
||||
@li nested class @ref nested_doc_class
|
||||
|
||||
@li nested class func @ref nested_doc_class::func
|
||||
*/
|
||||
void nested_doc_debug();
|
||||
|
||||
} // nested
|
||||
|
||||
#endif
|
||||
|
||||
} // beast
|
||||
|
||||
#endif
|
||||
@@ -131,9 +131,7 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
|
||||
acquiringLedger_ = hash;
|
||||
|
||||
app_.getJobQueue().addJob(
|
||||
jtADVANCE,
|
||||
"getConsensusLedger",
|
||||
[id = hash, &app = app_](Job&) {
|
||||
jtADVANCE, "getConsensusLedger", [id = hash, &app = app_]() {
|
||||
app.getInboundLedgers().acquire(
|
||||
id, 0, InboundLedger::Reason::CONSENSUS);
|
||||
});
|
||||
@@ -423,9 +421,7 @@ RCLConsensus::Adaptor::onAccept(
|
||||
Json::Value&& consensusJson)
|
||||
{
|
||||
app_.getJobQueue().addJob(
|
||||
jtACCEPT,
|
||||
"acceptLedger",
|
||||
[=, cj = std::move(consensusJson)](auto&) mutable {
|
||||
jtACCEPT, "acceptLedger", [=, cj = std::move(consensusJson)]() mutable {
|
||||
// Note that no lock is held or acquired during this job.
|
||||
// This is because generic Consensus guarantees that once a ledger
|
||||
// is accepted, the consensus results and capture by reference state
|
||||
@@ -636,7 +632,7 @@ RCLConsensus::Adaptor::doAccept(
|
||||
auto const lastVal = ledgerMaster_.getValidatedLedger();
|
||||
std::optional<Rules> rules;
|
||||
if (lastVal)
|
||||
rules.emplace(*lastVal, app_.config().features);
|
||||
rules = makeRulesGivenLedger(*lastVal, app_.config().features);
|
||||
else
|
||||
rules.emplace(app_.config().features);
|
||||
app_.openLedger().accept(
|
||||
|
||||
@@ -135,7 +135,7 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
|
||||
Application* pApp = &app_;
|
||||
|
||||
app_.getJobQueue().addJob(
|
||||
jtADVANCE, "getConsensusLedger", [pApp, hash](Job&) {
|
||||
jtADVANCE, "getConsensusLedger", [pApp, hash]() {
|
||||
pApp->getInboundLedgers().acquire(
|
||||
hash, 0, InboundLedger::Reason::CONSENSUS);
|
||||
});
|
||||
|
||||
@@ -19,8 +19,7 @@
|
||||
|
||||
#include <ripple/app/ledger/AcceptedLedger.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/chrono.h>
|
||||
#include <algorithm>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -29,29 +28,34 @@ AcceptedLedger::AcceptedLedger(
|
||||
Application& app)
|
||||
: mLedger(ledger)
|
||||
{
|
||||
transactions_.reserve(256);
|
||||
|
||||
auto insertAll = [&](auto const& txns) {
|
||||
auto const& idcache = app.accountIDCache();
|
||||
|
||||
for (auto const& item : txns)
|
||||
{
|
||||
insert(std::make_shared<AcceptedLedgerTx>(
|
||||
ledger,
|
||||
item.first,
|
||||
item.second,
|
||||
app.accountIDCache(),
|
||||
app.logs()));
|
||||
}
|
||||
transactions_.emplace_back(std::make_unique<AcceptedLedgerTx>(
|
||||
ledger, item.first, item.second, idcache));
|
||||
};
|
||||
|
||||
if (app.config().reporting())
|
||||
insertAll(flatFetchTransactions(*ledger, app));
|
||||
{
|
||||
auto const txs = flatFetchTransactions(*ledger, app);
|
||||
transactions_.reserve(txs.size());
|
||||
insertAll(txs);
|
||||
}
|
||||
else
|
||||
{
|
||||
transactions_.reserve(256);
|
||||
insertAll(ledger->txs);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
AcceptedLedger::insert(AcceptedLedgerTx::ref at)
|
||||
{
|
||||
assert(mMap.find(at->getIndex()) == mMap.end());
|
||||
mMap.insert(std::make_pair(at->getIndex(), at));
|
||||
std::sort(
|
||||
transactions_.begin(),
|
||||
transactions_.end(),
|
||||
[](auto const& a, auto const& b) {
|
||||
return a->getTxnSeq() < b->getTxnSeq();
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -41,43 +41,40 @@ namespace ripple {
|
||||
the result of the a consensus process (though haven't validated
|
||||
it yet).
|
||||
*/
|
||||
class AcceptedLedger
|
||||
class AcceptedLedger : public CountedObject<AcceptedLedger>
|
||||
{
|
||||
public:
|
||||
using pointer = std::shared_ptr<AcceptedLedger>;
|
||||
using ret = const pointer&;
|
||||
using map_t = std::map<int, AcceptedLedgerTx::pointer>;
|
||||
// mapt_t must be an ordered map!
|
||||
using value_type = map_t::value_type;
|
||||
using const_iterator = map_t::const_iterator;
|
||||
AcceptedLedger(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
Application& app);
|
||||
|
||||
public:
|
||||
std::shared_ptr<ReadView const> const&
|
||||
getLedger() const
|
||||
{
|
||||
return mLedger;
|
||||
}
|
||||
const map_t&
|
||||
getMap() const
|
||||
|
||||
std::size_t
|
||||
size() const
|
||||
{
|
||||
return mMap;
|
||||
return transactions_.size();
|
||||
}
|
||||
|
||||
int
|
||||
getTxnCount() const
|
||||
auto
|
||||
begin() const
|
||||
{
|
||||
return mMap.size();
|
||||
return transactions_.begin();
|
||||
}
|
||||
|
||||
AcceptedLedger(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
Application& app);
|
||||
auto
|
||||
end() const
|
||||
{
|
||||
return transactions_.end();
|
||||
}
|
||||
|
||||
private:
|
||||
void insert(AcceptedLedgerTx::ref);
|
||||
|
||||
std::shared_ptr<ReadView const> mLedger;
|
||||
map_t mMap;
|
||||
std::vector<std::unique_ptr<AcceptedLedgerTx>> transactions_;
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -18,7 +18,6 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/ledger/AcceptedLedgerTx.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/protocol/UintTypes.h>
|
||||
@@ -30,72 +29,30 @@ AcceptedLedgerTx::AcceptedLedgerTx(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& txn,
|
||||
std::shared_ptr<STObject const> const& met,
|
||||
AccountIDCache const& accountCache,
|
||||
Logs& logs)
|
||||
: mLedger(ledger)
|
||||
, mTxn(txn)
|
||||
, mMeta(std::make_shared<TxMeta>(
|
||||
txn->getTransactionID(),
|
||||
ledger->seq(),
|
||||
*met))
|
||||
, mAffected(mMeta->getAffectedAccounts(logs.journal("View")))
|
||||
, accountCache_(accountCache)
|
||||
, logs_(logs)
|
||||
AccountIDCache const& accountCache)
|
||||
: mTxn(txn)
|
||||
, mMeta(txn->getTransactionID(), ledger->seq(), *met)
|
||||
, mAffected(mMeta.getAffectedAccounts())
|
||||
{
|
||||
assert(!ledger->open());
|
||||
|
||||
mResult = mMeta->getResultTER();
|
||||
|
||||
Serializer s;
|
||||
met->add(s);
|
||||
mRawMeta = std::move(s.modData());
|
||||
|
||||
buildJson();
|
||||
}
|
||||
|
||||
AcceptedLedgerTx::AcceptedLedgerTx(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& txn,
|
||||
TER result,
|
||||
AccountIDCache const& accountCache,
|
||||
Logs& logs)
|
||||
: mLedger(ledger)
|
||||
, mTxn(txn)
|
||||
, mResult(result)
|
||||
, mAffected(txn->getMentionedAccounts())
|
||||
, accountCache_(accountCache)
|
||||
, logs_(logs)
|
||||
{
|
||||
assert(ledger->open());
|
||||
buildJson();
|
||||
}
|
||||
|
||||
std::string
|
||||
AcceptedLedgerTx::getEscMeta() const
|
||||
{
|
||||
assert(!mRawMeta.empty());
|
||||
return sqlBlobLiteral(mRawMeta);
|
||||
}
|
||||
|
||||
void
|
||||
AcceptedLedgerTx::buildJson()
|
||||
{
|
||||
mJson = Json::objectValue;
|
||||
mJson[jss::transaction] = mTxn->getJson(JsonOptions::none);
|
||||
|
||||
if (mMeta)
|
||||
{
|
||||
mJson[jss::meta] = mMeta->getJson(JsonOptions::none);
|
||||
mJson[jss::raw_meta] = strHex(mRawMeta);
|
||||
}
|
||||
mJson[jss::meta] = mMeta.getJson(JsonOptions::none);
|
||||
mJson[jss::raw_meta] = strHex(mRawMeta);
|
||||
|
||||
mJson[jss::result] = transHuman(mResult);
|
||||
mJson[jss::result] = transHuman(mMeta.getResultTER());
|
||||
|
||||
if (!mAffected.empty())
|
||||
{
|
||||
Json::Value& affected = (mJson[jss::affected] = Json::arrayValue);
|
||||
for (auto const& account : mAffected)
|
||||
affected.append(accountCache_.toBase58(account));
|
||||
affected.append(accountCache.toBase58(account));
|
||||
}
|
||||
|
||||
if (mTxn->getTxnType() == ttOFFER_CREATE)
|
||||
@@ -107,14 +64,21 @@ AcceptedLedgerTx::buildJson()
|
||||
if (account != amount.issue().account)
|
||||
{
|
||||
auto const ownerFunds = accountFunds(
|
||||
*mLedger,
|
||||
*ledger,
|
||||
account,
|
||||
amount,
|
||||
fhIGNORE_FREEZE,
|
||||
logs_.journal("View"));
|
||||
beast::Journal{beast::Journal::getNullSink()});
|
||||
mJson[jss::transaction][jss::owner_funds] = ownerFunds.getText();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::string
|
||||
AcceptedLedgerTx::getEscMeta() const
|
||||
{
|
||||
assert(!mRawMeta.empty());
|
||||
return sqlBlobLiteral(mRawMeta);
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -39,40 +39,22 @@ class Logs;
|
||||
- Which accounts are affected
|
||||
* This is used by InfoSub to report to clients
|
||||
- Cached stuff
|
||||
|
||||
@code
|
||||
@endcode
|
||||
|
||||
@see {uri}
|
||||
|
||||
@ingroup ripple_ledger
|
||||
*/
|
||||
class AcceptedLedgerTx
|
||||
class AcceptedLedgerTx : public CountedObject<AcceptedLedgerTx>
|
||||
{
|
||||
public:
|
||||
using pointer = std::shared_ptr<AcceptedLedgerTx>;
|
||||
using ref = const pointer&;
|
||||
|
||||
public:
|
||||
AcceptedLedgerTx(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const&,
|
||||
std::shared_ptr<STObject const> const&,
|
||||
AccountIDCache const&,
|
||||
Logs&);
|
||||
AcceptedLedgerTx(
|
||||
std::shared_ptr<ReadView const> const&,
|
||||
std::shared_ptr<STTx const> const&,
|
||||
TER,
|
||||
AccountIDCache const&,
|
||||
Logs&);
|
||||
AccountIDCache const&);
|
||||
|
||||
std::shared_ptr<STTx const> const&
|
||||
getTxn() const
|
||||
{
|
||||
return mTxn;
|
||||
}
|
||||
std::shared_ptr<TxMeta> const&
|
||||
TxMeta const&
|
||||
getMeta() const
|
||||
{
|
||||
return mMeta;
|
||||
@@ -97,45 +79,28 @@ public:
|
||||
TER
|
||||
getResult() const
|
||||
{
|
||||
return mResult;
|
||||
return mMeta.getResultTER();
|
||||
}
|
||||
std::uint32_t
|
||||
getTxnSeq() const
|
||||
{
|
||||
return mMeta->getIndex();
|
||||
}
|
||||
|
||||
bool
|
||||
isApplied() const
|
||||
{
|
||||
return bool(mMeta);
|
||||
}
|
||||
int
|
||||
getIndex() const
|
||||
{
|
||||
return mMeta ? mMeta->getIndex() : 0;
|
||||
return mMeta.getIndex();
|
||||
}
|
||||
std::string
|
||||
getEscMeta() const;
|
||||
Json::Value
|
||||
|
||||
Json::Value const&
|
||||
getJson() const
|
||||
{
|
||||
return mJson;
|
||||
}
|
||||
|
||||
private:
|
||||
std::shared_ptr<ReadView const> mLedger;
|
||||
std::shared_ptr<STTx const> mTxn;
|
||||
std::shared_ptr<TxMeta> mMeta;
|
||||
TER mResult;
|
||||
TxMeta mMeta;
|
||||
boost::container::flat_set<AccountID> mAffected;
|
||||
Blob mRawMeta;
|
||||
Json::Value mJson;
|
||||
AccountIDCache const& accountCache_;
|
||||
Logs& logs_;
|
||||
|
||||
void
|
||||
buildJson();
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -62,10 +62,9 @@ ConsensusTransSetSF::gotNode(
|
||||
auto stx = std::make_shared<STTx const>(std::ref(sit));
|
||||
assert(stx->getTransactionID() == nodeHash.as_uint256());
|
||||
auto const pap = &app_;
|
||||
app_.getJobQueue().addJob(
|
||||
jtTRANSACTION, "TXS->TXN", [pap, stx](Job&) {
|
||||
pap->getOPs().submitTransaction(stx);
|
||||
});
|
||||
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
|
||||
pap->getOPs().submitTransaction(stx);
|
||||
});
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
|
||||
@@ -39,9 +39,6 @@ class InboundLedger final : public TimeoutCounter,
|
||||
public:
|
||||
using clock_type = beast::abstract_clock<std::chrono::steady_clock>;
|
||||
|
||||
using PeerDataPairType =
|
||||
std::pair<std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData>>;
|
||||
|
||||
// These are the reasons we might acquire a ledger
|
||||
enum class Reason {
|
||||
HISTORY, // Acquiring past ledger
|
||||
@@ -193,7 +190,9 @@ private:
|
||||
|
||||
// Data we have received from peers
|
||||
std::mutex mReceivedDataLock;
|
||||
std::vector<PeerDataPairType> mReceivedData;
|
||||
std::vector<
|
||||
std::pair<std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData>>>
|
||||
mReceivedData;
|
||||
bool mReceiveDispatched;
|
||||
std::unique_ptr<PeerSet> mPeerSet;
|
||||
};
|
||||
|
||||
@@ -29,8 +29,8 @@
|
||||
#include <ripple/app/misc/HashRouter.h>
|
||||
#include <ripple/app/misc/LoadFeeTrack.h>
|
||||
#include <ripple/app/misc/NetworkOPs.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfacePostgres.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
|
||||
#include <ripple/app/rdb/backend/PostgresDatabase.h>
|
||||
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/contract.h>
|
||||
@@ -626,7 +626,7 @@ Ledger::setup(Config const& config)
|
||||
|
||||
try
|
||||
{
|
||||
rules_ = Rules(*this, config.features);
|
||||
rules_ = makeRulesGivenLedger(*this, config.features);
|
||||
}
|
||||
catch (SHAMapMissingNode const&)
|
||||
{
|
||||
@@ -774,7 +774,7 @@ Ledger::walkLedger(beast::Journal j, bool parallel) const
|
||||
else
|
||||
{
|
||||
if (parallel)
|
||||
stateMap_->walkMapParallel(missingNodes1, 32);
|
||||
return stateMap_->walkMapParallel(missingNodes1, 32);
|
||||
else
|
||||
stateMap_->walkMap(missingNodes1, 32);
|
||||
}
|
||||
@@ -930,9 +930,11 @@ saveValidatedLedger(
|
||||
return true;
|
||||
}
|
||||
|
||||
auto res = dynamic_cast<RelationalDBInterfaceSqlite*>(
|
||||
&app.getRelationalDBInterface())
|
||||
->saveValidatedLedger(ledger, current);
|
||||
auto const db = dynamic_cast<SQLiteDatabase*>(&app.getRelationalDatabase());
|
||||
if (!db)
|
||||
Throw<std::runtime_error>("Failed to get relational database");
|
||||
|
||||
auto const res = db->saveValidatedLedger(ledger, current);
|
||||
|
||||
// Clients can now trust the database for
|
||||
// information about this ledger sequence.
|
||||
@@ -981,10 +983,9 @@ pendSaveValidated(
|
||||
|
||||
// See if we can use the JobQueue.
|
||||
if (!isSynchronous &&
|
||||
app.getJobQueue().addJob(
|
||||
jobType, jobName, [&app, ledger, isCurrent](Job&) {
|
||||
saveValidatedLedger(app, ledger, isCurrent);
|
||||
}))
|
||||
app.getJobQueue().addJob(jobType, jobName, [&app, ledger, isCurrent]() {
|
||||
saveValidatedLedger(app, ledger, isCurrent);
|
||||
}))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
@@ -1054,7 +1055,7 @@ std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
getLatestLedger(Application& app)
|
||||
{
|
||||
const std::optional<LedgerInfo> info =
|
||||
app.getRelationalDBInterface().getNewestLedgerInfo();
|
||||
app.getRelationalDatabase().getNewestLedgerInfo();
|
||||
if (!info)
|
||||
return {std::shared_ptr<Ledger>(), {}, {}};
|
||||
return {loadLedgerHelper(*info, app, true), info->seq, info->hash};
|
||||
@@ -1064,7 +1065,7 @@ std::shared_ptr<Ledger>
|
||||
loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire)
|
||||
{
|
||||
if (std::optional<LedgerInfo> info =
|
||||
app.getRelationalDBInterface().getLedgerInfoByIndex(ledgerIndex))
|
||||
app.getRelationalDatabase().getLedgerInfoByIndex(ledgerIndex))
|
||||
{
|
||||
std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
@@ -1077,7 +1078,7 @@ std::shared_ptr<Ledger>
|
||||
loadByHash(uint256 const& ledgerHash, Application& app, bool acquire)
|
||||
{
|
||||
if (std::optional<LedgerInfo> info =
|
||||
app.getRelationalDBInterface().getLedgerInfoByHash(ledgerHash))
|
||||
app.getRelationalDatabase().getLedgerInfoByHash(ledgerHash))
|
||||
{
|
||||
std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
@@ -1166,9 +1167,12 @@ flatFetchTransactions(ReadView const& ledger, Application& app)
|
||||
return {};
|
||||
}
|
||||
|
||||
auto nodestoreHashes = dynamic_cast<RelationalDBInterfacePostgres*>(
|
||||
&app.getRelationalDBInterface())
|
||||
->getTxHashes(ledger.info().seq);
|
||||
auto const db =
|
||||
dynamic_cast<PostgresDatabase*>(&app.getRelationalDatabase());
|
||||
if (!db)
|
||||
Throw<std::runtime_error>("Failed to get relational database");
|
||||
|
||||
auto nodestoreHashes = db->getTxHashes(ledger.info().seq);
|
||||
|
||||
return flatFetchTransactions(app, nodestoreHashes);
|
||||
}
|
||||
|
||||
@@ -26,14 +26,6 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
// VFALCO TODO replace macros
|
||||
|
||||
#ifndef CACHED_LEDGER_NUM
|
||||
#define CACHED_LEDGER_NUM 96
|
||||
#endif
|
||||
|
||||
std::chrono::seconds constexpr CachedLedgerAge = std::chrono::minutes{2};
|
||||
|
||||
// FIXME: Need to clean up ledgers by index at some point
|
||||
|
||||
LedgerHistory::LedgerHistory(
|
||||
@@ -44,8 +36,8 @@ LedgerHistory::LedgerHistory(
|
||||
, mismatch_counter_(collector->make_counter("ledger.history", "mismatch"))
|
||||
, m_ledgers_by_hash(
|
||||
"LedgerCache",
|
||||
CACHED_LEDGER_NUM,
|
||||
CachedLedgerAge,
|
||||
app_.config().getValueFor(SizedItem::ledgerSize),
|
||||
std::chrono::seconds{app_.config().getValueFor(SizedItem::ledgerAge)},
|
||||
stopwatch(),
|
||||
app_.journal("TaggedCache"))
|
||||
, m_consensus_validated(
|
||||
@@ -523,13 +515,6 @@ LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
LedgerHistory::tune(int size, std::chrono::seconds age)
|
||||
{
|
||||
m_ledgers_by_hash.setTargetSize(size);
|
||||
m_ledgers_by_hash.setTargetAge(age);
|
||||
}
|
||||
|
||||
void
|
||||
LedgerHistory::clearLedgerCachePrior(LedgerIndex seq)
|
||||
{
|
||||
|
||||
@@ -70,13 +70,6 @@ public:
|
||||
LedgerHash
|
||||
getLedgerHash(LedgerIndex ledgerIndex);
|
||||
|
||||
/** Set the history cache's parameters
|
||||
@param size The target size of the cache
|
||||
@param age The target age of the cache, in seconds
|
||||
*/
|
||||
void
|
||||
tune(int size, std::chrono::seconds age);
|
||||
|
||||
/** Remove stale cache entries
|
||||
*/
|
||||
void
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#ifndef RIPPLE_APP_LEDGER_LEDGERHOLDER_H_INCLUDED
|
||||
#define RIPPLE_APP_LEDGER_LEDGERHOLDER_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/CountedObject.h>
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <mutex>
|
||||
|
||||
@@ -35,7 +36,7 @@ namespace ripple {
|
||||
way the object always holds a value. We can use the
|
||||
genesis ledger in all cases.
|
||||
*/
|
||||
class LedgerHolder
|
||||
class LedgerHolder : public CountedObject<LedgerHolder>
|
||||
{
|
||||
public:
|
||||
// Update the held ledger
|
||||
|
||||
@@ -219,8 +219,6 @@ public:
|
||||
bool
|
||||
getFullValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal);
|
||||
|
||||
void
|
||||
tune(int size, std::chrono::seconds age);
|
||||
void
|
||||
sweep();
|
||||
float
|
||||
@@ -301,7 +299,7 @@ private:
|
||||
setPubLedger(std::shared_ptr<Ledger const> const& l);
|
||||
|
||||
void
|
||||
tryFill(Job& job, std::shared_ptr<Ledger const> ledger);
|
||||
tryFill(std::shared_ptr<Ledger const> ledger);
|
||||
|
||||
void
|
||||
getFetchPack(LedgerIndex missing, InboundLedger::Reason reason);
|
||||
@@ -326,7 +324,7 @@ private:
|
||||
findNewLedgersToPublish(std::unique_lock<std::recursive_mutex>&);
|
||||
|
||||
void
|
||||
updatePaths(Job& job);
|
||||
updatePaths();
|
||||
|
||||
// Returns true if work started. Always called with m_mutex locked.
|
||||
// The passed lock is a reminder to callers.
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#ifndef RIPPLE_APP_LEDGER_LEDGERREPLAY_H_INCLUDED
|
||||
#define RIPPLE_APP_LEDGER_LEDGERREPLAY_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/CountedObject.h>
|
||||
#include <cstdint>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
@@ -29,7 +30,7 @@ namespace ripple {
|
||||
class Ledger;
|
||||
class STTx;
|
||||
|
||||
class LedgerReplay
|
||||
class LedgerReplay : public CountedObject<LedgerReplay>
|
||||
{
|
||||
std::shared_ptr<Ledger const> parent_;
|
||||
std::shared_ptr<Ledger const> replay_;
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <ripple/app/ledger/LedgerMaster.h>
|
||||
#include <ripple/app/ledger/OrderBookDB.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/misc/NetworkOPs.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/core/Config.h>
|
||||
#include <ripple/core/JobQueue.h>
|
||||
@@ -28,70 +29,72 @@
|
||||
namespace ripple {
|
||||
|
||||
OrderBookDB::OrderBookDB(Application& app)
|
||||
: app_(app), mSeq(0), j_(app.journal("OrderBookDB"))
|
||||
: app_(app), seq_(0), j_(app.journal("OrderBookDB"))
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
OrderBookDB::invalidate()
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
mSeq = 0;
|
||||
}
|
||||
|
||||
void
|
||||
OrderBookDB::setup(std::shared_ptr<ReadView const> const& ledger)
|
||||
{
|
||||
if (!app_.config().standalone() && app_.getOPs().isNeedNetworkLedger())
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
auto seq = ledger->info().seq;
|
||||
|
||||
// Do a full update every 256 ledgers
|
||||
if (mSeq != 0)
|
||||
{
|
||||
if (seq == mSeq)
|
||||
return;
|
||||
if ((seq > mSeq) && ((seq - mSeq) < 256))
|
||||
return;
|
||||
if ((seq < mSeq) && ((mSeq - seq) < 16))
|
||||
return;
|
||||
}
|
||||
|
||||
JLOG(j_.debug()) << "Advancing from " << mSeq << " to " << seq;
|
||||
|
||||
mSeq = seq;
|
||||
JLOG(j_.warn()) << "Eliding full order book update: no ledger";
|
||||
return;
|
||||
}
|
||||
|
||||
auto seq = seq_.load();
|
||||
|
||||
if (seq != 0)
|
||||
{
|
||||
if ((seq > ledger->seq()) && ((ledger->seq() - seq) < 25600))
|
||||
return;
|
||||
|
||||
if ((ledger->seq() <= seq) && ((seq - ledger->seq()) < 16))
|
||||
return;
|
||||
}
|
||||
|
||||
if (seq_.exchange(ledger->seq()) != seq)
|
||||
return;
|
||||
|
||||
JLOG(j_.debug()) << "Full order book update: " << seq << " to "
|
||||
<< ledger->seq();
|
||||
|
||||
if (app_.config().PATH_SEARCH_MAX != 0)
|
||||
{
|
||||
if (app_.config().standalone())
|
||||
update(ledger);
|
||||
else
|
||||
app_.getJobQueue().addJob(
|
||||
jtUPDATE_PF, "OrderBookDB::update", [this, ledger](Job&) {
|
||||
update(ledger);
|
||||
});
|
||||
jtUPDATE_PF,
|
||||
"OrderBookDB::update: " + std::to_string(ledger->seq()),
|
||||
[this, ledger]() { update(ledger); });
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
|
||||
{
|
||||
hash_set<uint256> seen;
|
||||
OrderBookDB::IssueToOrderBook destMap;
|
||||
OrderBookDB::IssueToOrderBook sourceMap;
|
||||
hash_set<Issue> XRPBooks;
|
||||
|
||||
JLOG(j_.debug()) << "OrderBookDB::update>";
|
||||
|
||||
if (app_.config().PATH_SEARCH_MAX == 0)
|
||||
return; // pathfinding has been disabled
|
||||
|
||||
// A newer full update job is pending
|
||||
if (auto const seq = seq_.load(); seq > ledger->seq())
|
||||
{
|
||||
// pathfinding has been disabled
|
||||
JLOG(j_.debug()) << "Eliding update for " << ledger->seq()
|
||||
<< " because of pending update to later " << seq;
|
||||
return;
|
||||
}
|
||||
|
||||
decltype(allBooks_) allBooks;
|
||||
decltype(xrpBooks_) xrpBooks;
|
||||
|
||||
allBooks.reserve(allBooks_.size());
|
||||
xrpBooks.reserve(xrpBooks_.size());
|
||||
|
||||
JLOG(j_.debug()) << "Beginning update (" << ledger->seq() << ")";
|
||||
|
||||
// walk through the entire ledger looking for orderbook entries
|
||||
int books = 0;
|
||||
int cnt = 0;
|
||||
|
||||
try
|
||||
{
|
||||
@@ -100,9 +103,8 @@ OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
|
||||
if (app_.isStopping())
|
||||
{
|
||||
JLOG(j_.info())
|
||||
<< "OrderBookDB::update exiting due to isStopping";
|
||||
std::lock_guard sl(mLock);
|
||||
mSeq = 0;
|
||||
<< "Update halted because the process is stopping";
|
||||
seq_.store(0);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -111,40 +113,38 @@ OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
|
||||
sle->getFieldH256(sfRootIndex) == sle->key())
|
||||
{
|
||||
Book book;
|
||||
|
||||
book.in.currency = sle->getFieldH160(sfTakerPaysCurrency);
|
||||
book.in.account = sle->getFieldH160(sfTakerPaysIssuer);
|
||||
book.out.account = sle->getFieldH160(sfTakerGetsIssuer);
|
||||
book.out.currency = sle->getFieldH160(sfTakerGetsCurrency);
|
||||
book.out.account = sle->getFieldH160(sfTakerGetsIssuer);
|
||||
|
||||
uint256 index = getBookBase(book);
|
||||
if (seen.insert(index).second)
|
||||
{
|
||||
auto orderBook = std::make_shared<OrderBook>(index, book);
|
||||
sourceMap[book.in].push_back(orderBook);
|
||||
destMap[book.out].push_back(orderBook);
|
||||
if (isXRP(book.out))
|
||||
XRPBooks.insert(book.in);
|
||||
++books;
|
||||
}
|
||||
allBooks[book.in].insert(book.out);
|
||||
|
||||
if (isXRP(book.out))
|
||||
xrpBooks.insert(book.in);
|
||||
|
||||
++cnt;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (SHAMapMissingNode const& mn)
|
||||
{
|
||||
JLOG(j_.info()) << "OrderBookDB::update: " << mn.what();
|
||||
std::lock_guard sl(mLock);
|
||||
mSeq = 0;
|
||||
JLOG(j_.info()) << "Missing node in " << ledger->seq()
|
||||
<< " during update: " << mn.what();
|
||||
seq_.store(0);
|
||||
return;
|
||||
}
|
||||
|
||||
JLOG(j_.debug()) << "OrderBookDB::update< " << books << " books found";
|
||||
JLOG(j_.debug()) << "Update completed (" << ledger->seq() << "): " << cnt
|
||||
<< " books found";
|
||||
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
mXRPBooks.swap(XRPBooks);
|
||||
mSourceMap.swap(sourceMap);
|
||||
mDestMap.swap(destMap);
|
||||
allBooks_.swap(allBooks);
|
||||
xrpBooks_.swap(xrpBooks);
|
||||
}
|
||||
|
||||
app_.getLedgerMaster().newOrderBookDB();
|
||||
}
|
||||
|
||||
@@ -152,60 +152,50 @@ void
|
||||
OrderBookDB::addOrderBook(Book const& book)
|
||||
{
|
||||
bool toXRP = isXRP(book.out);
|
||||
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
if (toXRP)
|
||||
{
|
||||
// We don't want to search through all the to-XRP or from-XRP order
|
||||
// books!
|
||||
for (auto ob : mSourceMap[book.in])
|
||||
{
|
||||
if (isXRP(ob->getCurrencyOut())) // also to XRP
|
||||
return;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto ob : mDestMap[book.out])
|
||||
{
|
||||
if (ob->getCurrencyIn() == book.in.currency &&
|
||||
ob->getIssuerIn() == book.in.account)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
uint256 index = getBookBase(book);
|
||||
auto orderBook = std::make_shared<OrderBook>(index, book);
|
||||
allBooks_[book.in].insert(book.out);
|
||||
|
||||
mSourceMap[book.in].push_back(orderBook);
|
||||
mDestMap[book.out].push_back(orderBook);
|
||||
if (toXRP)
|
||||
mXRPBooks.insert(book.in);
|
||||
xrpBooks_.insert(book.in);
|
||||
}
|
||||
|
||||
// return list of all orderbooks that want this issuerID and currencyID
|
||||
OrderBook::List
|
||||
std::vector<Book>
|
||||
OrderBookDB::getBooksByTakerPays(Issue const& issue)
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
auto it = mSourceMap.find(issue);
|
||||
return it == mSourceMap.end() ? OrderBook::List() : it->second;
|
||||
std::vector<Book> ret;
|
||||
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
if (auto it = allBooks_.find(issue); it != allBooks_.end())
|
||||
{
|
||||
ret.reserve(it->second.size());
|
||||
|
||||
for (auto const& gets : it->second)
|
||||
ret.push_back(Book(issue, gets));
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
OrderBookDB::getBookSize(Issue const& issue)
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
auto it = mSourceMap.find(issue);
|
||||
return it == mSourceMap.end() ? 0 : it->second.size();
|
||||
if (auto it = allBooks_.find(issue); it != allBooks_.end())
|
||||
return static_cast<int>(it->second.size());
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool
|
||||
OrderBookDB::isBookToXRP(Issue const& issue)
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
return mXRPBooks.count(issue) > 0;
|
||||
return xrpBooks_.count(issue) > 0;
|
||||
}
|
||||
|
||||
BookListeners::pointer
|
||||
@@ -247,63 +237,49 @@ OrderBookDB::processTxn(
|
||||
Json::Value const& jvObj)
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
if (alTx.getResult() == tesSUCCESS)
|
||||
|
||||
// For this particular transaction, maintain the set of unique
|
||||
// subscriptions that have already published it. This prevents sending
|
||||
// the transaction multiple times if it touches multiple ltOFFER
|
||||
// entries for the same book, or if it touches multiple books and a
|
||||
// single client has subscribed to those books.
|
||||
hash_set<std::uint64_t> havePublished;
|
||||
|
||||
for (auto const& node : alTx.getMeta().getNodes())
|
||||
{
|
||||
// For this particular transaction, maintain the set of unique
|
||||
// subscriptions that have already published it. This prevents sending
|
||||
// the transaction multiple times if it touches multiple ltOFFER
|
||||
// entries for the same book, or if it touches multiple books and a
|
||||
// single client has subscribed to those books.
|
||||
hash_set<std::uint64_t> havePublished;
|
||||
|
||||
// Check if this is an offer or an offer cancel or a payment that
|
||||
// consumes an offer.
|
||||
// Check to see what the meta looks like.
|
||||
for (auto& node : alTx.getMeta()->getNodes())
|
||||
try
|
||||
{
|
||||
try
|
||||
if (node.getFieldU16(sfLedgerEntryType) == ltOFFER)
|
||||
{
|
||||
if (node.getFieldU16(sfLedgerEntryType) == ltOFFER)
|
||||
{
|
||||
SField const* field = nullptr;
|
||||
|
||||
// We need a field that contains the TakerGets and TakerPays
|
||||
// parameters.
|
||||
if (node.getFName() == sfModifiedNode)
|
||||
field = &sfPreviousFields;
|
||||
else if (node.getFName() == sfCreatedNode)
|
||||
field = &sfNewFields;
|
||||
else if (node.getFName() == sfDeletedNode)
|
||||
field = &sfFinalFields;
|
||||
|
||||
if (field)
|
||||
auto process = [&, this](SField const& field) {
|
||||
if (auto data = dynamic_cast<STObject const*>(
|
||||
node.peekAtPField(field));
|
||||
data && data->isFieldPresent(sfTakerPays) &&
|
||||
data->isFieldPresent(sfTakerGets))
|
||||
{
|
||||
auto data = dynamic_cast<const STObject*>(
|
||||
node.peekAtPField(*field));
|
||||
|
||||
if (data && data->isFieldPresent(sfTakerPays) &&
|
||||
data->isFieldPresent(sfTakerGets))
|
||||
{
|
||||
// determine the OrderBook
|
||||
Book b{
|
||||
data->getFieldAmount(sfTakerGets).issue(),
|
||||
data->getFieldAmount(sfTakerPays).issue()};
|
||||
|
||||
auto listeners = getBookListeners(b);
|
||||
if (listeners)
|
||||
{
|
||||
listeners->publish(jvObj, havePublished);
|
||||
}
|
||||
}
|
||||
auto listeners = getBookListeners(
|
||||
{data->getFieldAmount(sfTakerGets).issue(),
|
||||
data->getFieldAmount(sfTakerPays).issue()});
|
||||
if (listeners)
|
||||
listeners->publish(jvObj, havePublished);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
JLOG(j_.info())
|
||||
<< "Fields not found in OrderBookDB::processTxn";
|
||||
};
|
||||
|
||||
// We need a field that contains the TakerGets and TakerPays
|
||||
// parameters.
|
||||
if (node.getFName() == sfModifiedNode)
|
||||
process(sfPreviousFields);
|
||||
else if (node.getFName() == sfCreatedNode)
|
||||
process(sfNewFields);
|
||||
else if (node.getFName() == sfDeletedNode)
|
||||
process(sfFinalFields);
|
||||
}
|
||||
}
|
||||
catch (std::exception const& ex)
|
||||
{
|
||||
JLOG(j_.info())
|
||||
<< "processTxn: field not found (" << ex.what() << ")";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@
|
||||
#include <ripple/app/ledger/AcceptedLedgerTx.h>
|
||||
#include <ripple/app/ledger/BookListeners.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/misc/OrderBook.h>
|
||||
#include <mutex>
|
||||
|
||||
namespace ripple {
|
||||
@@ -37,15 +36,13 @@ public:
|
||||
setup(std::shared_ptr<ReadView const> const& ledger);
|
||||
void
|
||||
update(std::shared_ptr<ReadView const> const& ledger);
|
||||
void
|
||||
invalidate();
|
||||
|
||||
void
|
||||
addOrderBook(Book const&);
|
||||
|
||||
/** @return a list of all orderbooks that want this issuerID and currencyID.
|
||||
*/
|
||||
OrderBook::List
|
||||
std::vector<Book>
|
||||
getBooksByTakerPays(Issue const&);
|
||||
|
||||
/** @return a count of all orderbooks that want this issuerID and
|
||||
@@ -68,22 +65,14 @@ public:
|
||||
const AcceptedLedgerTx& alTx,
|
||||
Json::Value const& jvObj);
|
||||
|
||||
using IssueToOrderBook = hash_map<Issue, OrderBook::List>;
|
||||
|
||||
private:
|
||||
void
|
||||
rawAddBook(Book const&);
|
||||
|
||||
Application& app_;
|
||||
|
||||
// by ci/ii
|
||||
IssueToOrderBook mSourceMap;
|
||||
|
||||
// by co/io
|
||||
IssueToOrderBook mDestMap;
|
||||
// Maps order books by "issue in" to "issue out":
|
||||
hardened_hash_map<Issue, hardened_hash_set<Issue>> allBooks_;
|
||||
|
||||
// does an order book to XRP exist
|
||||
hash_set<Issue> mXRPBooks;
|
||||
hash_set<Issue> xrpBooks_;
|
||||
|
||||
std::recursive_mutex mLock;
|
||||
|
||||
@@ -91,7 +80,7 @@ private:
|
||||
|
||||
BookToListenersMap mListeners;
|
||||
|
||||
std::uint32_t mSeq;
|
||||
std::atomic<std::uint32_t> seq_;
|
||||
|
||||
beast::Journal const j_;
|
||||
};
|
||||
|
||||
@@ -162,7 +162,7 @@ There are also indirect peer queries. If there have been timeouts while
|
||||
acquiring ledger data then a server may issue indirect queries. In that
|
||||
case the server receiving the indirect query passes the query along to any
|
||||
of its peers that may have the requested data. This is important if the
|
||||
network has a byzantine failure. If also helps protect the validation
|
||||
network has a byzantine failure. It also helps protect the validation
|
||||
network. A validator may need to get a peer set from one of the other
|
||||
validators, and indirect queries improve the likelihood of success with
|
||||
that.
|
||||
@@ -487,4 +487,3 @@ ledger(s) for missing nodes in the back end node store
|
||||
---
|
||||
|
||||
# References #
|
||||
|
||||
|
||||
@@ -33,7 +33,10 @@
|
||||
#include <ripple/resource/Fees.h>
|
||||
#include <ripple/shamap/SHAMapNodeID.h>
|
||||
|
||||
#include <boost/iterator/function_output_iterator.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <random>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -41,19 +44,19 @@ using namespace std::chrono_literals;
|
||||
|
||||
enum {
|
||||
// Number of peers to start with
|
||||
peerCountStart = 4
|
||||
peerCountStart = 5
|
||||
|
||||
// Number of peers to add on a timeout
|
||||
,
|
||||
peerCountAdd = 2
|
||||
peerCountAdd = 3
|
||||
|
||||
// how many timeouts before we give up
|
||||
,
|
||||
ledgerTimeoutRetriesMax = 10
|
||||
ledgerTimeoutRetriesMax = 6
|
||||
|
||||
// how many timeouts before we get aggressive
|
||||
,
|
||||
ledgerBecomeAggressiveThreshold = 6
|
||||
ledgerBecomeAggressiveThreshold = 4
|
||||
|
||||
// Number of nodes to find initially
|
||||
,
|
||||
@@ -65,11 +68,11 @@ enum {
|
||||
|
||||
// Number of nodes to request blindly
|
||||
,
|
||||
reqNodes = 8
|
||||
reqNodes = 12
|
||||
};
|
||||
|
||||
// millisecond for each ledger timeout
|
||||
auto constexpr ledgerAcquireTimeout = 2500ms;
|
||||
auto constexpr ledgerAcquireTimeout = 3000ms;
|
||||
|
||||
InboundLedger::InboundLedger(
|
||||
Application& app,
|
||||
@@ -527,7 +530,7 @@ InboundLedger::done()
|
||||
|
||||
// We hold the PeerSet lock, so must dispatch
|
||||
app_.getJobQueue().addJob(
|
||||
jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()](Job&) {
|
||||
jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
|
||||
if (self->complete_ && !self->failed_)
|
||||
{
|
||||
self->app_.getLedgerMaster().checkAccept(self->getLedger());
|
||||
@@ -601,7 +604,7 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
|
||||
tmBH.set_ledgerhash(hash_.begin(), hash_.size());
|
||||
for (auto const& p : need)
|
||||
{
|
||||
JLOG(journal_.warn()) << "Want: " << p.second;
|
||||
JLOG(journal_.debug()) << "Want: " << p.second;
|
||||
|
||||
if (!typeSet)
|
||||
{
|
||||
@@ -952,22 +955,23 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
|
||||
|
||||
try
|
||||
{
|
||||
auto const f = filter.get();
|
||||
|
||||
for (auto const& node : packet.nodes())
|
||||
{
|
||||
auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
|
||||
|
||||
if (!nodeID)
|
||||
{
|
||||
san.incInvalid();
|
||||
return;
|
||||
}
|
||||
throw std::runtime_error("data does not properly deserialize");
|
||||
|
||||
if (nodeID->isRoot())
|
||||
san += map.addRootNode(
|
||||
rootHash, makeSlice(node.nodedata()), filter.get());
|
||||
{
|
||||
san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
|
||||
}
|
||||
else
|
||||
san += map.addKnownNode(
|
||||
*nodeID, makeSlice(node.nodedata()), filter.get());
|
||||
{
|
||||
san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
|
||||
}
|
||||
|
||||
if (!san.isGood())
|
||||
{
|
||||
@@ -1120,19 +1124,19 @@ InboundLedger::processData(
|
||||
std::shared_ptr<Peer> peer,
|
||||
protocol::TMLedgerData& packet)
|
||||
{
|
||||
ScopedLockType sl(mtx_);
|
||||
|
||||
if (packet.type() == protocol::liBASE)
|
||||
{
|
||||
if (packet.nodes_size() < 1)
|
||||
if (packet.nodes().empty())
|
||||
{
|
||||
JLOG(journal_.warn()) << "Got empty header data";
|
||||
JLOG(journal_.warn()) << peer->id() << ": empty header data";
|
||||
peer->charge(Resource::feeInvalidRequest);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SHAMapAddNode san;
|
||||
|
||||
ScopedLockType sl(mtx_);
|
||||
|
||||
try
|
||||
{
|
||||
if (!mHaveHeader)
|
||||
@@ -1177,13 +1181,18 @@ InboundLedger::processData(
|
||||
if ((packet.type() == protocol::liTX_NODE) ||
|
||||
(packet.type() == protocol::liAS_NODE))
|
||||
{
|
||||
if (packet.nodes().size() == 0)
|
||||
std::string type = packet.type() == protocol::liTX_NODE ? "liTX_NODE: "
|
||||
: "liAS_NODE: ";
|
||||
|
||||
if (packet.nodes().empty())
|
||||
{
|
||||
JLOG(journal_.info()) << "Got response with no nodes";
|
||||
JLOG(journal_.info()) << peer->id() << ": response with no nodes";
|
||||
peer->charge(Resource::feeInvalidRequest);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ScopedLockType sl(mtx_);
|
||||
|
||||
// Verify node IDs and data are complete
|
||||
for (auto const& node : packet.nodes())
|
||||
{
|
||||
@@ -1198,14 +1207,10 @@ InboundLedger::processData(
|
||||
SHAMapAddNode san;
|
||||
receiveNode(packet, san);
|
||||
|
||||
if (packet.type() == protocol::liTX_NODE)
|
||||
{
|
||||
JLOG(journal_.debug()) << "Ledger TX node stats: " << san.get();
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(journal_.debug()) << "Ledger AS node stats: " << san.get();
|
||||
}
|
||||
JLOG(journal_.debug())
|
||||
<< "Ledger "
|
||||
<< ((packet.type() == protocol::liTX_NODE) ? "TX" : "AS")
|
||||
<< " node stats: " << san.get();
|
||||
|
||||
if (san.isUseful())
|
||||
progress_ = true;
|
||||
@@ -1217,20 +1222,100 @@ InboundLedger::processData(
|
||||
return -1;
|
||||
}
|
||||
|
||||
namespace detail {
|
||||
// Track the amount of useful data that each peer returns
|
||||
struct PeerDataCounts
|
||||
{
|
||||
// Map from peer to amount of useful the peer returned
|
||||
std::unordered_map<std::shared_ptr<Peer>, int> counts;
|
||||
// The largest amount of useful data that any peer returned
|
||||
int maxCount = 0;
|
||||
|
||||
// Update the data count for a peer
|
||||
void
|
||||
update(std::shared_ptr<Peer>&& peer, int dataCount)
|
||||
{
|
||||
if (dataCount <= 0)
|
||||
return;
|
||||
maxCount = std::max(maxCount, dataCount);
|
||||
auto i = counts.find(peer);
|
||||
if (i == counts.end())
|
||||
{
|
||||
counts.emplace(std::move(peer), dataCount);
|
||||
return;
|
||||
}
|
||||
i->second = std::max(i->second, dataCount);
|
||||
}
|
||||
|
||||
// Prune all the peers that didn't return enough data.
|
||||
void
|
||||
prune()
|
||||
{
|
||||
// Remove all the peers that didn't return at least half as much data as
|
||||
// the best peer
|
||||
auto const thresh = maxCount / 2;
|
||||
auto i = counts.begin();
|
||||
while (i != counts.end())
|
||||
{
|
||||
if (i->second < thresh)
|
||||
i = counts.erase(i);
|
||||
else
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
||||
// call F with the `peer` parameter with a random sample of at most n values
|
||||
// of the counts vector.
|
||||
template <class F>
|
||||
void
|
||||
sampleN(std::size_t n, F&& f)
|
||||
{
|
||||
if (counts.empty())
|
||||
return;
|
||||
|
||||
auto outFunc = [&f](auto&& v) { f(v.first); };
|
||||
std::minstd_rand rng{std::random_device{}()};
|
||||
#if _MSC_VER
|
||||
std::vector<std::pair<std::shared_ptr<Peer>, int>> s;
|
||||
s.reserve(n);
|
||||
std::sample(
|
||||
counts.begin(), counts.end(), std::back_inserter(s), n, rng);
|
||||
for (auto& v : s)
|
||||
{
|
||||
outFunc(v);
|
||||
}
|
||||
#else
|
||||
std::sample(
|
||||
counts.begin(),
|
||||
counts.end(),
|
||||
boost::make_function_output_iterator(outFunc),
|
||||
n,
|
||||
rng);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
/** Process pending TMLedgerData
|
||||
Query the 'best' peer
|
||||
Query the a random sample of the 'best' peers
|
||||
*/
|
||||
void
|
||||
InboundLedger::runData()
|
||||
{
|
||||
std::shared_ptr<Peer> chosenPeer;
|
||||
int chosenPeerCount = -1;
|
||||
// Maximum number of peers to request data from
|
||||
constexpr std::size_t maxUsefulPeers = 6;
|
||||
|
||||
std::vector<PeerDataPairType> data;
|
||||
decltype(mReceivedData) data;
|
||||
|
||||
// Reserve some memory so the first couple iterations don't reallocate
|
||||
data.reserve(8);
|
||||
|
||||
detail::PeerDataCounts dataCounts;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
data.clear();
|
||||
|
||||
{
|
||||
std::lock_guard sl(mReceivedDataLock);
|
||||
|
||||
@@ -1243,24 +1328,22 @@ InboundLedger::runData()
|
||||
data.swap(mReceivedData);
|
||||
}
|
||||
|
||||
// Select the peer that gives us the most nodes that are useful,
|
||||
// breaking ties in favor of the peer that responded first.
|
||||
for (auto& entry : data)
|
||||
{
|
||||
if (auto peer = entry.first.lock())
|
||||
{
|
||||
int count = processData(peer, *(entry.second));
|
||||
if (count > chosenPeerCount)
|
||||
{
|
||||
chosenPeerCount = count;
|
||||
chosenPeer = std::move(peer);
|
||||
}
|
||||
dataCounts.update(std::move(peer), count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (chosenPeer)
|
||||
trigger(chosenPeer, TriggerReason::reply);
|
||||
// Select a random sample of the peers that gives us the most nodes that are
|
||||
// useful
|
||||
dataCounts.prune();
|
||||
dataCounts.sampleN(maxUsefulPeers, [&](std::shared_ptr<Peer> const& peer) {
|
||||
trigger(peer, TriggerReason::reply);
|
||||
});
|
||||
}
|
||||
|
||||
Json::Value
|
||||
|
||||
@@ -74,6 +74,12 @@ public:
|
||||
reason != InboundLedger::Reason::SHARD ||
|
||||
(seq != 0 && app_.getShardStore()));
|
||||
|
||||
// probably not the right rule
|
||||
if (app_.getOPs().isNeedNetworkLedger() &&
|
||||
(reason != InboundLedger::Reason::GENERIC) &&
|
||||
(reason != InboundLedger::Reason::CONSENSUS))
|
||||
return {};
|
||||
|
||||
bool isNew = true;
|
||||
std::shared_ptr<InboundLedger> inbound;
|
||||
{
|
||||
@@ -82,6 +88,7 @@ public:
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
auto it = mLedgers.find(hash);
|
||||
if (it != mLedgers.end())
|
||||
{
|
||||
@@ -183,7 +190,7 @@ public:
|
||||
// dispatch
|
||||
if (ledger->gotData(std::weak_ptr<Peer>(peer), packet))
|
||||
app_.getJobQueue().addJob(
|
||||
jtLEDGER_DATA, "processLedgerData", [ledger](Job&) {
|
||||
jtLEDGER_DATA, "processLedgerData", [ledger]() {
|
||||
ledger->runData();
|
||||
});
|
||||
|
||||
@@ -198,7 +205,7 @@ public:
|
||||
if (packet->type() == protocol::liAS_NODE)
|
||||
{
|
||||
app_.getJobQueue().addJob(
|
||||
jtLEDGER_DATA, "gotStaleData", [this, packet](Job&) {
|
||||
jtLEDGER_DATA, "gotStaleData", [this, packet]() {
|
||||
gotStaleData(packet);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -71,6 +71,7 @@ public:
|
||||
, m_zeroSet(m_map[uint256()])
|
||||
, m_gotSet(std::move(gotSet))
|
||||
, m_peerSetBuilder(std::move(peerSetBuilder))
|
||||
, j_(app_.journal("InboundTransactions"))
|
||||
{
|
||||
m_zeroSet.mSet = std::make_shared<SHAMap>(
|
||||
SHAMapType::TRANSACTION, uint256(), app_.getNodeFamily());
|
||||
@@ -99,9 +100,7 @@ public:
|
||||
{
|
||||
std::lock_guard sl(mLock);
|
||||
|
||||
auto it = m_map.find(hash);
|
||||
|
||||
if (it != m_map.end())
|
||||
if (auto it = m_map.find(hash); it != m_map.end())
|
||||
{
|
||||
if (acquire)
|
||||
{
|
||||
@@ -140,11 +139,8 @@ public:
|
||||
{
|
||||
protocol::TMLedgerData& packet = *packet_ptr;
|
||||
|
||||
JLOG(app_.journal("InboundLedger").trace())
|
||||
<< "Got data (" << packet.nodes().size()
|
||||
<< ") "
|
||||
"for acquiring ledger: "
|
||||
<< hash;
|
||||
JLOG(j_.trace()) << "Got data (" << packet.nodes().size()
|
||||
<< ") for acquiring ledger: " << hash;
|
||||
|
||||
TransactionAcquire::pointer ta = getAcquire(hash);
|
||||
|
||||
@@ -154,8 +150,9 @@ public:
|
||||
return;
|
||||
}
|
||||
|
||||
std::list<SHAMapNodeID> nodeIDs;
|
||||
std::list<Blob> nodeData;
|
||||
std::vector<std::pair<SHAMapNodeID, Slice>> data;
|
||||
data.reserve(packet.nodes().size());
|
||||
|
||||
for (auto const& node : packet.nodes())
|
||||
{
|
||||
if (!node.has_nodeid() || !node.has_nodedata())
|
||||
@@ -172,12 +169,10 @@ public:
|
||||
return;
|
||||
}
|
||||
|
||||
nodeIDs.emplace_back(*id);
|
||||
nodeData.emplace_back(
|
||||
node.nodedata().begin(), node.nodedata().end());
|
||||
data.emplace_back(std::make_pair(*id, makeSlice(node.nodedata())));
|
||||
}
|
||||
|
||||
if (!ta->takeNodes(nodeIDs, nodeData, peer).isUseful())
|
||||
if (!ta->takeNodes(data, peer).isUseful())
|
||||
peer->charge(Resource::feeUnwantedData);
|
||||
}
|
||||
|
||||
@@ -262,6 +257,8 @@ private:
|
||||
std::function<void(std::shared_ptr<SHAMap> const&, bool)> m_gotSet;
|
||||
|
||||
std::unique_ptr<PeerSetBuilder> m_peerSetBuilder;
|
||||
|
||||
beast::Journal j_;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -240,7 +240,7 @@ LedgerDeltaAcquire::onLedgerBuilt(
|
||||
app_.getJobQueue().addJob(
|
||||
jtREPLAY_TASK,
|
||||
"onLedgerBuilt",
|
||||
[=, ledger = this->fullLedger_, &app = this->app_](Job&) {
|
||||
[=, ledger = this->fullLedger_, &app = this->app_]() {
|
||||
for (auto reason : reasons)
|
||||
{
|
||||
switch (reason)
|
||||
|
||||
@@ -34,8 +34,7 @@
|
||||
#include <ripple/app/misc/TxQ.h>
|
||||
#include <ripple/app/misc/ValidatorList.h>
|
||||
#include <ripple/app/paths/PathRequests.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_postgres.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfacePostgres.h>
|
||||
#include <ripple/app/rdb/backend/PostgresDatabase.h>
|
||||
#include <ripple/app/tx/apply.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/MathUtilities.h>
|
||||
@@ -261,8 +260,13 @@ LedgerMaster::getPublishedLedgerAge()
|
||||
std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
|
||||
ret -= pubClose;
|
||||
ret = (ret > 0s) ? ret : 0s;
|
||||
static std::chrono::seconds lastRet = -1s;
|
||||
|
||||
JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
|
||||
if (ret != lastRet)
|
||||
{
|
||||
JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
|
||||
lastRet = ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -273,10 +277,10 @@ LedgerMaster::getValidatedLedgerAge()
|
||||
|
||||
#ifdef RIPPLED_REPORTING
|
||||
if (app_.config().reporting())
|
||||
return static_cast<RelationalDBInterfacePostgres*>(
|
||||
&app_.getRelationalDBInterface())
|
||||
return static_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
|
||||
->getValidatedLedgerAge();
|
||||
#endif
|
||||
|
||||
std::chrono::seconds valClose{mValidLedgerSign.load()};
|
||||
if (valClose == 0s)
|
||||
{
|
||||
@@ -287,8 +291,13 @@ LedgerMaster::getValidatedLedgerAge()
|
||||
std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
|
||||
ret -= valClose;
|
||||
ret = (ret > 0s) ? ret : 0s;
|
||||
static std::chrono::seconds lastRet = -1s;
|
||||
|
||||
JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
|
||||
if (ret != lastRet)
|
||||
{
|
||||
JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
|
||||
lastRet = ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -299,8 +308,7 @@ LedgerMaster::isCaughtUp(std::string& reason)
|
||||
|
||||
#ifdef RIPPLED_REPORTING
|
||||
if (app_.config().reporting())
|
||||
return static_cast<RelationalDBInterfacePostgres*>(
|
||||
&app_.getRelationalDBInterface())
|
||||
return static_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
|
||||
->isCaughtUp(reason);
|
||||
#endif
|
||||
|
||||
@@ -699,7 +707,7 @@ LedgerMaster::getEarliestFetch()
|
||||
}
|
||||
|
||||
void
|
||||
LedgerMaster::tryFill(Job& job, std::shared_ptr<Ledger const> ledger)
|
||||
LedgerMaster::tryFill(std::shared_ptr<Ledger const> ledger)
|
||||
{
|
||||
std::uint32_t seq = ledger->info().seq;
|
||||
uint256 prevHash = ledger->info().parentHash;
|
||||
@@ -710,7 +718,7 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr<Ledger const> ledger)
|
||||
std::uint32_t maxHas = seq;
|
||||
|
||||
NodeStore::Database& nodeStore{app_.getNodeStore()};
|
||||
while (!job.shouldCancel() && seq > 0)
|
||||
while (!app_.getJobQueue().isStopping() && seq > 0)
|
||||
{
|
||||
{
|
||||
std::lock_guard ml(m_mutex);
|
||||
@@ -733,7 +741,7 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr<Ledger const> ledger)
|
||||
mCompleteLedgers.insert(range(minHas, maxHas));
|
||||
}
|
||||
maxHas = minHas;
|
||||
ledgerHashes = app_.getRelationalDBInterface().getHashesByIndex(
|
||||
ledgerHashes = app_.getRelationalDatabase().getHashesByIndex(
|
||||
(seq < 500) ? 0 : (seq - 499), seq);
|
||||
it = ledgerHashes.find(seq);
|
||||
|
||||
@@ -917,8 +925,8 @@ LedgerMaster::setFullLedger(
|
||||
{
|
||||
// Check the SQL database's entry for the sequence before this
|
||||
// ledger, if it's not this ledger's parent, invalidate it
|
||||
uint256 prevHash = app_.getRelationalDBInterface().getHashByIndex(
|
||||
ledger->info().seq - 1);
|
||||
uint256 prevHash =
|
||||
app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1);
|
||||
if (prevHash.isNonZero() && prevHash != ledger->info().parentHash)
|
||||
clearLedger(ledger->info().seq - 1);
|
||||
}
|
||||
@@ -1453,7 +1461,7 @@ LedgerMaster::tryAdvance()
|
||||
if (!mAdvanceThread && !mValidLedger.empty())
|
||||
{
|
||||
mAdvanceThread = true;
|
||||
app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this](Job&) {
|
||||
app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
|
||||
std::unique_lock sl(m_mutex);
|
||||
|
||||
assert(!mValidLedger.empty() && mAdvanceThread);
|
||||
@@ -1476,19 +1484,21 @@ LedgerMaster::tryAdvance()
|
||||
}
|
||||
|
||||
void
|
||||
LedgerMaster::updatePaths(Job& job)
|
||||
LedgerMaster::updatePaths()
|
||||
{
|
||||
{
|
||||
std::lock_guard ml(m_mutex);
|
||||
if (app_.getOPs().isNeedNetworkLedger())
|
||||
{
|
||||
--mPathFindThread;
|
||||
JLOG(m_journal.debug()) << "Need network ledger for updating paths";
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
while (!job.shouldCancel())
|
||||
while (!app_.getJobQueue().isStopping())
|
||||
{
|
||||
JLOG(m_journal.debug()) << "updatePaths running";
|
||||
std::shared_ptr<ReadView const> lastLedger;
|
||||
{
|
||||
std::lock_guard ml(m_mutex);
|
||||
@@ -1506,6 +1516,7 @@ LedgerMaster::updatePaths(Job& job)
|
||||
else
|
||||
{ // Nothing to do
|
||||
--mPathFindThread;
|
||||
JLOG(m_journal.debug()) << "Nothing to do for updating paths";
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -1527,8 +1538,31 @@ LedgerMaster::updatePaths(Job& job)
|
||||
|
||||
try
|
||||
{
|
||||
app_.getPathRequests().updateAll(
|
||||
lastLedger, job.getCancelCallback());
|
||||
auto& pathRequests = app_.getPathRequests();
|
||||
{
|
||||
std::lock_guard ml(m_mutex);
|
||||
if (!pathRequests.requestsPending())
|
||||
{
|
||||
--mPathFindThread;
|
||||
JLOG(m_journal.debug())
|
||||
<< "No path requests found. Nothing to do for updating "
|
||||
"paths. "
|
||||
<< mPathFindThread << " jobs remaining";
|
||||
return;
|
||||
}
|
||||
}
|
||||
JLOG(m_journal.debug()) << "Updating paths";
|
||||
pathRequests.updateAll(lastLedger);
|
||||
|
||||
std::lock_guard ml(m_mutex);
|
||||
if (!pathRequests.requestsPending())
|
||||
{
|
||||
JLOG(m_journal.debug())
|
||||
<< "No path requests left. No need for further updating "
|
||||
"paths";
|
||||
--mPathFindThread;
|
||||
return;
|
||||
}
|
||||
}
|
||||
catch (SHAMapMissingNode const& mn)
|
||||
{
|
||||
@@ -1588,10 +1622,14 @@ LedgerMaster::newPFWork(
|
||||
const char* name,
|
||||
std::unique_lock<std::recursive_mutex>&)
|
||||
{
|
||||
if (mPathFindThread < 2)
|
||||
if (!app_.isStopping() && mPathFindThread < 2 &&
|
||||
app_.getPathRequests().requestsPending())
|
||||
{
|
||||
JLOG(m_journal.debug())
|
||||
<< "newPFWork: Creating job. path find threads: "
|
||||
<< mPathFindThread;
|
||||
if (app_.getJobQueue().addJob(
|
||||
jtUPDATE_PF, name, [this](Job& j) { updatePaths(j); }))
|
||||
jtUPDATE_PF, name, [this]() { updatePaths(); }))
|
||||
{
|
||||
++mPathFindThread;
|
||||
}
|
||||
@@ -1624,7 +1662,7 @@ LedgerMaster::getValidatedLedger()
|
||||
#ifdef RIPPLED_REPORTING
|
||||
if (app_.config().reporting())
|
||||
{
|
||||
auto seq = app_.getRelationalDBInterface().getMaxLedgerSeq();
|
||||
auto seq = app_.getRelationalDatabase().getMaxLedgerSeq();
|
||||
if (!seq)
|
||||
return {};
|
||||
return getLedgerBySeq(*seq);
|
||||
@@ -1660,8 +1698,7 @@ LedgerMaster::getCompleteLedgers()
|
||||
{
|
||||
#ifdef RIPPLED_REPORTING
|
||||
if (app_.config().reporting())
|
||||
return static_cast<RelationalDBInterfacePostgres*>(
|
||||
&app_.getRelationalDBInterface())
|
||||
return static_cast<PostgresDatabase*>(&app_.getRelationalDatabase())
|
||||
->getCompleteLedgers();
|
||||
#endif
|
||||
std::lock_guard sl(mCompleteLock);
|
||||
@@ -1706,7 +1743,7 @@ LedgerMaster::getHashBySeq(std::uint32_t index)
|
||||
if (hash.isNonZero())
|
||||
return hash;
|
||||
|
||||
return app_.getRelationalDBInterface().getHashByIndex(index);
|
||||
return app_.getRelationalDatabase().getHashByIndex(index);
|
||||
}
|
||||
|
||||
std::optional<LedgerHash>
|
||||
@@ -1829,12 +1866,6 @@ LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
|
||||
mCompleteLedgers.insert(range(minV, maxV));
|
||||
}
|
||||
|
||||
void
|
||||
LedgerMaster::tune(int size, std::chrono::seconds age)
|
||||
{
|
||||
mLedgerHistory.tune(size, age);
|
||||
}
|
||||
|
||||
void
|
||||
LedgerMaster::sweep()
|
||||
{
|
||||
@@ -1933,7 +1964,7 @@ LedgerMaster::fetchForHistory(
|
||||
fillInProgress = mFillInProgress;
|
||||
}
|
||||
if (fillInProgress == 0 &&
|
||||
app_.getRelationalDBInterface().getHashByIndex(seq - 1) ==
|
||||
app_.getRelationalDatabase().getHashByIndex(seq - 1) ==
|
||||
ledger->info().parentHash)
|
||||
{
|
||||
{
|
||||
@@ -1942,8 +1973,8 @@ LedgerMaster::fetchForHistory(
|
||||
mFillInProgress = seq;
|
||||
}
|
||||
app_.getJobQueue().addJob(
|
||||
jtADVANCE, "tryFill", [this, ledger](Job& j) {
|
||||
tryFill(j, ledger);
|
||||
jtADVANCE, "tryFill", [this, ledger]() {
|
||||
tryFill(ledger);
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -2075,7 +2106,7 @@ LedgerMaster::doAdvance(std::unique_lock<std::recursive_mutex>& sl)
|
||||
{
|
||||
JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
|
||||
<< " ledgers to publish";
|
||||
for (auto ledger : pubLedgers)
|
||||
for (auto const& ledger : pubLedgers)
|
||||
{
|
||||
{
|
||||
ScopedUnlock sul{sl};
|
||||
@@ -2124,7 +2155,7 @@ LedgerMaster::gotFetchPack(bool progress, std::uint32_t seq)
|
||||
{
|
||||
if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
|
||||
{
|
||||
app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&](Job&) {
|
||||
app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
|
||||
app_.getInboundLedgers().gotFetchPack();
|
||||
mGotFetchPackThread.clear(std::memory_order_release);
|
||||
});
|
||||
@@ -2329,7 +2360,7 @@ LedgerMaster::getFetchPackCacheSize() const
|
||||
std::optional<LedgerIndex>
|
||||
LedgerMaster::minSqlSeq()
|
||||
{
|
||||
return app_.getRelationalDBInterface().getMinLedgerSeq();
|
||||
return app_.getRelationalDatabase().getMinLedgerSeq();
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -83,7 +83,7 @@ TimeoutCounter::queueJob(ScopedLockType& sl)
|
||||
app_.getJobQueue().addJob(
|
||||
queueJobParameter_.jobType,
|
||||
queueJobParameter_.jobName,
|
||||
[wptr = pmDowncast()](Job&) {
|
||||
[wptr = pmDowncast()]() {
|
||||
if (auto sptr = wptr.lock(); sptr)
|
||||
sptr->invokeOnTimer();
|
||||
});
|
||||
|
||||
@@ -65,7 +65,7 @@ TransactionAcquire::done()
|
||||
|
||||
if (failed_)
|
||||
{
|
||||
JLOG(journal_.warn()) << "Failed to acquire TX set " << hash_;
|
||||
JLOG(journal_.debug()) << "Failed to acquire TX set " << hash_;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -81,7 +81,7 @@ TransactionAcquire::done()
|
||||
// just updates the consensus and related structures when we acquire
|
||||
// a transaction set. No need to update them if we're shutting down.
|
||||
app_.getJobQueue().addJob(
|
||||
jtTXN_DATA, "completeAcquire", [pap, hash, map](Job&) {
|
||||
jtTXN_DATA, "completeAcquire", [pap, hash, map]() {
|
||||
pap->getInboundTransactions().giveSet(hash, map, true);
|
||||
});
|
||||
}
|
||||
@@ -176,8 +176,7 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
|
||||
|
||||
SHAMapAddNode
|
||||
TransactionAcquire::takeNodes(
|
||||
const std::list<SHAMapNodeID>& nodeIDs,
|
||||
const std::list<Blob>& data,
|
||||
std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
|
||||
std::shared_ptr<Peer> const& peer)
|
||||
{
|
||||
ScopedLockType sl(mtx_);
|
||||
@@ -196,24 +195,20 @@ TransactionAcquire::takeNodes(
|
||||
|
||||
try
|
||||
{
|
||||
if (nodeIDs.empty())
|
||||
if (data.empty())
|
||||
return SHAMapAddNode::invalid();
|
||||
|
||||
std::list<SHAMapNodeID>::const_iterator nodeIDit = nodeIDs.begin();
|
||||
std::list<Blob>::const_iterator nodeDatait = data.begin();
|
||||
ConsensusTransSetSF sf(app_, app_.getTempNodeCache());
|
||||
|
||||
while (nodeIDit != nodeIDs.end())
|
||||
for (auto const& d : data)
|
||||
{
|
||||
if (nodeIDit->isRoot())
|
||||
if (d.first.isRoot())
|
||||
{
|
||||
if (mHaveRoot)
|
||||
JLOG(journal_.debug())
|
||||
<< "Got root TXS node, already have it";
|
||||
else if (!mMap->addRootNode(
|
||||
SHAMapHash{hash_},
|
||||
makeSlice(*nodeDatait),
|
||||
nullptr)
|
||||
SHAMapHash{hash_}, d.second, nullptr)
|
||||
.isGood())
|
||||
{
|
||||
JLOG(journal_.warn()) << "TX acquire got bad root node";
|
||||
@@ -221,24 +216,22 @@ TransactionAcquire::takeNodes(
|
||||
else
|
||||
mHaveRoot = true;
|
||||
}
|
||||
else if (!mMap->addKnownNode(*nodeIDit, makeSlice(*nodeDatait), &sf)
|
||||
.isGood())
|
||||
else if (!mMap->addKnownNode(d.first, d.second, &sf).isGood())
|
||||
{
|
||||
JLOG(journal_.warn()) << "TX acquire got bad non-root node";
|
||||
return SHAMapAddNode::invalid();
|
||||
}
|
||||
|
||||
++nodeIDit;
|
||||
++nodeDatait;
|
||||
}
|
||||
|
||||
trigger(peer);
|
||||
progress_ = true;
|
||||
return SHAMapAddNode::useful();
|
||||
}
|
||||
catch (std::exception const&)
|
||||
catch (std::exception const& ex)
|
||||
{
|
||||
JLOG(journal_.error()) << "Peer sends us junky transaction node data";
|
||||
JLOG(journal_.error())
|
||||
<< "Peer " << peer->id()
|
||||
<< " sent us junky transaction node data: " << ex.what();
|
||||
return SHAMapAddNode::invalid();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,8 +44,7 @@ public:
|
||||
|
||||
SHAMapAddNode
|
||||
takeNodes(
|
||||
const std::list<SHAMapNodeID>& IDs,
|
||||
const std::list<Blob>& data,
|
||||
std::vector<std::pair<SHAMapNodeID, Slice>> const& data,
|
||||
std::shared_ptr<Peer> const&);
|
||||
|
||||
void
|
||||
|
||||
@@ -45,8 +45,8 @@
|
||||
#include <ripple/app/misc/ValidatorKeys.h>
|
||||
#include <ripple/app/misc/ValidatorSite.h>
|
||||
#include <ripple/app/paths/PathRequests.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfacePostgres.h>
|
||||
#include <ripple/app/rdb/Wallet.h>
|
||||
#include <ripple/app/rdb/backend/PostgresDatabase.h>
|
||||
#include <ripple/app/reporting/ReportingETL.h>
|
||||
#include <ripple/app/tx/apply.h>
|
||||
#include <ripple/basics/ByteUtilities.h>
|
||||
@@ -219,15 +219,17 @@ public:
|
||||
boost::asio::steady_timer sweepTimer_;
|
||||
boost::asio::steady_timer entropyTimer_;
|
||||
|
||||
std::unique_ptr<RelationalDBInterface> mRelationalDBInterface;
|
||||
std::unique_ptr<RelationalDatabase> mRelationalDatabase;
|
||||
std::unique_ptr<DatabaseCon> mWalletDB;
|
||||
std::unique_ptr<Overlay> overlay_;
|
||||
|
||||
boost::asio::signal_set m_signals;
|
||||
|
||||
std::condition_variable cv_;
|
||||
mutable std::mutex mut_;
|
||||
bool isTimeToStop = false;
|
||||
// Once we get C++20, we could use `std::atomic_flag` for `isTimeToStop`
|
||||
// and eliminate the need for the condition variable and the mutex.
|
||||
std::condition_variable stoppingCondition_;
|
||||
mutable std::mutex stoppingMutex_;
|
||||
std::atomic<bool> isTimeToStop = false;
|
||||
|
||||
std::atomic<bool> checkSigs_;
|
||||
|
||||
@@ -875,11 +877,11 @@ public:
|
||||
return *txQ_;
|
||||
}
|
||||
|
||||
RelationalDBInterface&
|
||||
getRelationalDBInterface() override
|
||||
RelationalDatabase&
|
||||
getRelationalDatabase() override
|
||||
{
|
||||
assert(mRelationalDBInterface.get() != nullptr);
|
||||
return *mRelationalDBInterface;
|
||||
assert(mRelationalDatabase.get() != nullptr);
|
||||
return *mRelationalDatabase;
|
||||
}
|
||||
|
||||
DatabaseCon&
|
||||
@@ -905,14 +907,14 @@ public:
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
bool
|
||||
initRDBMS()
|
||||
initRelationalDatabase()
|
||||
{
|
||||
assert(mWalletDB.get() == nullptr);
|
||||
|
||||
try
|
||||
{
|
||||
mRelationalDBInterface =
|
||||
RelationalDBInterface::init(*this, *config_, *m_jobQueue);
|
||||
mRelationalDatabase =
|
||||
RelationalDatabase::init(*this, *config_, *m_jobQueue);
|
||||
|
||||
// wallet database
|
||||
auto setup = setup_DatabaseCon(*config_, m_journal);
|
||||
@@ -960,110 +962,9 @@ public:
|
||||
<< "' took " << elapsed.count() << " seconds.";
|
||||
}
|
||||
|
||||
// tune caches
|
||||
using namespace std::chrono;
|
||||
|
||||
m_ledgerMaster->tune(
|
||||
config_->getValueFor(SizedItem::ledgerSize),
|
||||
seconds{config_->getValueFor(SizedItem::ledgerAge)});
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
// Called to indicate shutdown.
|
||||
void
|
||||
stop()
|
||||
{
|
||||
JLOG(m_journal.debug()) << "Application stopping";
|
||||
|
||||
m_io_latency_sampler.cancel_async();
|
||||
|
||||
// VFALCO Enormous hack, we have to force the probe to cancel
|
||||
// before we stop the io_service queue or else it never
|
||||
// unblocks in its destructor. The fix is to make all
|
||||
// io_objects gracefully handle exit so that we can
|
||||
// naturally return from io_service::run() instead of
|
||||
// forcing a call to io_service::stop()
|
||||
m_io_latency_sampler.cancel();
|
||||
|
||||
m_resolver->stop_async();
|
||||
|
||||
// NIKB This is a hack - we need to wait for the resolver to
|
||||
// stop. before we stop the io_server_queue or weird
|
||||
// things will happen.
|
||||
m_resolver->stop();
|
||||
|
||||
{
|
||||
boost::system::error_code ec;
|
||||
sweepTimer_.cancel(ec);
|
||||
if (ec)
|
||||
{
|
||||
JLOG(m_journal.error())
|
||||
<< "Application: sweepTimer cancel error: " << ec.message();
|
||||
}
|
||||
|
||||
ec.clear();
|
||||
entropyTimer_.cancel(ec);
|
||||
if (ec)
|
||||
{
|
||||
JLOG(m_journal.error())
|
||||
<< "Application: entropyTimer cancel error: "
|
||||
<< ec.message();
|
||||
}
|
||||
}
|
||||
// Make sure that any waitHandlers pending in our timers are done
|
||||
// before we declare ourselves stopped.
|
||||
using namespace std::chrono_literals;
|
||||
waitHandlerCounter_.join("Application", 1s, m_journal);
|
||||
|
||||
mValidations.flush();
|
||||
|
||||
validatorSites_->stop();
|
||||
|
||||
// TODO Store manifests in manifests.sqlite instead of wallet.db
|
||||
validatorManifests_->save(
|
||||
getWalletDB(),
|
||||
"ValidatorManifests",
|
||||
[this](PublicKey const& pubKey) {
|
||||
return validators().listed(pubKey);
|
||||
});
|
||||
|
||||
publisherManifests_->save(
|
||||
getWalletDB(),
|
||||
"PublisherManifests",
|
||||
[this](PublicKey const& pubKey) {
|
||||
return validators().trustedPublisher(pubKey);
|
||||
});
|
||||
|
||||
// The order of these stop calls is delicate.
|
||||
// Re-ordering them risks undefined behavior.
|
||||
m_loadManager->stop();
|
||||
m_shaMapStore->stop();
|
||||
m_jobQueue->stop();
|
||||
if (shardArchiveHandler_)
|
||||
shardArchiveHandler_->stop();
|
||||
if (overlay_)
|
||||
overlay_->stop();
|
||||
if (shardStore_)
|
||||
shardStore_->stop();
|
||||
grpcServer_->stop();
|
||||
m_networkOPs->stop();
|
||||
serverHandler_->stop();
|
||||
m_ledgerReplayer->stop();
|
||||
m_inboundTransactions->stop();
|
||||
m_inboundLedgers->stop();
|
||||
ledgerCleaner_->stop();
|
||||
if (reportingETL_)
|
||||
reportingETL_->stop();
|
||||
if (auto pg = dynamic_cast<RelationalDBInterfacePostgres*>(
|
||||
&*mRelationalDBInterface))
|
||||
pg->stop();
|
||||
m_nodeStore->stop();
|
||||
perfLog_->stop();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
//
|
||||
// PropertyStream
|
||||
@@ -1085,7 +986,7 @@ public:
|
||||
if (e.value() == boost::system::errc::success)
|
||||
{
|
||||
m_jobQueue->addJob(
|
||||
jtSWEEP, "sweep", [this](Job&) { doSweep(); });
|
||||
jtSWEEP, "sweep", [this]() { doSweep(); });
|
||||
}
|
||||
// Recover as best we can if an unexpected error occurs.
|
||||
if (e.value() != boost::system::errc::success &&
|
||||
@@ -1140,7 +1041,7 @@ public:
|
||||
doSweep()
|
||||
{
|
||||
if (!config_->standalone() &&
|
||||
!getRelationalDBInterface().transactionDbHasSpace(*config_))
|
||||
!getRelationalDatabase().transactionDbHasSpace(*config_))
|
||||
{
|
||||
signalStop();
|
||||
}
|
||||
@@ -1165,8 +1066,7 @@ public:
|
||||
cachedSLEs_.sweep();
|
||||
|
||||
#ifdef RIPPLED_REPORTING
|
||||
if (auto pg = dynamic_cast<RelationalDBInterfacePostgres*>(
|
||||
&*mRelationalDBInterface))
|
||||
if (auto pg = dynamic_cast<PostgresDatabase*>(&*mRelationalDatabase))
|
||||
pg->sweep();
|
||||
#endif
|
||||
|
||||
@@ -1261,7 +1161,7 @@ ApplicationImp::setup()
|
||||
if (!config_->standalone())
|
||||
timeKeeper_->run(config_->SNTP_SERVERS);
|
||||
|
||||
if (!initRDBMS() || !initNodeStore())
|
||||
if (!initRelationalDatabase() || !initNodeStore())
|
||||
return false;
|
||||
|
||||
if (shardStore_)
|
||||
@@ -1339,8 +1239,6 @@ ApplicationImp::setup()
|
||||
{
|
||||
// Fall back to syncing from the network, such as
|
||||
// when there's no existing data.
|
||||
if (startUp == Config::NETWORK && !config_->standalone())
|
||||
m_networkOPs->setNeedNetworkLedger();
|
||||
startGenesisLedger();
|
||||
}
|
||||
else
|
||||
@@ -1638,27 +1536,101 @@ ApplicationImp::run()
|
||||
}
|
||||
|
||||
{
|
||||
std::unique_lock<std::mutex> lk{mut_};
|
||||
cv_.wait(lk, [this] { return isTimeToStop; });
|
||||
std::unique_lock<std::mutex> lk{stoppingMutex_};
|
||||
stoppingCondition_.wait(lk, [this] { return isTimeToStop.load(); });
|
||||
}
|
||||
|
||||
JLOG(m_journal.info()) << "Received shutdown request";
|
||||
stop();
|
||||
JLOG(m_journal.debug()) << "Application stopping";
|
||||
|
||||
m_io_latency_sampler.cancel_async();
|
||||
|
||||
// VFALCO Enormous hack, we have to force the probe to cancel
|
||||
// before we stop the io_service queue or else it never
|
||||
// unblocks in its destructor. The fix is to make all
|
||||
// io_objects gracefully handle exit so that we can
|
||||
// naturally return from io_service::run() instead of
|
||||
// forcing a call to io_service::stop()
|
||||
m_io_latency_sampler.cancel();
|
||||
|
||||
m_resolver->stop_async();
|
||||
|
||||
// NIKB This is a hack - we need to wait for the resolver to
|
||||
// stop. before we stop the io_server_queue or weird
|
||||
// things will happen.
|
||||
m_resolver->stop();
|
||||
|
||||
{
|
||||
boost::system::error_code ec;
|
||||
sweepTimer_.cancel(ec);
|
||||
if (ec)
|
||||
{
|
||||
JLOG(m_journal.error())
|
||||
<< "Application: sweepTimer cancel error: " << ec.message();
|
||||
}
|
||||
|
||||
ec.clear();
|
||||
entropyTimer_.cancel(ec);
|
||||
if (ec)
|
||||
{
|
||||
JLOG(m_journal.error())
|
||||
<< "Application: entropyTimer cancel error: " << ec.message();
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure that any waitHandlers pending in our timers are done
|
||||
// before we declare ourselves stopped.
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
waitHandlerCounter_.join("Application", 1s, m_journal);
|
||||
|
||||
mValidations.flush();
|
||||
|
||||
validatorSites_->stop();
|
||||
|
||||
// TODO Store manifests in manifests.sqlite instead of wallet.db
|
||||
validatorManifests_->save(
|
||||
getWalletDB(), "ValidatorManifests", [this](PublicKey const& pubKey) {
|
||||
return validators().listed(pubKey);
|
||||
});
|
||||
|
||||
publisherManifests_->save(
|
||||
getWalletDB(), "PublisherManifests", [this](PublicKey const& pubKey) {
|
||||
return validators().trustedPublisher(pubKey);
|
||||
});
|
||||
|
||||
// The order of these stop calls is delicate.
|
||||
// Re-ordering them risks undefined behavior.
|
||||
m_loadManager->stop();
|
||||
m_shaMapStore->stop();
|
||||
m_jobQueue->stop();
|
||||
if (shardArchiveHandler_)
|
||||
shardArchiveHandler_->stop();
|
||||
if (overlay_)
|
||||
overlay_->stop();
|
||||
if (shardStore_)
|
||||
shardStore_->stop();
|
||||
grpcServer_->stop();
|
||||
m_networkOPs->stop();
|
||||
serverHandler_->stop();
|
||||
m_ledgerReplayer->stop();
|
||||
m_inboundTransactions->stop();
|
||||
m_inboundLedgers->stop();
|
||||
ledgerCleaner_->stop();
|
||||
if (reportingETL_)
|
||||
reportingETL_->stop();
|
||||
if (auto pg = dynamic_cast<PostgresDatabase*>(&*mRelationalDatabase))
|
||||
pg->stop();
|
||||
m_nodeStore->stop();
|
||||
perfLog_->stop();
|
||||
|
||||
JLOG(m_journal.info()) << "Done.";
|
||||
}
|
||||
|
||||
void
|
||||
ApplicationImp::signalStop()
|
||||
{
|
||||
// Unblock the main thread (which is sitting in run()).
|
||||
// When we get C++20 this can use std::latch.
|
||||
std::lock_guard lk{mut_};
|
||||
|
||||
if (!isTimeToStop)
|
||||
{
|
||||
isTimeToStop = true;
|
||||
cv_.notify_all();
|
||||
}
|
||||
if (!isTimeToStop.exchange(true))
|
||||
stoppingCondition_.notify_all();
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -1676,8 +1648,7 @@ ApplicationImp::checkSigs(bool check)
|
||||
bool
|
||||
ApplicationImp::isStopping() const
|
||||
{
|
||||
std::lock_guard lk{mut_};
|
||||
return isTimeToStop;
|
||||
return isTimeToStop.load();
|
||||
}
|
||||
|
||||
int
|
||||
@@ -2164,7 +2135,7 @@ ApplicationImp::nodeToShards()
|
||||
void
|
||||
ApplicationImp::setMaxDisallowedLedger()
|
||||
{
|
||||
auto seq = getRelationalDBInterface().getMaxLedgerSeq();
|
||||
auto seq = getRelationalDatabase().getMaxLedgerSeq();
|
||||
if (seq)
|
||||
maxDisallowedLedger_ = *seq;
|
||||
|
||||
|
||||
@@ -99,7 +99,7 @@ class ValidatorList;
|
||||
class ValidatorSite;
|
||||
class Cluster;
|
||||
|
||||
class RelationalDBInterface;
|
||||
class RelationalDatabase;
|
||||
class DatabaseCon;
|
||||
class SHAMapStore;
|
||||
|
||||
@@ -251,8 +251,8 @@ public:
|
||||
openLedger() = 0;
|
||||
virtual OpenLedger const&
|
||||
openLedger() const = 0;
|
||||
virtual RelationalDBInterface&
|
||||
getRelationalDBInterface() = 0;
|
||||
virtual RelationalDatabase&
|
||||
getRelationalDatabase() = 0;
|
||||
|
||||
virtual std::chrono::milliseconds
|
||||
getIOLatency() = 0;
|
||||
|
||||
@@ -72,12 +72,22 @@ inline constexpr std::array<char const*, 5> LgrDBInit{
|
||||
// Transaction database holds transactions and public keys
|
||||
inline constexpr auto TxDBName{"transaction.db"};
|
||||
|
||||
inline constexpr std::array TxDBPragma
|
||||
// In C++17 omitting the explicit template parameters caused
|
||||
// a crash
|
||||
inline constexpr std::array<char const*, 4> TxDBPragma
|
||||
{
|
||||
"PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;",
|
||||
"PRAGMA max_page_count=2147483646;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=17179869184;"
|
||||
#else
|
||||
|
||||
// Provide an explicit `no-op` SQL statement
|
||||
// in order to keep the size of the array
|
||||
// constant regardless of the preprocessor
|
||||
// condition evaluation
|
||||
"PRAGMA sqlite_noop_statement;"
|
||||
#endif
|
||||
};
|
||||
|
||||
@@ -117,12 +127,22 @@ inline constexpr std::array<char const*, 8> TxDBInit{
|
||||
// The Ledger Meta database maps ledger hashes to shard indexes
|
||||
inline constexpr auto LgrMetaDBName{"ledger_meta.db"};
|
||||
|
||||
inline constexpr std::array LgrMetaDBPragma
|
||||
// In C++17 omitting the explicit template parameters caused
|
||||
// a crash
|
||||
inline constexpr std::array<char const*, 4> LgrMetaDBPragma
|
||||
{
|
||||
"PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;",
|
||||
"PRAGMA max_page_count=2147483646;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=17179869184;"
|
||||
#else
|
||||
|
||||
// Provide an explicit `no-op` SQL statement
|
||||
// in order to keep the size of the array
|
||||
// constant regardless of the preprocessor
|
||||
// condition evaluation
|
||||
"PRAGMA sqlite_noop_statement;"
|
||||
#endif
|
||||
};
|
||||
|
||||
@@ -141,12 +161,22 @@ inline constexpr std::array<char const*, 3> LgrMetaDBInit{
|
||||
// Transaction Meta database maps transaction IDs to shard indexes
|
||||
inline constexpr auto TxMetaDBName{"transaction_meta.db"};
|
||||
|
||||
inline constexpr std::array TxMetaDBPragma
|
||||
// In C++17 omitting the explicit template parameters caused
|
||||
// a crash
|
||||
inline constexpr std::array<char const*, 4> TxMetaDBPragma
|
||||
{
|
||||
"PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;",
|
||||
"PRAGMA max_page_count=2147483646;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=17179869184;"
|
||||
#else
|
||||
|
||||
// Provide an explicit `no-op` SQL statement
|
||||
// in order to keep the size of the array
|
||||
// constant regardless of the preprocessor
|
||||
// condition evaluation
|
||||
"PRAGMA sqlite_noop_statement;"
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
#include <ripple/beast/core/CurrentThreadName.h>
|
||||
#include <ripple/resource/Fees.h>
|
||||
|
||||
#include <beast/net/IPAddressConversion.h>
|
||||
#include <ripple/beast/net/IPAddressConversion.h>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/main/DBInit.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/Vacuum.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/contract.h>
|
||||
@@ -35,7 +35,7 @@
|
||||
#include <ripple/rpc/RPCHandler.h>
|
||||
|
||||
#ifdef ENABLE_TESTS
|
||||
#include <beast/unit_test/match.hpp>
|
||||
#include <ripple/beast/unit_test/match.hpp>
|
||||
#include <test/unit_test/multi_runner.h>
|
||||
#endif // ENABLE_TESTS
|
||||
|
||||
@@ -135,6 +135,7 @@ printHelp(const po::options_description& desc)
|
||||
"[strict]\n"
|
||||
" account_tx accountID [ledger_min [ledger_max [limit "
|
||||
"[offset]]]] [binary] [count] [descending]\n"
|
||||
" book_changes [<ledger hash|id>]\n"
|
||||
" book_offers <taker_pays> <taker_gets> [<taker [<ledger> "
|
||||
"[<limit> [<proof> [<marker>]]]]]\n"
|
||||
" can_delete [<ledgerid>|<ledgerhash>|now|always|never]\n"
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/main/NodeIdentity.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/Wallet.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/core/Config.h>
|
||||
#include <ripple/core/ConfigSections.h>
|
||||
|
||||
@@ -32,7 +32,7 @@ NodeStoreScheduler::scheduleTask(NodeStore::Task& task)
|
||||
if (jobQueue_.isStopped())
|
||||
return;
|
||||
|
||||
if (!jobQueue_.addJob(jtWRITE, "NodeObject::store", [&task](Job&) {
|
||||
if (!jobQueue_.addJob(jtWRITE, "NodeObject::store", [&task]() {
|
||||
task.performScheduledTask();
|
||||
}))
|
||||
{
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#ifndef RIPPLE_APP_MISC_CANONICALTXSET_H_INCLUDED
|
||||
#define RIPPLE_APP_MISC_CANONICALTXSET_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/CountedObject.h>
|
||||
#include <ripple/protocol/RippleLedgerHash.h>
|
||||
#include <ripple/protocol/STTx.h>
|
||||
#include <ripple/protocol/SeqProxy.h>
|
||||
@@ -34,7 +35,7 @@ namespace ripple {
|
||||
|
||||
*/
|
||||
// VFALCO TODO rename to SortedTxSet
|
||||
class CanonicalTXSet
|
||||
class CanonicalTXSet : public CountedObject<CanonicalTXSet>
|
||||
{
|
||||
private:
|
||||
class Key
|
||||
|
||||
@@ -24,7 +24,9 @@
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/protocol/PublicKey.h>
|
||||
#include <ripple/protocol/SecretKey.h>
|
||||
|
||||
#include <optional>
|
||||
#include <shared_mutex>
|
||||
#include <string>
|
||||
|
||||
namespace ripple {
|
||||
@@ -223,9 +225,8 @@ class DatabaseCon;
|
||||
class ManifestCache
|
||||
{
|
||||
private:
|
||||
beast::Journal mutable j_;
|
||||
std::mutex apply_mutex_;
|
||||
std::mutex mutable read_mutex_;
|
||||
beast::Journal j_;
|
||||
std::shared_mutex mutable mutex_;
|
||||
|
||||
/** Active manifests stored by master public key. */
|
||||
hash_map<PublicKey, Manifest> map_;
|
||||
@@ -378,8 +379,10 @@ public:
|
||||
|
||||
/** Invokes the callback once for every populated manifest.
|
||||
|
||||
@note Undefined behavior results when calling ManifestCache members from
|
||||
within the callback
|
||||
@note Do not call ManifestCache member functions from within the
|
||||
callback. This can re-lock the mutex from the same thread, which is UB.
|
||||
@note Do not write ManifestCache member variables from within the
|
||||
callback. This can lead to data races.
|
||||
|
||||
@param f Function called for each manifest
|
||||
|
||||
@@ -391,7 +394,7 @@ public:
|
||||
void
|
||||
for_each_manifest(Function&& f) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
for (auto const& [_, manifest] : map_)
|
||||
{
|
||||
(void)_;
|
||||
@@ -401,8 +404,10 @@ public:
|
||||
|
||||
/** Invokes the callback once for every populated manifest.
|
||||
|
||||
@note Undefined behavior results when calling ManifestCache members from
|
||||
within the callback
|
||||
@note Do not call ManifestCache member functions from within the
|
||||
callback. This can re-lock the mutex from the same thread, which is UB.
|
||||
@note Do not write ManifestCache member variables from
|
||||
within the callback. This can lead to data races.
|
||||
|
||||
@param pf Pre-function called with the maximum number of times f will be
|
||||
called (useful for memory allocations)
|
||||
@@ -417,7 +422,7 @@ public:
|
||||
void
|
||||
for_each_manifest(PreFun&& pf, EachFun&& f) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
pf(map_.size());
|
||||
for (auto const& [_, manifest] : map_)
|
||||
{
|
||||
|
||||
@@ -37,9 +37,8 @@
|
||||
#include <ripple/app/misc/ValidatorKeys.h>
|
||||
#include <ripple/app/misc/ValidatorList.h>
|
||||
#include <ripple/app/misc/impl/AccountTxPaging.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfacePostgres.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
|
||||
#include <ripple/app/rdb/backend/PostgresDatabase.h>
|
||||
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <ripple/app/reporting/ReportingETL.h>
|
||||
#include <ripple/app/tx/apply.h>
|
||||
#include <ripple/basics/PerfLog.h>
|
||||
@@ -63,6 +62,7 @@
|
||||
#include <ripple/protocol/STParsedJSON.h>
|
||||
#include <ripple/resource/Fees.h>
|
||||
#include <ripple/resource/ResourceManager.h>
|
||||
#include <ripple/rpc/BookChanges.h>
|
||||
#include <ripple/rpc/DeliveredAmount.h>
|
||||
#include <ripple/rpc/impl/RPCHelpers.h>
|
||||
#include <boost/asio/ip/host_name.hpp>
|
||||
@@ -445,9 +445,9 @@ public:
|
||||
pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
|
||||
void
|
||||
pubProposedTransaction(
|
||||
std::shared_ptr<ReadView const> const& lpCurrent,
|
||||
std::shared_ptr<STTx const> const& stTxn,
|
||||
TER terResult) override;
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& transaction,
|
||||
TER result) override;
|
||||
void
|
||||
pubValidation(std::shared_ptr<STValidation> const& val) override;
|
||||
|
||||
@@ -503,6 +503,11 @@ public:
|
||||
bool
|
||||
unsubLedger(std::uint64_t uListener) override;
|
||||
|
||||
bool
|
||||
subBookChanges(InfoSub::ref ispListener) override;
|
||||
bool
|
||||
unsubBookChanges(std::uint64_t uListener) override;
|
||||
|
||||
bool
|
||||
subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
|
||||
override;
|
||||
@@ -612,20 +617,26 @@ private:
|
||||
|
||||
Json::Value
|
||||
transJson(
|
||||
const STTx& stTxn,
|
||||
TER terResult,
|
||||
bool bValidated,
|
||||
std::shared_ptr<ReadView const> const& lpCurrent);
|
||||
const STTx& transaction,
|
||||
TER result,
|
||||
bool validated,
|
||||
std::shared_ptr<ReadView const> const& ledger);
|
||||
|
||||
void
|
||||
pubValidatedTransaction(
|
||||
std::shared_ptr<ReadView const> const& alAccepted,
|
||||
const AcceptedLedgerTx& alTransaction);
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
AcceptedLedgerTx const& transaction);
|
||||
|
||||
void
|
||||
pubAccountTransaction(
|
||||
std::shared_ptr<ReadView const> const& lpCurrent,
|
||||
const AcceptedLedgerTx& alTransaction,
|
||||
bool isAccepted);
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
AcceptedLedgerTx const& transaction);
|
||||
|
||||
void
|
||||
pubProposedAccountTransaction(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& transaction,
|
||||
TER result);
|
||||
|
||||
void
|
||||
pubServer();
|
||||
@@ -738,9 +749,10 @@ private:
|
||||
sValidations, // Received validations.
|
||||
sPeerStatus, // Peer status changes.
|
||||
sConsensusPhase, // Consensus phase
|
||||
sBookChanges, // Per-ledger order book changes
|
||||
|
||||
sLastEntry = sConsensusPhase // as this name implies, any new entry
|
||||
// must be ADDED ABOVE this one
|
||||
sLastEntry = sBookChanges // as this name implies, any new entry
|
||||
// must be ADDED ABOVE this one
|
||||
};
|
||||
std::array<SubMapType, SubTypes::sLastEntry + 1> mStreamMaps;
|
||||
|
||||
@@ -907,7 +919,10 @@ void
|
||||
NetworkOPsImp::setStateTimer()
|
||||
{
|
||||
setHeartbeatTimer();
|
||||
setClusterTimer();
|
||||
|
||||
// Only do this work if a cluster is configured
|
||||
if (app_.cluster().size() != 0)
|
||||
setClusterTimer();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -949,7 +964,7 @@ NetworkOPsImp::setHeartbeatTimer()
|
||||
heartbeatTimer_,
|
||||
mConsensus.parms().ledgerGRANULARITY,
|
||||
[this]() {
|
||||
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this](Job&) {
|
||||
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
|
||||
processHeartbeatTimer();
|
||||
});
|
||||
},
|
||||
@@ -960,11 +975,12 @@ void
|
||||
NetworkOPsImp::setClusterTimer()
|
||||
{
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
setTimer(
|
||||
clusterTimer_,
|
||||
10s,
|
||||
[this]() {
|
||||
m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this](Job&) {
|
||||
m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
|
||||
processClusterTimer();
|
||||
});
|
||||
},
|
||||
@@ -1045,7 +1061,11 @@ NetworkOPsImp::processHeartbeatTimer()
|
||||
void
|
||||
NetworkOPsImp::processClusterTimer()
|
||||
{
|
||||
if (app_.cluster().size() == 0)
|
||||
return;
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
bool const update = app_.cluster().update(
|
||||
app_.nodeIdentity().first,
|
||||
"",
|
||||
@@ -1153,7 +1173,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
|
||||
|
||||
auto tx = std::make_shared<Transaction>(trans, reason, app_);
|
||||
|
||||
m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx](Job&) {
|
||||
m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
|
||||
auto t = tx;
|
||||
processTransaction(t, false, false, FailHard::no);
|
||||
});
|
||||
@@ -1172,6 +1192,7 @@ NetworkOPsImp::processTransaction(
|
||||
if ((newFlags & SF_BAD) != 0)
|
||||
{
|
||||
// cached bad
|
||||
JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
|
||||
transaction->setStatus(INVALID);
|
||||
transaction->setResult(temBAD_SIGNATURE);
|
||||
return;
|
||||
@@ -1224,9 +1245,8 @@ NetworkOPsImp::doTransactionAsync(
|
||||
|
||||
if (mDispatchState == DispatchState::none)
|
||||
{
|
||||
if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this](Job&) {
|
||||
transactionBatch();
|
||||
}))
|
||||
if (m_job_queue.addJob(
|
||||
jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
|
||||
{
|
||||
mDispatchState = DispatchState::scheduled;
|
||||
}
|
||||
@@ -1262,10 +1282,9 @@ NetworkOPsImp::doTransactionSync(
|
||||
if (mTransactions.size())
|
||||
{
|
||||
// More transactions need to be applied, but by another job.
|
||||
if (m_job_queue.addJob(
|
||||
jtBATCH, "transactionBatch", [this](Job&) {
|
||||
transactionBatch();
|
||||
}))
|
||||
if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
|
||||
transactionBatch();
|
||||
}))
|
||||
{
|
||||
mDispatchState = DispatchState::scheduled;
|
||||
}
|
||||
@@ -1744,7 +1763,7 @@ NetworkOPsImp::switchLastClosedLedger(
|
||||
auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
|
||||
std::optional<Rules> rules;
|
||||
if (lastVal)
|
||||
rules.emplace(*lastVal, app_.config().features);
|
||||
rules = makeRulesGivenLedger(*lastVal, app_.config().features);
|
||||
else
|
||||
rules.emplace(app_.config().features);
|
||||
app_.openLedger().accept(
|
||||
@@ -2312,10 +2331,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
|
||||
if (!app_.config().SERVER_DOMAIN.empty())
|
||||
info[jss::server_domain] = app_.config().SERVER_DOMAIN;
|
||||
|
||||
if (!app_.config().reporting())
|
||||
if (auto const netid = app_.overlay().networkID())
|
||||
info[jss::network_id] = static_cast<Json::UInt>(*netid);
|
||||
|
||||
info[jss::build_version] = BuildInfo::getVersionString();
|
||||
|
||||
info[jss::server_state] = strOperatingMode(admin);
|
||||
@@ -2458,6 +2473,9 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
|
||||
|
||||
if (!app_.config().reporting())
|
||||
{
|
||||
if (auto const netid = app_.overlay().networkID())
|
||||
info[jss::network_id] = static_cast<Json::UInt>(*netid);
|
||||
|
||||
auto const escalationMetrics =
|
||||
app_.getTxQ().getMetrics(*app_.openLedger().current());
|
||||
|
||||
@@ -2645,11 +2663,11 @@ NetworkOPsImp::getLedgerFetchInfo()
|
||||
|
||||
void
|
||||
NetworkOPsImp::pubProposedTransaction(
|
||||
std::shared_ptr<ReadView const> const& lpCurrent,
|
||||
std::shared_ptr<STTx const> const& stTxn,
|
||||
TER terResult)
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& transaction,
|
||||
TER result)
|
||||
{
|
||||
Json::Value jvObj = transJson(*stTxn, terResult, false, lpCurrent);
|
||||
Json::Value jvObj = transJson(*transaction, result, false, ledger);
|
||||
|
||||
{
|
||||
std::lock_guard sl(mSubLock);
|
||||
@@ -2670,10 +2688,8 @@ NetworkOPsImp::pubProposedTransaction(
|
||||
}
|
||||
}
|
||||
}
|
||||
AcceptedLedgerTx alt(
|
||||
lpCurrent, stTxn, terResult, app_.accountIDCache(), app_.logs());
|
||||
JLOG(m_journal.trace()) << "pubProposed: " << alt.getJson();
|
||||
pubAccountTransaction(lpCurrent, alt, false);
|
||||
|
||||
pubProposedAccountTransaction(ledger, transaction, result);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -2848,9 +2864,13 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
|
||||
lpAccepted->info().hash, alpAccepted);
|
||||
}
|
||||
|
||||
assert(alpAccepted->getLedger().get() == lpAccepted.get());
|
||||
|
||||
{
|
||||
JLOG(m_journal.debug())
|
||||
<< "Publishing ledger = " << lpAccepted->info().seq;
|
||||
<< "Publishing ledger " << lpAccepted->info().seq << " "
|
||||
<< lpAccepted->info().hash;
|
||||
|
||||
std::lock_guard sl(mSubLock);
|
||||
|
||||
if (!mStreamMaps[sLedger].empty())
|
||||
@@ -2870,7 +2890,7 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
|
||||
jvObj[jss::reserve_inc] =
|
||||
lpAccepted->fees().increment.jsonClipped();
|
||||
|
||||
jvObj[jss::txn_count] = Json::UInt(alpAccepted->getTxnCount());
|
||||
jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
|
||||
|
||||
if (mMode >= OperatingMode::SYNCING)
|
||||
{
|
||||
@@ -2884,10 +2904,6 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
|
||||
InfoSub::pointer p = it->second.lock();
|
||||
if (p)
|
||||
{
|
||||
JLOG(m_journal.debug())
|
||||
<< "Publishing ledger = " << lpAccepted->info().seq
|
||||
<< " : consumer = " << p->getConsumer()
|
||||
<< " : obj = " << jvObj;
|
||||
p->send(jvObj, true);
|
||||
++it;
|
||||
}
|
||||
@@ -2896,6 +2912,24 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
|
||||
}
|
||||
}
|
||||
|
||||
if (!mStreamMaps[sBookChanges].empty())
|
||||
{
|
||||
Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
|
||||
|
||||
auto it = mStreamMaps[sBookChanges].begin();
|
||||
while (it != mStreamMaps[sBookChanges].end())
|
||||
{
|
||||
InfoSub::pointer p = it->second.lock();
|
||||
if (p)
|
||||
{
|
||||
p->send(jvObj, true);
|
||||
++it;
|
||||
}
|
||||
else
|
||||
it = mStreamMaps[sBookChanges].erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
static bool firstTime = true;
|
||||
if (firstTime)
|
||||
@@ -2919,9 +2953,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
|
||||
}
|
||||
|
||||
// Don't lock since pubAcceptedTransaction is locking.
|
||||
for (auto const& [_, accTx] : alpAccepted->getMap())
|
||||
for (auto const& accTx : *alpAccepted)
|
||||
{
|
||||
(void)_;
|
||||
JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
|
||||
pubValidatedTransaction(lpAccepted, *accTx);
|
||||
}
|
||||
@@ -2941,7 +2974,7 @@ NetworkOPsImp::reportFeeChange()
|
||||
if (f != mLastFeeSummary)
|
||||
{
|
||||
m_job_queue.addJob(
|
||||
jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this](Job&) {
|
||||
jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
|
||||
pubServer();
|
||||
});
|
||||
}
|
||||
@@ -2953,7 +2986,7 @@ NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)
|
||||
m_job_queue.addJob(
|
||||
jtCLIENT_CONSENSUS,
|
||||
"reportConsensusStateChange->pubConsensus",
|
||||
[this, phase](Job&) { pubConsensus(phase); });
|
||||
[this, phase]() { pubConsensus(phase); });
|
||||
}
|
||||
|
||||
inline void
|
||||
@@ -2971,26 +3004,26 @@ NetworkOPsImp::getLocalTxCount()
|
||||
// transactions.
|
||||
Json::Value
|
||||
NetworkOPsImp::transJson(
|
||||
const STTx& stTxn,
|
||||
TER terResult,
|
||||
bool bValidated,
|
||||
std::shared_ptr<ReadView const> const& lpCurrent)
|
||||
const STTx& transaction,
|
||||
TER result,
|
||||
bool validated,
|
||||
std::shared_ptr<ReadView const> const& ledger)
|
||||
{
|
||||
Json::Value jvObj(Json::objectValue);
|
||||
std::string sToken;
|
||||
std::string sHuman;
|
||||
|
||||
transResultInfo(terResult, sToken, sHuman);
|
||||
transResultInfo(result, sToken, sHuman);
|
||||
|
||||
jvObj[jss::type] = "transaction";
|
||||
jvObj[jss::transaction] = stTxn.getJson(JsonOptions::none);
|
||||
jvObj[jss::transaction] = transaction.getJson(JsonOptions::none);
|
||||
|
||||
if (bValidated)
|
||||
if (validated)
|
||||
{
|
||||
jvObj[jss::ledger_index] = lpCurrent->info().seq;
|
||||
jvObj[jss::ledger_hash] = to_string(lpCurrent->info().hash);
|
||||
jvObj[jss::ledger_index] = ledger->info().seq;
|
||||
jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
|
||||
jvObj[jss::transaction][jss::date] =
|
||||
lpCurrent->info().closeTime.time_since_epoch().count();
|
||||
ledger->info().closeTime.time_since_epoch().count();
|
||||
jvObj[jss::validated] = true;
|
||||
|
||||
// WRITEME: Put the account next seq here
|
||||
@@ -2998,24 +3031,24 @@ NetworkOPsImp::transJson(
|
||||
else
|
||||
{
|
||||
jvObj[jss::validated] = false;
|
||||
jvObj[jss::ledger_current_index] = lpCurrent->info().seq;
|
||||
jvObj[jss::ledger_current_index] = ledger->info().seq;
|
||||
}
|
||||
|
||||
jvObj[jss::status] = bValidated ? "closed" : "proposed";
|
||||
jvObj[jss::status] = validated ? "closed" : "proposed";
|
||||
jvObj[jss::engine_result] = sToken;
|
||||
jvObj[jss::engine_result_code] = terResult;
|
||||
jvObj[jss::engine_result_code] = result;
|
||||
jvObj[jss::engine_result_message] = sHuman;
|
||||
|
||||
if (stTxn.getTxnType() == ttOFFER_CREATE)
|
||||
if (transaction.getTxnType() == ttOFFER_CREATE)
|
||||
{
|
||||
auto const account = stTxn.getAccountID(sfAccount);
|
||||
auto const amount = stTxn.getFieldAmount(sfTakerGets);
|
||||
auto const account = transaction.getAccountID(sfAccount);
|
||||
auto const amount = transaction.getFieldAmount(sfTakerGets);
|
||||
|
||||
// If the offer create is not self funded then add the owner balance
|
||||
if (account != amount.issue().account)
|
||||
{
|
||||
auto const ownerFunds = accountFunds(
|
||||
*lpCurrent,
|
||||
*ledger,
|
||||
account,
|
||||
amount,
|
||||
fhIGNORE_FREEZE,
|
||||
@@ -3029,17 +3062,18 @@ NetworkOPsImp::transJson(
|
||||
|
||||
void
|
||||
NetworkOPsImp::pubValidatedTransaction(
|
||||
std::shared_ptr<ReadView const> const& alAccepted,
|
||||
const AcceptedLedgerTx& alTx)
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
const AcceptedLedgerTx& transaction)
|
||||
{
|
||||
std::shared_ptr<STTx const> stTxn = alTx.getTxn();
|
||||
Json::Value jvObj = transJson(*stTxn, alTx.getResult(), true, alAccepted);
|
||||
auto const& stTxn = transaction.getTxn();
|
||||
|
||||
Json::Value jvObj =
|
||||
transJson(*stTxn, transaction.getResult(), true, ledger);
|
||||
|
||||
if (auto const txMeta = alTx.getMeta())
|
||||
{
|
||||
jvObj[jss::meta] = txMeta->getJson(JsonOptions::none);
|
||||
RPC::insertDeliveredAmount(
|
||||
jvObj[jss::meta], *alAccepted, stTxn, *txMeta);
|
||||
auto const& meta = transaction.getMeta();
|
||||
jvObj[jss::meta] = meta.getJson(JsonOptions::none);
|
||||
RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, stTxn, meta);
|
||||
}
|
||||
|
||||
{
|
||||
@@ -3074,32 +3108,31 @@ NetworkOPsImp::pubValidatedTransaction(
|
||||
it = mStreamMaps[sRTTransactions].erase(it);
|
||||
}
|
||||
}
|
||||
app_.getOrderBookDB().processTxn(alAccepted, alTx, jvObj);
|
||||
pubAccountTransaction(alAccepted, alTx, true);
|
||||
|
||||
if (transaction.getResult() == tesSUCCESS)
|
||||
app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
|
||||
|
||||
pubAccountTransaction(ledger, transaction);
|
||||
}
|
||||
|
||||
void
|
||||
NetworkOPsImp::pubAccountTransaction(
|
||||
std::shared_ptr<ReadView const> const& lpCurrent,
|
||||
const AcceptedLedgerTx& alTx,
|
||||
bool bAccepted)
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
AcceptedLedgerTx const& transaction)
|
||||
{
|
||||
hash_set<InfoSub::pointer> notify;
|
||||
int iProposed = 0;
|
||||
int iAccepted = 0;
|
||||
|
||||
std::vector<SubAccountHistoryInfo> accountHistoryNotify;
|
||||
auto const currLedgerSeq = lpCurrent->seq();
|
||||
auto const currLedgerSeq = ledger->seq();
|
||||
{
|
||||
std::lock_guard sl(mSubLock);
|
||||
|
||||
if (!bAccepted && mSubRTAccount.empty())
|
||||
return;
|
||||
|
||||
if (!mSubAccount.empty() || (!mSubRTAccount.empty()) ||
|
||||
if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
|
||||
!mSubAccountHistory.empty())
|
||||
{
|
||||
for (auto const& affectedAccount : alTx.getAffected())
|
||||
for (auto const& affectedAccount : transaction.getAffected())
|
||||
{
|
||||
if (auto simiIt = mSubRTAccount.find(affectedAccount);
|
||||
simiIt != mSubRTAccount.end())
|
||||
@@ -3121,80 +3154,140 @@ NetworkOPsImp::pubAccountTransaction(
|
||||
}
|
||||
}
|
||||
|
||||
if (bAccepted)
|
||||
if (auto simiIt = mSubAccount.find(affectedAccount);
|
||||
simiIt != mSubAccount.end())
|
||||
{
|
||||
if (auto simiIt = mSubAccount.find(affectedAccount);
|
||||
simiIt != mSubAccount.end())
|
||||
auto it = simiIt->second.begin();
|
||||
while (it != simiIt->second.end())
|
||||
{
|
||||
auto it = simiIt->second.begin();
|
||||
while (it != simiIt->second.end())
|
||||
{
|
||||
InfoSub::pointer p = it->second.lock();
|
||||
InfoSub::pointer p = it->second.lock();
|
||||
|
||||
if (p)
|
||||
{
|
||||
notify.insert(p);
|
||||
++it;
|
||||
++iAccepted;
|
||||
}
|
||||
else
|
||||
it = simiIt->second.erase(it);
|
||||
if (p)
|
||||
{
|
||||
notify.insert(p);
|
||||
++it;
|
||||
++iAccepted;
|
||||
}
|
||||
else
|
||||
it = simiIt->second.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
if (auto histoIt = mSubAccountHistory.find(affectedAccount);
|
||||
histoIt != mSubAccountHistory.end())
|
||||
{
|
||||
auto& subs = histoIt->second;
|
||||
auto it = subs.begin();
|
||||
while (it != subs.end())
|
||||
{
|
||||
SubAccountHistoryInfoWeak const& info = it->second;
|
||||
if (currLedgerSeq <= info.index_->separationLedgerSeq_)
|
||||
{
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (auto isSptr = info.sinkWptr_.lock(); isSptr)
|
||||
{
|
||||
accountHistoryNotify.emplace_back(
|
||||
SubAccountHistoryInfo{isSptr, info.index_});
|
||||
++it;
|
||||
}
|
||||
else
|
||||
{
|
||||
it = subs.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
if (auto histoIt = mSubAccountHistory.find(affectedAccount);
|
||||
histoIt != mSubAccountHistory.end())
|
||||
{
|
||||
auto& subs = histoIt->second;
|
||||
auto it = subs.begin();
|
||||
while (it != subs.end())
|
||||
{
|
||||
SubAccountHistoryInfoWeak const& info = it->second;
|
||||
if (currLedgerSeq <=
|
||||
info.index_->separationLedgerSeq_)
|
||||
{
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (auto isSptr = info.sinkWptr_.lock(); isSptr)
|
||||
{
|
||||
accountHistoryNotify.emplace_back(
|
||||
SubAccountHistoryInfo{isSptr, info.index_});
|
||||
++it;
|
||||
}
|
||||
else
|
||||
{
|
||||
it = subs.erase(it);
|
||||
}
|
||||
}
|
||||
if (subs.empty())
|
||||
mSubAccountHistory.erase(histoIt);
|
||||
}
|
||||
if (subs.empty())
|
||||
mSubAccountHistory.erase(histoIt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JLOG(m_journal.trace())
|
||||
<< "pubAccountTransaction:"
|
||||
<< " iProposed=" << iProposed << " iAccepted=" << iAccepted;
|
||||
<< "pubAccountTransaction: "
|
||||
<< "proposed=" << iProposed << ", accepted=" << iAccepted;
|
||||
|
||||
if (!notify.empty() || !accountHistoryNotify.empty())
|
||||
{
|
||||
std::shared_ptr<STTx const> stTxn = alTx.getTxn();
|
||||
Json::Value jvObj =
|
||||
transJson(*stTxn, alTx.getResult(), bAccepted, lpCurrent);
|
||||
auto const& stTxn = transaction.getTxn();
|
||||
|
||||
Json::Value jvObj =
|
||||
transJson(*stTxn, transaction.getResult(), true, ledger);
|
||||
|
||||
if (alTx.isApplied())
|
||||
{
|
||||
if (auto const txMeta = alTx.getMeta())
|
||||
auto const& meta = transaction.getMeta();
|
||||
|
||||
jvObj[jss::meta] = meta.getJson(JsonOptions::none);
|
||||
RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, stTxn, meta);
|
||||
}
|
||||
|
||||
for (InfoSub::ref isrListener : notify)
|
||||
isrListener->send(jvObj, true);
|
||||
|
||||
assert(!jvObj.isMember(jss::account_history_tx_stream));
|
||||
for (auto& info : accountHistoryNotify)
|
||||
{
|
||||
auto& index = info.index_;
|
||||
if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
|
||||
jvObj[jss::account_history_tx_first] = true;
|
||||
jvObj[jss::account_history_tx_index] = index->forwardTxIndex_++;
|
||||
info.sink_->send(jvObj, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
NetworkOPsImp::pubProposedAccountTransaction(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& tx,
|
||||
TER result)
|
||||
{
|
||||
hash_set<InfoSub::pointer> notify;
|
||||
int iProposed = 0;
|
||||
|
||||
std::vector<SubAccountHistoryInfo> accountHistoryNotify;
|
||||
|
||||
{
|
||||
std::lock_guard sl(mSubLock);
|
||||
|
||||
if (mSubRTAccount.empty())
|
||||
return;
|
||||
|
||||
if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
|
||||
!mSubAccountHistory.empty())
|
||||
{
|
||||
for (auto const& affectedAccount : tx->getMentionedAccounts())
|
||||
{
|
||||
jvObj[jss::meta] = txMeta->getJson(JsonOptions::none);
|
||||
RPC::insertDeliveredAmount(
|
||||
jvObj[jss::meta], *lpCurrent, stTxn, *txMeta);
|
||||
if (auto simiIt = mSubRTAccount.find(affectedAccount);
|
||||
simiIt != mSubRTAccount.end())
|
||||
{
|
||||
auto it = simiIt->second.begin();
|
||||
|
||||
while (it != simiIt->second.end())
|
||||
{
|
||||
InfoSub::pointer p = it->second.lock();
|
||||
|
||||
if (p)
|
||||
{
|
||||
notify.insert(p);
|
||||
++it;
|
||||
++iProposed;
|
||||
}
|
||||
else
|
||||
it = simiIt->second.erase(it);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
|
||||
|
||||
if (!notify.empty() || !accountHistoryNotify.empty())
|
||||
{
|
||||
Json::Value jvObj = transJson(*tx, result, false, ledger);
|
||||
|
||||
for (InfoSub::ref isrListener : notify)
|
||||
isrListener->send(jvObj, true);
|
||||
@@ -3304,8 +3397,9 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
#ifdef RIPPLED_REPORTING
|
||||
if (app_.config().reporting())
|
||||
{
|
||||
if (dynamic_cast<RelationalDBInterfacePostgres*>(
|
||||
&app_.getRelationalDBInterface()))
|
||||
// Use a dynamic_cast to return DatabaseType::None
|
||||
// on failure.
|
||||
if (dynamic_cast<PostgresDatabase*>(&app_.getRelationalDatabase()))
|
||||
{
|
||||
return DatabaseType::Postgres;
|
||||
}
|
||||
@@ -3313,16 +3407,18 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
}
|
||||
else
|
||||
{
|
||||
if (dynamic_cast<RelationalDBInterfaceSqlite*>(
|
||||
&app_.getRelationalDBInterface()))
|
||||
// Use a dynamic_cast to return DatabaseType::None
|
||||
// on failure.
|
||||
if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
|
||||
{
|
||||
return DatabaseType::Sqlite;
|
||||
}
|
||||
return DatabaseType::None;
|
||||
}
|
||||
#else
|
||||
if (dynamic_cast<RelationalDBInterfaceSqlite*>(
|
||||
&app_.getRelationalDBInterface()))
|
||||
// Use a dynamic_cast to return DatabaseType::None
|
||||
// on failure.
|
||||
if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
|
||||
{
|
||||
return DatabaseType::Sqlite;
|
||||
}
|
||||
@@ -3346,7 +3442,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
app_.getJobQueue().addJob(
|
||||
jtCLIENT_ACCT_HIST,
|
||||
"AccountHistoryTxStream",
|
||||
[this, dbType = databaseType, subInfo](Job&) {
|
||||
[this, dbType = databaseType, subInfo]() {
|
||||
auto const& accountId = subInfo.index_->accountId_;
|
||||
auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
|
||||
auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
|
||||
@@ -3408,17 +3504,16 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
auto getMoreTxns =
|
||||
[&](std::uint32_t minLedger,
|
||||
std::uint32_t maxLedger,
|
||||
std::optional<RelationalDBInterface::AccountTxMarker>
|
||||
marker)
|
||||
std::optional<RelationalDatabase::AccountTxMarker> marker)
|
||||
-> std::optional<std::pair<
|
||||
RelationalDBInterface::AccountTxs,
|
||||
std::optional<RelationalDBInterface::AccountTxMarker>>> {
|
||||
RelationalDatabase::AccountTxs,
|
||||
std::optional<RelationalDatabase::AccountTxMarker>>> {
|
||||
switch (dbType)
|
||||
{
|
||||
case Postgres: {
|
||||
auto db = static_cast<RelationalDBInterfacePostgres*>(
|
||||
&app_.getRelationalDBInterface());
|
||||
RelationalDBInterface::AccountTxArgs args;
|
||||
auto db = static_cast<PostgresDatabase*>(
|
||||
&app_.getRelationalDatabase());
|
||||
RelationalDatabase::AccountTxArgs args;
|
||||
args.account = accountId;
|
||||
LedgerRange range{minLedger, maxLedger};
|
||||
args.ledger = range;
|
||||
@@ -3434,7 +3529,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
}
|
||||
|
||||
if (auto txns =
|
||||
std::get_if<RelationalDBInterface::AccountTxs>(
|
||||
std::get_if<RelationalDatabase::AccountTxs>(
|
||||
&txResult.transactions);
|
||||
txns)
|
||||
{
|
||||
@@ -3450,9 +3545,9 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
}
|
||||
}
|
||||
case Sqlite: {
|
||||
auto db = static_cast<RelationalDBInterfaceSqlite*>(
|
||||
&app_.getRelationalDBInterface());
|
||||
RelationalDBInterface::AccountTxPageOptions options{
|
||||
auto db = static_cast<SQLiteDatabase*>(
|
||||
&app_.getRelationalDatabase());
|
||||
RelationalDatabase::AccountTxPageOptions options{
|
||||
accountId, minLedger, maxLedger, marker, 0, true};
|
||||
return db->newestAccountTxPage(options);
|
||||
}
|
||||
@@ -3513,7 +3608,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
|
||||
return;
|
||||
}
|
||||
|
||||
std::optional<RelationalDBInterface::AccountTxMarker> marker{};
|
||||
std::optional<RelationalDatabase::AccountTxMarker> marker{};
|
||||
while (!subInfo.index_->stopHistorical_)
|
||||
{
|
||||
auto dbResult =
|
||||
@@ -3813,6 +3908,16 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult)
|
||||
.second;
|
||||
}
|
||||
|
||||
// <-- bool: true=added, false=already there
|
||||
bool
|
||||
NetworkOPsImp::subBookChanges(InfoSub::ref isrListener)
|
||||
{
|
||||
std::lock_guard sl(mSubLock);
|
||||
return mStreamMaps[sBookChanges]
|
||||
.emplace(isrListener->getSeq(), isrListener)
|
||||
.second;
|
||||
}
|
||||
|
||||
// <-- bool: true=erased, false=was not there
|
||||
bool
|
||||
NetworkOPsImp::unsubLedger(std::uint64_t uSeq)
|
||||
@@ -3821,6 +3926,14 @@ NetworkOPsImp::unsubLedger(std::uint64_t uSeq)
|
||||
return mStreamMaps[sLedger].erase(uSeq);
|
||||
}
|
||||
|
||||
// <-- bool: true=erased, false=was not there
|
||||
bool
|
||||
NetworkOPsImp::unsubBookChanges(std::uint64_t uSeq)
|
||||
{
|
||||
std::lock_guard sl(mSubLock);
|
||||
return mStreamMaps[sBookChanges].erase(uSeq);
|
||||
}
|
||||
|
||||
// <-- bool: true=added, false=already there
|
||||
bool
|
||||
NetworkOPsImp::subManifests(InfoSub::ref isrListener)
|
||||
|
||||
@@ -255,9 +255,9 @@ public:
|
||||
pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) = 0;
|
||||
virtual void
|
||||
pubProposedTransaction(
|
||||
std::shared_ptr<ReadView const> const& lpCurrent,
|
||||
std::shared_ptr<STTx const> const& stTxn,
|
||||
TER terResult) = 0;
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
std::shared_ptr<STTx const> const& transaction,
|
||||
TER result) = 0;
|
||||
virtual void
|
||||
pubValidation(std::shared_ptr<STValidation> const& val) = 0;
|
||||
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_APP_MISC_ORDERBOOK_H_INCLUDED
|
||||
#define RIPPLE_APP_MISC_ORDERBOOK_H_INCLUDED
|
||||
|
||||
namespace ripple {
|
||||
|
||||
/** Describes a serialized ledger entry for an order book. */
|
||||
class OrderBook
|
||||
{
|
||||
public:
|
||||
using pointer = std::shared_ptr<OrderBook>;
|
||||
using ref = std::shared_ptr<OrderBook> const&;
|
||||
using List = std::vector<pointer>;
|
||||
|
||||
/** Construct from a currency specification.
|
||||
|
||||
@param index ???
|
||||
@param book in and out currency/issuer pairs.
|
||||
*/
|
||||
// VFALCO NOTE what is the meaning of the index parameter?
|
||||
OrderBook(uint256 const& base, Book const& book)
|
||||
: mBookBase(base), mBook(book)
|
||||
{
|
||||
}
|
||||
|
||||
uint256 const&
|
||||
getBookBase() const
|
||||
{
|
||||
return mBookBase;
|
||||
}
|
||||
|
||||
Book const&
|
||||
book() const
|
||||
{
|
||||
return mBook;
|
||||
}
|
||||
|
||||
Currency const&
|
||||
getCurrencyIn() const
|
||||
{
|
||||
return mBook.in.currency;
|
||||
}
|
||||
|
||||
Currency const&
|
||||
getCurrencyOut() const
|
||||
{
|
||||
return mBook.out.currency;
|
||||
}
|
||||
|
||||
AccountID const&
|
||||
getIssuerIn() const
|
||||
{
|
||||
return mBook.in.account;
|
||||
}
|
||||
|
||||
AccountID const&
|
||||
getIssuerOut() const
|
||||
{
|
||||
return mBook.out.account;
|
||||
}
|
||||
|
||||
private:
|
||||
uint256 const mBookBase;
|
||||
Book const mBook;
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
@@ -55,7 +55,7 @@ public:
|
||||
clampFetchDepth(std::uint32_t fetch_depth) const = 0;
|
||||
|
||||
virtual std::unique_ptr<NodeStore::Database>
|
||||
makeNodeStore(std::int32_t readThreads) = 0;
|
||||
makeNodeStore(int readThreads) = 0;
|
||||
|
||||
/** Highest ledger that may be deleted. */
|
||||
virtual LedgerIndex
|
||||
|
||||
@@ -17,17 +17,18 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/misc/SHAMapStoreImp.h>
|
||||
|
||||
#include <ripple/app/ledger/TransactionMaster.h>
|
||||
#include <ripple/app/misc/NetworkOPs.h>
|
||||
#include <ripple/app/misc/SHAMapStoreImp.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
|
||||
#include <ripple/app/rdb/State.h>
|
||||
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <ripple/beast/core/CurrentThreadName.h>
|
||||
#include <ripple/core/ConfigSections.h>
|
||||
#include <ripple/core/Pg.h>
|
||||
#include <ripple/nodestore/impl/DatabaseRotatingImp.h>
|
||||
|
||||
#include <ripple/nodestore/Scheduler.h>
|
||||
#include <ripple/nodestore/impl/DatabaseRotatingImp.h>
|
||||
#include <ripple/shamap/SHAMapMissingNode.h>
|
||||
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
|
||||
@@ -138,7 +139,7 @@ SHAMapStoreImp::SHAMapStoreImp(
|
||||
if (get_if_exists(section, "age_threshold_seconds", temp))
|
||||
ageThreshold_ = std::chrono::seconds{temp};
|
||||
if (get_if_exists(section, "recovery_wait_seconds", temp))
|
||||
recoveryWaitTime_.emplace(std::chrono::seconds{temp});
|
||||
recoveryWaitTime_ = std::chrono::seconds{temp};
|
||||
|
||||
get_if_exists(section, "advisory_delete", advisoryDelete_);
|
||||
|
||||
@@ -166,7 +167,7 @@ SHAMapStoreImp::SHAMapStoreImp(
|
||||
}
|
||||
|
||||
std::unique_ptr<NodeStore::Database>
|
||||
SHAMapStoreImp::makeNodeStore(std::int32_t readThreads)
|
||||
SHAMapStoreImp::makeNodeStore(int readThreads)
|
||||
{
|
||||
auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
|
||||
|
||||
@@ -268,7 +269,7 @@ SHAMapStoreImp::copyNode(std::uint64_t& nodeCount, SHAMapTreeNode const& node)
|
||||
true);
|
||||
if (!(++nodeCount % checkHealthInterval_))
|
||||
{
|
||||
if (health())
|
||||
if (stopping())
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -326,7 +327,7 @@ SHAMapStoreImp::run()
|
||||
|
||||
bool const readyToRotate =
|
||||
validatedSeq >= lastRotated + deleteInterval_ &&
|
||||
canDelete_ >= lastRotated - 1 && !health();
|
||||
canDelete_ >= lastRotated - 1 && !stopping();
|
||||
|
||||
// Make sure we don't delete ledgers currently being
|
||||
// imported into the ShardStore
|
||||
@@ -358,47 +359,39 @@ SHAMapStoreImp::run()
|
||||
<< ledgerMaster_->getValidatedLedgerAge().count() << 's';
|
||||
|
||||
clearPrior(lastRotated);
|
||||
switch (health())
|
||||
{
|
||||
case Health::stopping:
|
||||
return;
|
||||
case Health::unhealthy:
|
||||
continue;
|
||||
case Health::ok:
|
||||
default:;
|
||||
}
|
||||
if (stopping())
|
||||
return;
|
||||
|
||||
JLOG(journal_.debug()) << "copying ledger " << validatedSeq;
|
||||
std::uint64_t nodeCount = 0;
|
||||
validatedLedger->stateMap().snapShot(false)->visitNodes(std::bind(
|
||||
&SHAMapStoreImp::copyNode,
|
||||
this,
|
||||
std::ref(nodeCount),
|
||||
std::placeholders::_1));
|
||||
switch (health())
|
||||
|
||||
try
|
||||
{
|
||||
case Health::stopping:
|
||||
return;
|
||||
case Health::unhealthy:
|
||||
continue;
|
||||
case Health::ok:
|
||||
default:;
|
||||
validatedLedger->stateMap().snapShot(false)->visitNodes(
|
||||
std::bind(
|
||||
&SHAMapStoreImp::copyNode,
|
||||
this,
|
||||
std::ref(nodeCount),
|
||||
std::placeholders::_1));
|
||||
}
|
||||
catch (SHAMapMissingNode const& e)
|
||||
{
|
||||
JLOG(journal_.error())
|
||||
<< "Missing node while copying ledger before rotate: "
|
||||
<< e.what();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (stopping())
|
||||
return;
|
||||
// Only log if we completed without a "health" abort
|
||||
JLOG(journal_.debug()) << "copied ledger " << validatedSeq
|
||||
<< " nodecount " << nodeCount;
|
||||
|
||||
JLOG(journal_.debug()) << "freshening caches";
|
||||
freshenCaches();
|
||||
switch (health())
|
||||
{
|
||||
case Health::stopping:
|
||||
return;
|
||||
case Health::unhealthy:
|
||||
continue;
|
||||
case Health::ok:
|
||||
default:;
|
||||
}
|
||||
if (stopping())
|
||||
return;
|
||||
// Only log if we completed without a "health" abort
|
||||
JLOG(journal_.debug()) << validatedSeq << " freshened caches";
|
||||
|
||||
@@ -408,15 +401,8 @@ SHAMapStoreImp::run()
|
||||
<< validatedSeq << " new backend " << newBackend->getName();
|
||||
|
||||
clearCaches(validatedSeq);
|
||||
switch (health())
|
||||
{
|
||||
case Health::stopping:
|
||||
return;
|
||||
case Health::unhealthy:
|
||||
continue;
|
||||
case Health::ok:
|
||||
default:;
|
||||
}
|
||||
if (stopping())
|
||||
return;
|
||||
|
||||
lastRotated = validatedSeq;
|
||||
|
||||
@@ -485,6 +471,7 @@ SHAMapStoreImp::dbPaths()
|
||||
bool writableDbExists = false;
|
||||
bool archiveDbExists = false;
|
||||
|
||||
std::vector<boost::filesystem::path> pathsToDelete;
|
||||
for (boost::filesystem::directory_iterator it(dbPath);
|
||||
it != boost::filesystem::directory_iterator();
|
||||
++it)
|
||||
@@ -494,7 +481,7 @@ SHAMapStoreImp::dbPaths()
|
||||
else if (!state.archiveDb.compare(it->path().string()))
|
||||
archiveDbExists = true;
|
||||
else if (!dbPrefix_.compare(it->path().stem().string()))
|
||||
boost::filesystem::remove_all(it->path());
|
||||
pathsToDelete.push_back(it->path());
|
||||
}
|
||||
|
||||
if ((!writableDbExists && state.writableDb.size()) ||
|
||||
@@ -524,6 +511,10 @@ SHAMapStoreImp::dbPaths()
|
||||
|
||||
Throw<std::runtime_error>("state db error");
|
||||
}
|
||||
|
||||
// The necessary directories exist. Now, remove any others.
|
||||
for (boost::filesystem::path& p : pathsToDelete)
|
||||
boost::filesystem::remove_all(p);
|
||||
}
|
||||
|
||||
std::unique_ptr<NodeStore::Backend>
|
||||
@@ -575,7 +566,7 @@ SHAMapStoreImp::clearSql(
|
||||
min = *m;
|
||||
}
|
||||
|
||||
if (min > lastRotated || health() != Health::ok)
|
||||
if (min > lastRotated || stopping())
|
||||
return;
|
||||
if (min == lastRotated)
|
||||
{
|
||||
@@ -596,11 +587,11 @@ SHAMapStoreImp::clearSql(
|
||||
JLOG(journal_.trace())
|
||||
<< "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < "
|
||||
<< min << " from: " << TableName;
|
||||
if (health())
|
||||
if (stopping())
|
||||
return;
|
||||
if (min < lastRotated)
|
||||
std::this_thread::sleep_for(backOff_);
|
||||
if (health())
|
||||
if (stopping())
|
||||
return;
|
||||
}
|
||||
JLOG(journal_.debug()) << "finished deleting from: " << TableName;
|
||||
@@ -640,23 +631,21 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
|
||||
ledgerMaster_->clearPriorLedgers(lastRotated);
|
||||
JLOG(journal_.trace()) << "End: Clear internal ledgers up to "
|
||||
<< lastRotated;
|
||||
if (health())
|
||||
if (stopping())
|
||||
return;
|
||||
|
||||
RelationalDBInterfaceSqlite* iface =
|
||||
dynamic_cast<RelationalDBInterfaceSqlite*>(
|
||||
&app_.getRelationalDBInterface());
|
||||
SQLiteDatabase* const db =
|
||||
dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase());
|
||||
|
||||
if (!db)
|
||||
Throw<std::runtime_error>("Failed to get relational database");
|
||||
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"Ledgers",
|
||||
[&iface]() -> std::optional<LedgerIndex> {
|
||||
return iface->getMinLedgerSeq();
|
||||
},
|
||||
[&iface](LedgerIndex min) -> void {
|
||||
iface->deleteBeforeLedgerSeq(min);
|
||||
});
|
||||
if (health())
|
||||
[db]() -> std::optional<LedgerIndex> { return db->getMinLedgerSeq(); },
|
||||
[db](LedgerIndex min) -> void { db->deleteBeforeLedgerSeq(min); });
|
||||
if (stopping())
|
||||
return;
|
||||
|
||||
if (!app_.config().useTxTables())
|
||||
@@ -665,70 +654,48 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"Transactions",
|
||||
[&iface]() -> std::optional<LedgerIndex> {
|
||||
return iface->getTransactionsMinLedgerSeq();
|
||||
[&db]() -> std::optional<LedgerIndex> {
|
||||
return db->getTransactionsMinLedgerSeq();
|
||||
},
|
||||
[&iface](LedgerIndex min) -> void {
|
||||
iface->deleteTransactionsBeforeLedgerSeq(min);
|
||||
[&db](LedgerIndex min) -> void {
|
||||
db->deleteTransactionsBeforeLedgerSeq(min);
|
||||
});
|
||||
if (health())
|
||||
if (stopping())
|
||||
return;
|
||||
|
||||
clearSql(
|
||||
lastRotated,
|
||||
"AccountTransactions",
|
||||
[&iface]() -> std::optional<LedgerIndex> {
|
||||
return iface->getAccountTransactionsMinLedgerSeq();
|
||||
[&db]() -> std::optional<LedgerIndex> {
|
||||
return db->getAccountTransactionsMinLedgerSeq();
|
||||
},
|
||||
[&iface](LedgerIndex min) -> void {
|
||||
iface->deleteAccountTransactionsBeforeLedgerSeq(min);
|
||||
[&db](LedgerIndex min) -> void {
|
||||
db->deleteAccountTransactionsBeforeLedgerSeq(min);
|
||||
});
|
||||
if (health())
|
||||
if (stopping())
|
||||
return;
|
||||
}
|
||||
|
||||
SHAMapStoreImp::Health
|
||||
SHAMapStoreImp::health()
|
||||
bool
|
||||
SHAMapStoreImp::stopping()
|
||||
{
|
||||
auto age = ledgerMaster_->getValidatedLedgerAge();
|
||||
OperatingMode mode = netOPs_->getOperatingMode();
|
||||
std::unique_lock lock(mutex_);
|
||||
while (!stop_ && (mode != OperatingMode::FULL || age > ageThreshold_))
|
||||
{
|
||||
std::lock_guard lock(mutex_);
|
||||
if (stop_)
|
||||
return Health::stopping;
|
||||
}
|
||||
if (!netOPs_)
|
||||
return Health::ok;
|
||||
assert(deleteInterval_);
|
||||
|
||||
if (healthy_)
|
||||
{
|
||||
auto age = ledgerMaster_->getValidatedLedgerAge();
|
||||
OperatingMode mode = netOPs_->getOperatingMode();
|
||||
if (recoveryWaitTime_ && mode == OperatingMode::SYNCING &&
|
||||
age < ageThreshold_)
|
||||
{
|
||||
JLOG(journal_.warn())
|
||||
<< "Waiting " << recoveryWaitTime_->count()
|
||||
<< "s for node to get back into sync with network. state: "
|
||||
<< app_.getOPs().strOperatingMode(mode, false) << ". age "
|
||||
<< age.count() << 's';
|
||||
std::this_thread::sleep_for(*recoveryWaitTime_);
|
||||
|
||||
age = ledgerMaster_->getValidatedLedgerAge();
|
||||
mode = netOPs_->getOperatingMode();
|
||||
}
|
||||
if (mode != OperatingMode::FULL || age > ageThreshold_)
|
||||
{
|
||||
JLOG(journal_.warn()) << "Not deleting. state: "
|
||||
<< app_.getOPs().strOperatingMode(mode, false)
|
||||
<< ". age " << age.count() << 's';
|
||||
healthy_ = false;
|
||||
}
|
||||
lock.unlock();
|
||||
JLOG(journal_.warn()) << "Waiting " << recoveryWaitTime_.count()
|
||||
<< "s for node to stabilize. state: "
|
||||
<< app_.getOPs().strOperatingMode(mode, false)
|
||||
<< ". age " << age.count() << 's';
|
||||
std::this_thread::sleep_for(recoveryWaitTime_);
|
||||
age = ledgerMaster_->getValidatedLedgerAge();
|
||||
mode = netOPs_->getOperatingMode();
|
||||
lock.lock();
|
||||
}
|
||||
|
||||
if (healthy_)
|
||||
return Health::ok;
|
||||
else
|
||||
return Health::unhealthy;
|
||||
return stop_;
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -22,8 +22,8 @@
|
||||
|
||||
#include <ripple/app/ledger/LedgerMaster.h>
|
||||
#include <ripple/app/misc/SHAMapStore.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/RelationalDatabase.h>
|
||||
#include <ripple/app/rdb/State.h>
|
||||
#include <ripple/core/DatabaseCon.h>
|
||||
#include <ripple/nodestore/DatabaseRotating.h>
|
||||
|
||||
@@ -40,8 +40,6 @@ class NetworkOPs;
|
||||
class SHAMapStoreImp : public SHAMapStore
|
||||
{
|
||||
private:
|
||||
enum Health : std::uint8_t { ok = 0, stopping, unhealthy };
|
||||
|
||||
class SavedStateDB
|
||||
{
|
||||
public:
|
||||
@@ -106,12 +104,12 @@ private:
|
||||
std::uint32_t deleteBatch_ = 100;
|
||||
std::chrono::milliseconds backOff_{100};
|
||||
std::chrono::seconds ageThreshold_{60};
|
||||
/// If set, and the node is out of sync during an
|
||||
/// If the node is out of sync during an
|
||||
/// online_delete health check, sleep the thread
|
||||
/// for this time and check again so the node can
|
||||
/// recover.
|
||||
/// for this time, and continue checking until
|
||||
/// recovery.
|
||||
/// See also: "recovery_wait_seconds" in rippled-example.cfg
|
||||
std::optional<std::chrono::seconds> recoveryWaitTime_;
|
||||
std::chrono::seconds recoveryWaitTime_{5};
|
||||
|
||||
// these do not exist upon SHAMapStore creation, but do exist
|
||||
// as of run() or before
|
||||
@@ -136,7 +134,7 @@ public:
|
||||
}
|
||||
|
||||
std::unique_ptr<NodeStore::Database>
|
||||
makeNodeStore(std::int32_t readThreads) override;
|
||||
makeNodeStore(int readThreads) override;
|
||||
|
||||
LedgerIndex
|
||||
setCanDelete(LedgerIndex seq) override
|
||||
@@ -201,7 +199,7 @@ private:
|
||||
{
|
||||
dbRotating_->fetchNodeObject(
|
||||
key, 0, NodeStore::FetchType::synchronous, true);
|
||||
if (!(++check % checkHealthInterval_) && health())
|
||||
if (!(++check % checkHealthInterval_) && stopping())
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -225,16 +223,15 @@ private:
|
||||
void
|
||||
clearPrior(LedgerIndex lastRotated);
|
||||
|
||||
// If rippled is not healthy, defer rotate-delete.
|
||||
// If already unhealthy, do not change state on further check.
|
||||
// Assume that, once unhealthy, a necessary step has been
|
||||
// aborted, so the online-delete process needs to restart
|
||||
// at next ledger.
|
||||
// If recoveryWaitTime_ is set, this may sleep to give rippled
|
||||
// time to recover, so never call it from any thread other than
|
||||
// the main "run()".
|
||||
Health
|
||||
health();
|
||||
/**
|
||||
* This is a health check for online deletion that waits until rippled is
|
||||
* stable until returning. If the server is stopping, then it returns
|
||||
* "true" to inform the caller to allow the server to stop.
|
||||
*
|
||||
* @return Whether the server is stopping.
|
||||
*/
|
||||
bool
|
||||
stopping();
|
||||
|
||||
public:
|
||||
void
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <ripple/app/tx/applySteps.h>
|
||||
#include <ripple/ledger/ApplyView.h>
|
||||
#include <ripple/ledger/OpenView.h>
|
||||
#include <ripple/protocol/RippleLedgerHash.h>
|
||||
#include <ripple/protocol/STTx.h>
|
||||
#include <ripple/protocol/SeqProxy.h>
|
||||
#include <ripple/protocol/TER.h>
|
||||
@@ -340,7 +341,7 @@ public:
|
||||
in the queue.
|
||||
*/
|
||||
std::vector<TxDetails>
|
||||
getAccountTxs(AccountID const& account, ReadView const& view) const;
|
||||
getAccountTxs(AccountID const& account) const;
|
||||
|
||||
/** Returns information about all transactions currently
|
||||
in the queue.
|
||||
@@ -349,7 +350,7 @@ public:
|
||||
in the queue.
|
||||
*/
|
||||
std::vector<TxDetails>
|
||||
getTxs(ReadView const& view) const;
|
||||
getTxs() const;
|
||||
|
||||
/** Summarize current fee metrics for the `fee` RPC command.
|
||||
|
||||
@@ -575,6 +576,16 @@ private:
|
||||
*/
|
||||
static constexpr int retriesAllowed = 10;
|
||||
|
||||
/** The hash of the parent ledger.
|
||||
|
||||
This is used to pseudo-randomize the transaction order when
|
||||
populating byFee_, by XORing it with the transaction hash (txID).
|
||||
Using a single static and doing the XOR operation every time was
|
||||
tested to be as fast or faster than storing the computed "sort key",
|
||||
and obviously uses less memory.
|
||||
*/
|
||||
static LedgerHash parentHashComp;
|
||||
|
||||
public:
|
||||
/// Constructor
|
||||
MaybeTx(
|
||||
@@ -621,22 +632,26 @@ private:
|
||||
explicit OrderCandidates() = default;
|
||||
|
||||
/** Sort @ref MaybeTx by `feeLevel` descending, then by
|
||||
* transaction ID ascending
|
||||
* pseudo-randomized transaction ID ascending
|
||||
*
|
||||
* The transaction queue is ordered such that transactions
|
||||
* paying a higher fee are in front of transactions paying
|
||||
* a lower fee, giving them an opportunity to be processed into
|
||||
* the open ledger first. Within transactions paying the same
|
||||
* fee, order by the arbitrary but consistent transaction ID.
|
||||
* This allows validators to build similar queues in the same
|
||||
* order, and thus have more similar initial proposals.
|
||||
* fee, order by the arbitrary but consistent pseudo-randomized
|
||||
* transaction ID. The ID is pseudo-randomized by XORing it with
|
||||
* the open ledger's parent hash, which is deterministic, but
|
||||
* unpredictable. This allows validators to build similar queues
|
||||
* in the same order, and thus have more similar initial
|
||||
* proposals.
|
||||
*
|
||||
*/
|
||||
bool
|
||||
operator()(const MaybeTx& lhs, const MaybeTx& rhs) const
|
||||
{
|
||||
if (lhs.feeLevel == rhs.feeLevel)
|
||||
return lhs.txID < rhs.txID;
|
||||
return (lhs.txID ^ MaybeTx::parentHashComp) <
|
||||
(rhs.txID ^ MaybeTx::parentHashComp);
|
||||
return lhs.feeLevel > rhs.feeLevel;
|
||||
}
|
||||
};
|
||||
@@ -770,6 +785,14 @@ private:
|
||||
*/
|
||||
std::optional<size_t> maxSize_;
|
||||
|
||||
#if !NDEBUG
|
||||
/**
|
||||
parentHash_ checks that no unexpected ledger transitions
|
||||
happen, and is only checked via debug asserts.
|
||||
*/
|
||||
LedgerHash parentHash_{beast::zero};
|
||||
#endif
|
||||
|
||||
/** Most queue operations are done under the master lock,
|
||||
but use this mutex for the RPC "fee" command, which isn't.
|
||||
*/
|
||||
|
||||
@@ -31,7 +31,7 @@ namespace ripple {
|
||||
|
||||
void
|
||||
convertBlobsToTxResult(
|
||||
RelationalDBInterface::AccountTxs& to,
|
||||
RelationalDatabase::AccountTxs& to,
|
||||
std::uint32_t ledger_index,
|
||||
std::string const& status,
|
||||
Blob const& rawTxn,
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#ifndef RIPPLE_APP_MISC_IMPL_ACCOUNTTXPAGING_H_INCLUDED
|
||||
#define RIPPLE_APP_MISC_IMPL_ACCOUNTTXPAGING_H_INCLUDED
|
||||
|
||||
#include <ripple/app/rdb/RelationalDBInterface.h>
|
||||
#include <ripple/app/rdb/RelationalDatabase.h>
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
@@ -31,7 +31,7 @@ namespace ripple {
|
||||
|
||||
void
|
||||
convertBlobsToTxResult(
|
||||
RelationalDBInterface::AccountTxs& to,
|
||||
RelationalDatabase::AccountTxs& to,
|
||||
std::uint32_t ledger_index,
|
||||
std::string const& status,
|
||||
Blob const& rawTxn,
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/app/misc/AmendmentTable.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/Wallet.h>
|
||||
#include <ripple/core/ConfigSections.h>
|
||||
#include <ripple/protocol/Feature.h>
|
||||
#include <ripple/protocol/STValidation.h>
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/misc/Manifest.h>
|
||||
#include <ripple/app/rdb/RelationalDBInterface_global.h>
|
||||
#include <ripple/app/rdb/Wallet.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/base64.h>
|
||||
@@ -28,8 +28,11 @@
|
||||
#include <ripple/json/json_reader.h>
|
||||
#include <ripple/protocol/PublicKey.h>
|
||||
#include <ripple/protocol/Sign.h>
|
||||
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
|
||||
#include <numeric>
|
||||
#include <shared_mutex>
|
||||
#include <stdexcept>
|
||||
|
||||
namespace ripple {
|
||||
@@ -283,7 +286,7 @@ loadValidatorToken(std::vector<std::string> const& blob)
|
||||
PublicKey
|
||||
ManifestCache::getSigningKey(PublicKey const& pk) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
auto const iter = map_.find(pk);
|
||||
|
||||
if (iter != map_.end() && !iter->second.revoked())
|
||||
@@ -295,7 +298,7 @@ ManifestCache::getSigningKey(PublicKey const& pk) const
|
||||
PublicKey
|
||||
ManifestCache::getMasterKey(PublicKey const& pk) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
|
||||
if (auto const iter = signingToMasterKeys_.find(pk);
|
||||
iter != signingToMasterKeys_.end())
|
||||
@@ -307,7 +310,7 @@ ManifestCache::getMasterKey(PublicKey const& pk) const
|
||||
std::optional<std::uint32_t>
|
||||
ManifestCache::getSequence(PublicKey const& pk) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
auto const iter = map_.find(pk);
|
||||
|
||||
if (iter != map_.end() && !iter->second.revoked())
|
||||
@@ -319,7 +322,7 @@ ManifestCache::getSequence(PublicKey const& pk) const
|
||||
std::optional<std::string>
|
||||
ManifestCache::getDomain(PublicKey const& pk) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
auto const iter = map_.find(pk);
|
||||
|
||||
if (iter != map_.end() && !iter->second.revoked())
|
||||
@@ -331,7 +334,7 @@ ManifestCache::getDomain(PublicKey const& pk) const
|
||||
std::optional<std::string>
|
||||
ManifestCache::getManifest(PublicKey const& pk) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
auto const iter = map_.find(pk);
|
||||
|
||||
if (iter != map_.end() && !iter->second.revoked())
|
||||
@@ -343,7 +346,7 @@ ManifestCache::getManifest(PublicKey const& pk) const
|
||||
bool
|
||||
ManifestCache::revoked(PublicKey const& pk) const
|
||||
{
|
||||
std::lock_guard lock{read_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
auto const iter = map_.find(pk);
|
||||
|
||||
if (iter != map_.end())
|
||||
@@ -355,86 +358,115 @@ ManifestCache::revoked(PublicKey const& pk) const
|
||||
ManifestDisposition
|
||||
ManifestCache::applyManifest(Manifest m)
|
||||
{
|
||||
std::lock_guard applyLock{apply_mutex_};
|
||||
// Check the manifest against the conditions that do not require a
|
||||
// `unique_lock` (write lock) on the `mutex_`. Since the signature can be
|
||||
// relatively expensive, the `checkSignature` parameter determines if the
|
||||
// signature should be checked. Since `prewriteCheck` is run twice (see
|
||||
// comment below), `checkSignature` only needs to be set to true on the
|
||||
// first run.
|
||||
auto prewriteCheck =
|
||||
[this, &m](auto const& iter, bool checkSignature, auto const& lock)
|
||||
-> std::optional<ManifestDisposition> {
|
||||
assert(lock.owns_lock());
|
||||
(void)lock; // not used. parameter is present to ensure the mutex is
|
||||
// locked when the lambda is called.
|
||||
if (iter != map_.end() && m.sequence <= iter->second.sequence)
|
||||
{
|
||||
// We received a manifest whose sequence number is not strictly
|
||||
// greater than the one we already know about. This can happen in
|
||||
// several cases including when we receive manifests from a peer who
|
||||
// doesn't have the latest data.
|
||||
if (auto stream = j_.debug())
|
||||
logMftAct(
|
||||
stream,
|
||||
"Stale",
|
||||
m.masterKey,
|
||||
m.sequence,
|
||||
iter->second.sequence);
|
||||
return ManifestDisposition::stale;
|
||||
}
|
||||
|
||||
// Before we spend time checking the signature, make sure the
|
||||
// sequence number is newer than any we have.
|
||||
auto const iter = map_.find(m.masterKey);
|
||||
if (checkSignature && !m.verify())
|
||||
{
|
||||
if (auto stream = j_.warn())
|
||||
logMftAct(stream, "Invalid", m.masterKey, m.sequence);
|
||||
return ManifestDisposition::invalid;
|
||||
}
|
||||
|
||||
if (iter != map_.end() && m.sequence <= iter->second.sequence)
|
||||
{
|
||||
// We received a manifest whose sequence number is not strictly greater
|
||||
// than the one we already know about. This can happen in several cases
|
||||
// including when we receive manifests from a peer who doesn't have the
|
||||
// latest data.
|
||||
if (auto stream = j_.debug())
|
||||
logMftAct(
|
||||
stream,
|
||||
"Stale",
|
||||
m.masterKey,
|
||||
m.sequence,
|
||||
iter->second.sequence);
|
||||
return ManifestDisposition::stale;
|
||||
}
|
||||
// If the master key associated with a manifest is or might be
|
||||
// compromised and is, therefore, no longer trustworthy.
|
||||
//
|
||||
// A manifest revocation essentially marks a manifest as compromised. By
|
||||
// setting the sequence number to the highest value possible, the
|
||||
// manifest is effectively neutered and cannot be superseded by a forged
|
||||
// one.
|
||||
bool const revoked = m.revoked();
|
||||
|
||||
// Now check the signature
|
||||
if (!m.verify())
|
||||
{
|
||||
if (auto stream = j_.warn())
|
||||
logMftAct(stream, "Invalid", m.masterKey, m.sequence);
|
||||
return ManifestDisposition::invalid;
|
||||
}
|
||||
if (auto stream = j_.warn(); stream && revoked)
|
||||
logMftAct(stream, "Revoked", m.masterKey, m.sequence);
|
||||
|
||||
// If the master key associated with a manifest is or might be compromised
|
||||
// and is, therefore, no longer trustworthy.
|
||||
//
|
||||
// A manifest revocation essentially marks a manifest as compromised. By
|
||||
// setting the sequence number to the highest value possible, the manifest
|
||||
// is effectively neutered and cannot be superseded by a forged one.
|
||||
bool const revoked = m.revoked();
|
||||
|
||||
if (auto stream = j_.warn(); stream && revoked)
|
||||
logMftAct(stream, "Revoked", m.masterKey, m.sequence);
|
||||
|
||||
std::lock_guard readLock{read_mutex_};
|
||||
|
||||
// Sanity check: the master key of this manifest should not be used as
|
||||
// the ephemeral key of another manifest:
|
||||
if (auto const x = signingToMasterKeys_.find(m.masterKey);
|
||||
x != signingToMasterKeys_.end())
|
||||
{
|
||||
JLOG(j_.warn()) << to_string(m)
|
||||
<< ": Master key already used as ephemeral key for "
|
||||
<< toBase58(TokenType::NodePublic, x->second);
|
||||
|
||||
return ManifestDisposition::badMasterKey;
|
||||
}
|
||||
|
||||
if (!revoked)
|
||||
{
|
||||
// Sanity check: the ephemeral key of this manifest should not be used
|
||||
// as the master or ephemeral key of another manifest:
|
||||
if (auto const x = signingToMasterKeys_.find(m.signingKey);
|
||||
// Sanity check: the master key of this manifest should not be used as
|
||||
// the ephemeral key of another manifest:
|
||||
if (auto const x = signingToMasterKeys_.find(m.masterKey);
|
||||
x != signingToMasterKeys_.end())
|
||||
{
|
||||
JLOG(j_.warn())
|
||||
<< to_string(m)
|
||||
<< ": Ephemeral key already used as ephemeral key for "
|
||||
<< toBase58(TokenType::NodePublic, x->second);
|
||||
JLOG(j_.warn()) << to_string(m)
|
||||
<< ": Master key already used as ephemeral key for "
|
||||
<< toBase58(TokenType::NodePublic, x->second);
|
||||
|
||||
return ManifestDisposition::badEphemeralKey;
|
||||
return ManifestDisposition::badMasterKey;
|
||||
}
|
||||
|
||||
if (auto const x = map_.find(m.signingKey); x != map_.end())
|
||||
if (!revoked)
|
||||
{
|
||||
JLOG(j_.warn())
|
||||
<< to_string(m) << ": Ephemeral key used as master key for "
|
||||
<< to_string(x->second);
|
||||
// Sanity check: the ephemeral key of this manifest should not be
|
||||
// used as the master or ephemeral key of another manifest:
|
||||
if (auto const x = signingToMasterKeys_.find(m.signingKey);
|
||||
x != signingToMasterKeys_.end())
|
||||
{
|
||||
JLOG(j_.warn())
|
||||
<< to_string(m)
|
||||
<< ": Ephemeral key already used as ephemeral key for "
|
||||
<< toBase58(TokenType::NodePublic, x->second);
|
||||
|
||||
return ManifestDisposition::badEphemeralKey;
|
||||
return ManifestDisposition::badEphemeralKey;
|
||||
}
|
||||
|
||||
if (auto const x = map_.find(m.signingKey); x != map_.end())
|
||||
{
|
||||
JLOG(j_.warn())
|
||||
<< to_string(m) << ": Ephemeral key used as master key for "
|
||||
<< to_string(x->second);
|
||||
|
||||
return ManifestDisposition::badEphemeralKey;
|
||||
}
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
};
|
||||
|
||||
{
|
||||
std::shared_lock sl{mutex_};
|
||||
if (auto d =
|
||||
prewriteCheck(map_.find(m.masterKey), /*checkSig*/ true, sl))
|
||||
return *d;
|
||||
}
|
||||
|
||||
std::unique_lock sl{mutex_};
|
||||
auto const iter = map_.find(m.masterKey);
|
||||
// Since we released the previously held read lock, it's possible that the
|
||||
// collections have been written to. This means we need to run
|
||||
// `prewriteCheck` again. This re-does work, but `prewriteCheck` is
|
||||
// relatively inexpensive to run, and doing it this way allows us to run
|
||||
// `prewriteCheck` under a `shared_lock` above.
|
||||
// Note, the signature has already been checked above, so it
|
||||
// doesn't need to happen again (signature checks are somewhat expensive).
|
||||
// Note: It's a mistake to use an upgradable lock. This is a recipe for
|
||||
// deadlock.
|
||||
if (auto d = prewriteCheck(iter, /*checkSig*/ false, sl))
|
||||
return *d;
|
||||
|
||||
bool const revoked = m.revoked();
|
||||
// This is the first manifest we are seeing for a master key. This should
|
||||
// only ever happen once per validator run.
|
||||
if (iter == map_.end())
|
||||
@@ -543,7 +575,7 @@ ManifestCache::save(
|
||||
std::string const& dbTable,
|
||||
std::function<bool(PublicKey const&)> const& isTrusted)
|
||||
{
|
||||
std::lock_guard lock{apply_mutex_};
|
||||
std::shared_lock lock{mutex_};
|
||||
auto db = dbCon.checkoutDb();
|
||||
|
||||
saveManifests(*db, dbTable, isTrusted, map_, j_);
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user