mirror of
https://github.com/XRPLF/rippled.git
synced 2026-01-10 01:35:26 +00:00
Compare commits
209 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2084d61efa | ||
|
|
57b9da62bd | ||
|
|
79c583e1f7 | ||
|
|
5743dc4537 | ||
|
|
d86b1f8b7d | ||
|
|
d57f88fc18 | ||
|
|
24cf8ab8c7 | ||
|
|
e56a85cb3b | ||
|
|
d1fdea9bc8 | ||
|
|
1a8eb5e6e3 | ||
|
|
6a8180c967 | ||
|
|
eb57679085 | ||
|
|
297def5ed3 | ||
|
|
a01cadbfd5 | ||
|
|
11ca9a946c | ||
|
|
f326f019bf | ||
|
|
90326bf756 | ||
|
|
255bf829ca | ||
|
|
0623a40f02 | ||
|
|
a529b218f3 | ||
|
|
c0cb389b20 | ||
|
|
8f82b62e0d | ||
|
|
dc213a4fab | ||
|
|
06e87e0f6a | ||
|
|
c2a08a1f26 | ||
|
|
df02eb125f | ||
|
|
0c13676d5f | ||
|
|
74e6ed1af3 | ||
|
|
72377e7bf2 | ||
|
|
5b085a75fd | ||
|
|
61389a8bef | ||
|
|
bd97e59254 | ||
|
|
95ecf296ad | ||
|
|
b7e0306d0a | ||
|
|
a9ee802240 | ||
|
|
d23d37fcfd | ||
|
|
289bc0afd9 | ||
|
|
c5dc00af74 | ||
|
|
d49b486224 | ||
|
|
417cfc2fb0 | ||
|
|
febbe14e6d | ||
|
|
44514930f9 | ||
|
|
dc778536ed | ||
|
|
18584ef2fd | ||
|
|
416ce35d73 | ||
|
|
7c12f01358 | ||
|
|
5a4654a0da | ||
|
|
89766c5f21 | ||
|
|
4ec11e692b | ||
|
|
d02f0e11c5 | ||
|
|
e28989638d | ||
|
|
915fe31274 | ||
|
|
db720a59e4 | ||
|
|
72752b1ee0 | ||
|
|
c663f1f62b | ||
|
|
d54f6278bb | ||
|
|
fc04336caa | ||
|
|
47376a0cc3 | ||
|
|
45aa0142a6 | ||
|
|
e3acb61d57 | ||
|
|
8fa33795a3 | ||
|
|
b1c9b134dc | ||
|
|
ae9930b87d | ||
|
|
aaa601841c | ||
|
|
8ca2d98496 | ||
|
|
ad805eb95b | ||
|
|
eb17325cbe | ||
|
|
b00787e161 | ||
|
|
81e7ec859d | ||
|
|
6f6179abb4 | ||
|
|
32a26a65d9 | ||
|
|
daccb5b4c0 | ||
|
|
cf97dcb992 | ||
|
|
6746b863b3 | ||
|
|
bf013c02ad | ||
|
|
fbedfb25ae | ||
|
|
9e877a929e | ||
|
|
e0eae9725b | ||
|
|
3083983fee | ||
|
|
5050b366d9 | ||
|
|
f0c237e001 | ||
|
|
970711f1fd | ||
|
|
19018e8959 | ||
|
|
7edfbbd8bd | ||
|
|
d36024394d | ||
|
|
35e0ab4280 | ||
|
|
ef60ac8348 | ||
|
|
0c47cfad6f | ||
|
|
eb6b79bed7 | ||
|
|
eaff0d30fb | ||
|
|
fae9f9b24b | ||
|
|
5d44998368 | ||
|
|
a145759d1e | ||
|
|
e2a42184b9 | ||
|
|
00a4c3a478 | ||
|
|
0320d2169e | ||
|
|
da26d11593 | ||
|
|
90aa3c75a7 | ||
|
|
1197e49068 | ||
|
|
b6ed50eb03 | ||
|
|
8c78c83d05 | ||
|
|
a5c4684273 | ||
|
|
2bbf0eb588 | ||
|
|
6095f55bf1 | ||
|
|
f64bd54093 | ||
|
|
bc91fd740f | ||
|
|
4a9bd7ed6d | ||
|
|
1ca8898703 | ||
|
|
8a25f32824 | ||
|
|
d9d001dffd | ||
|
|
bdfafa0b58 | ||
|
|
2266b04dd8 | ||
|
|
c50d166c23 | ||
|
|
de43d43560 | ||
|
|
f954faada6 | ||
|
|
9f75f2d522 | ||
|
|
cf70ecbd6d | ||
|
|
2a298469be | ||
|
|
3be668b343 | ||
|
|
2027f642ec | ||
|
|
9376d81d0d | ||
|
|
54e5d5fc35 | ||
|
|
b8552abcea | ||
|
|
3fb60a89a3 | ||
|
|
15b0ae5bf0 | ||
|
|
33b396c7b4 | ||
|
|
ea145d12c7 | ||
|
|
0d17dd8228 | ||
|
|
af5f28cbf8 | ||
|
|
234b754038 | ||
|
|
c231adf324 | ||
|
|
7a088a5280 | ||
|
|
96bbabbd2e | ||
|
|
e37c108195 | ||
|
|
53df35eef3 | ||
|
|
1061b01ab3 | ||
|
|
aee422e819 | ||
|
|
324667b877 | ||
|
|
10d73655bc | ||
|
|
96f11c786e | ||
|
|
9202197354 | ||
|
|
d78a396525 | ||
|
|
cd27b5f2bd | ||
|
|
78bc2727f7 | ||
|
|
8b58e93a2e | ||
|
|
3752234161 | ||
|
|
bf75094224 | ||
|
|
8d59c7dd40 | ||
|
|
a1fd579756 | ||
|
|
b9943d3746 | ||
|
|
b5502a49c3 | ||
|
|
7bd5d51e4e | ||
|
|
d4d937c37b | ||
|
|
2f0231025f | ||
|
|
f1a9e8840f | ||
|
|
2c559116fb | ||
|
|
4bedbd1d39 | ||
|
|
433feade5d | ||
|
|
10e4608ce0 | ||
|
|
ff3d2e7c29 | ||
|
|
7822a28c87 | ||
|
|
dcba79be48 | ||
|
|
2a7c573dec | ||
|
|
22cc9a254a | ||
|
|
09ae9168ca | ||
|
|
9eb9b8f631 | ||
|
|
6298daba1a | ||
|
|
2eb1c6a396 | ||
|
|
7717056cf2 | ||
|
|
9fd5cd303d | ||
|
|
04ff6249d5 | ||
|
|
fa9ecae2d6 | ||
|
|
62d2b76fa8 | ||
|
|
d95aab1139 | ||
|
|
80c2302fd3 | ||
|
|
38f954fd46 | ||
|
|
14b2f27c3e | ||
|
|
a2a37a928a | ||
|
|
c10c0be11b | ||
|
|
34ee4ca0cb | ||
|
|
30fd45890b | ||
|
|
36fe1966c3 | ||
|
|
1bb99e5d3c | ||
|
|
430802c1cf | ||
|
|
9106a06579 | ||
|
|
79e69da364 | ||
|
|
73116297aa | ||
|
|
8579eb0c19 | ||
|
|
9c8caddc5a | ||
|
|
2913847925 | ||
|
|
cf8438fe1d | ||
|
|
6d82fb83a0 | ||
|
|
207e1730e9 | ||
|
|
f0424fe7dd | ||
|
|
2e456a835d | ||
|
|
ab9039e77d | ||
|
|
9932a19139 | ||
|
|
64e4a89470 | ||
|
|
9d89d4c188 | ||
|
|
b2bf2b6e6b | ||
|
|
3b33318dc8 | ||
|
|
85307b29d0 | ||
|
|
95426efb8a | ||
|
|
3e2b568ef9 | ||
|
|
a06525649d | ||
|
|
b4699c3b46 | ||
|
|
27d978b891 | ||
|
|
f91b568069 | ||
|
|
06bd16c928 |
2
.github/ISSUE_TEMPLATE/bug_report.md
vendored
2
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -22,7 +22,7 @@ assignees: ''
|
||||
|
||||
## Environment
|
||||
<!--Please describe your environment setup (such as Ubuntu 18.04 with Boost 1.70).-->
|
||||
<!-- If you are using a formal release, please use the version returned by './rippled --version' as the verison number-->
|
||||
<!-- If you are using a formal release, please use the version returned by './rippled --version' as the version number-->
|
||||
<!-- If you are working off of develop, please add the git hash via 'git rev-parse HEAD'-->
|
||||
|
||||
## Supporting Files
|
||||
|
||||
35
.github/workflows/clang-format.yml
vendored
35
.github/workflows/clang-format.yml
vendored
@@ -23,5 +23,38 @@ jobs:
|
||||
- name: Format src/test
|
||||
run: find src/test -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-${CLANG_VERSION} -i
|
||||
- name: Check for differences
|
||||
run: git diff --exit-code
|
||||
id: assert
|
||||
run: |
|
||||
set -o pipefail
|
||||
git diff --exit-code | tee "clang-format.patch"
|
||||
- name: Upload patch
|
||||
if: failure() && steps.assert.outcome == 'failure'
|
||||
uses: actions/upload-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
name: clang-format.patch
|
||||
if-no-files-found: ignore
|
||||
path: clang-format.patch
|
||||
- name: What happened?
|
||||
if: failure() && steps.assert.outcome == 'failure'
|
||||
env:
|
||||
PREAMBLE: |
|
||||
If you are reading this, you are looking at a failed Github Actions
|
||||
job. That means you pushed one or more files that did not conform
|
||||
to the formatting specified in .clang-format. That may be because
|
||||
you neglected to run 'git clang-format' or 'clang-format' before
|
||||
committing, or that your version of clang-format has an
|
||||
incompatibility with the one on this
|
||||
machine, which is:
|
||||
SUGGESTION: |
|
||||
|
||||
To fix it, you can do one of two things:
|
||||
1. Download and apply the patch generated as an artifact of this
|
||||
job to your repo, commit, and push.
|
||||
2. Run 'git-clang-format --extensions c,cpp,h,cxx,ipp develop'
|
||||
in your repo, commit, and push.
|
||||
run: |
|
||||
echo "${PREAMBLE}"
|
||||
clang-format-${CLANG_VERSION} --version
|
||||
echo "${SUGGESTION}"
|
||||
exit 1
|
||||
|
||||
39
.github/workflows/levelization.yml
vendored
39
.github/workflows/levelization.yml
vendored
@@ -12,7 +12,38 @@ jobs:
|
||||
- name: Check levelization
|
||||
run: Builds/levelization/levelization.sh
|
||||
- name: Check for differences
|
||||
run: git diff --exit-code
|
||||
# If this workflow fails, and you have improved levelization, run
|
||||
# Builds/levelization/levelization.sh, and commit the changes to
|
||||
# loops.txt.
|
||||
id: assert
|
||||
run: |
|
||||
set -o pipefail
|
||||
git diff --exit-code | tee "levelization.patch"
|
||||
- name: Upload patch
|
||||
if: failure() && steps.assert.outcome == 'failure'
|
||||
uses: actions/upload-artifact@v2
|
||||
continue-on-error: true
|
||||
with:
|
||||
name: levelization.patch
|
||||
if-no-files-found: ignore
|
||||
path: levelization.patch
|
||||
- name: What happened?
|
||||
if: failure() && steps.assert.outcome == 'failure'
|
||||
env:
|
||||
MESSAGE: |
|
||||
If you are reading this, you are looking at a failed Github
|
||||
Actions job. That means you changed the dependency relationships
|
||||
between the modules in rippled. That may be an improvement or a
|
||||
regression. This check doesn't judge.
|
||||
|
||||
A rule of thumb, though, is that if your changes caused
|
||||
something to be removed from loops.txt, that's probably an
|
||||
improvement. If something was added, it's probably a regression.
|
||||
|
||||
To fix it, you can do one of two things:
|
||||
1. Download and apply the patch generated as an artifact of this
|
||||
job to your repo, commit, and push.
|
||||
2. Run './Builds/levelization/levelization.sh' in your repo,
|
||||
commit, and push.
|
||||
|
||||
See Builds/levelization/README.md for more info.
|
||||
run: |
|
||||
echo "${MESSAGE}"
|
||||
exit 1
|
||||
|
||||
42
.travis.yml
42
.travis.yml
@@ -36,9 +36,9 @@ env:
|
||||
- NIH_CACHE_ROOT=${CACHE_DIR}/nih_c
|
||||
- PARALLEL_TESTS=true
|
||||
# this is NOT used by linux container based builds (which already have boost installed)
|
||||
- BOOST_URL='https://dl.bintray.com/boostorg/release/1.70.0/source/boost_1_70_0.tar.bz2'
|
||||
- BOOST_URL='https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz'
|
||||
# Alternate dowload location
|
||||
- BOOST_URL2='https://downloads.sourceforge.net/project/boost/boost/1.70.0/boost_1_70_0.tar.bz2?r=&ts=1594393912&use_mirror=newcontinuum'
|
||||
- BOOST_URL2='https://downloads.sourceforge.net/project/boost/boost/1.75.0/boost_1_75_0.tar.bz2?r=&ts=1594393912&use_mirror=newcontinuum'
|
||||
# Travis downloader doesn't seem to have updated certs. Using this option
|
||||
# introduces obvious security risks, but they're Travis's risks.
|
||||
# Note that this option is only used if the "normal" build fails.
|
||||
@@ -123,6 +123,25 @@ matrix:
|
||||
- CMAKE_ADD="-Dcoverage=ON"
|
||||
- TARGET=coverage_report
|
||||
- SKIP_TESTS=true
|
||||
# test-free builds
|
||||
- <<: *linux
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/
|
||||
compiler: gcc-8
|
||||
name: no-tests-unity, gcc-8
|
||||
env:
|
||||
- MATRIX_EVAL="CC=gcc-8 && CXX=g++-8"
|
||||
- BUILD_TYPE=Debug
|
||||
- CMAKE_ADD="-Dtests=OFF"
|
||||
- SKIP_TESTS=true
|
||||
- <<: *linux
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/
|
||||
compiler: clang-8
|
||||
name: no-tests-non-unity, clang-8
|
||||
env:
|
||||
- MATRIX_EVAL="CC=clang-8 && CXX=clang++-8"
|
||||
- BUILD_TYPE=Debug
|
||||
- CMAKE_ADD="-Dtests=OFF -Dunity=OFF"
|
||||
- SKIP_TESTS=true
|
||||
# nounity
|
||||
- <<: *linux
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_nounity/
|
||||
@@ -263,7 +282,7 @@ matrix:
|
||||
env:
|
||||
- MATRIX_EVAL="CC=gcc-8 && CXX=g++-8"
|
||||
- BUILD_TYPE=Debug
|
||||
- CMAKE_EXE=/opt/local/cmake-3.9/bin/cmake
|
||||
- CMAKE_EXE=/opt/local/cmake/bin/cmake
|
||||
- SKIP_TESTS=true
|
||||
# validator keys project as subproj of rippled
|
||||
- <<: *linux
|
||||
@@ -280,15 +299,15 @@ matrix:
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_mac/
|
||||
stage: build
|
||||
os: osx
|
||||
osx_image: xcode11.2
|
||||
name: xcode11.2, debug
|
||||
osx_image: xcode13.1
|
||||
name: xcode13.1, debug
|
||||
env:
|
||||
# put NIH in non-cache location since it seems to
|
||||
# cause failures when homebrew updates
|
||||
- NIH_CACHE_ROOT=${TRAVIS_BUILD_DIR}/nih_c
|
||||
- BLD_CONFIG=Debug
|
||||
- TEST_EXTRA_ARGS=""
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_70_0
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_75_0
|
||||
- >-
|
||||
CMAKE_ADD="
|
||||
-DBOOST_ROOT=${BOOST_ROOT}/_INSTALLED_
|
||||
@@ -319,7 +338,7 @@ matrix:
|
||||
- travis_wait ${MAX_TIME_MIN} cmake --build . --parallel --verbose
|
||||
- ./rippled --unittest --quiet --unittest-log --unittest-jobs ${NUM_PROCESSORS} ${TEST_EXTRA_ARGS}
|
||||
- <<: *macos
|
||||
name: xcode11.2, release
|
||||
name: xcode13.1, release
|
||||
before_script:
|
||||
- export BLD_CONFIG=Release
|
||||
- export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -Dassert=ON"
|
||||
@@ -328,8 +347,8 @@ matrix:
|
||||
before_script:
|
||||
- export TEST_EXTRA_ARGS="--unittest-ipv6"
|
||||
- <<: *macos
|
||||
osx_image: xcode11.2
|
||||
name: xcode11.2, debug
|
||||
osx_image: xcode13.1
|
||||
name: xcode13.1, debug
|
||||
# windows
|
||||
- &windows
|
||||
if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_win/
|
||||
@@ -342,13 +361,13 @@ matrix:
|
||||
- NIH_CACHE_ROOT=${TRAVIS_BUILD_DIR}/nih_c
|
||||
- VCPKG_DEFAULT_TRIPLET="x64-windows-static"
|
||||
- MATRIX_EVAL="CC=cl.exe && CXX=cl.exe"
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_70
|
||||
- BOOST_ROOT=${CACHE_DIR}/boost_1_75
|
||||
- >-
|
||||
CMAKE_ADD="
|
||||
-DCMAKE_PREFIX_PATH=${BOOST_ROOT}/_INSTALLED_
|
||||
-DBOOST_ROOT=${BOOST_ROOT}/_INSTALLED_
|
||||
-DBoost_ROOT=${BOOST_ROOT}/_INSTALLED_
|
||||
-DBoost_DIR=${BOOST_ROOT}/_INSTALLED_/lib/cmake/Boost-1.70.0
|
||||
-DBoost_DIR=${BOOST_ROOT}/_INSTALLED_/lib/cmake/Boost-1.75.0
|
||||
-DBoost_COMPILER=vc141
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
||||
-DCMAKE_TOOLCHAIN_FILE=${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake
|
||||
@@ -439,4 +458,3 @@ cache:
|
||||
|
||||
notifications:
|
||||
email: false
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ if (static OR APPLE OR MSVC)
|
||||
set (OPENSSL_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
set (OPENSSL_MSVC_STATIC_RT ON)
|
||||
find_dependency (OpenSSL 1.0.2 REQUIRED)
|
||||
find_dependency (OpenSSL 1.1.1 REQUIRED)
|
||||
find_dependency (ZLIB)
|
||||
find_dependency (date)
|
||||
if (TARGET ZLIB::ZLIB)
|
||||
|
||||
@@ -19,6 +19,7 @@ endif ()
|
||||
TODO: review these sources for removal or replacement
|
||||
#]===============================]
|
||||
target_sources (xrpl_core PRIVATE
|
||||
src/ripple/beast/clock/basic_seconds_clock.cpp
|
||||
src/ripple/beast/core/CurrentThreadName.cpp
|
||||
src/ripple/beast/core/SemanticVersion.cpp
|
||||
src/ripple/beast/hash/impl/xxhash.cpp
|
||||
@@ -103,6 +104,7 @@ target_sources (xrpl_core PRIVATE
|
||||
src/ripple/protocol/impl/Sign.cpp
|
||||
src/ripple/protocol/impl/TER.cpp
|
||||
src/ripple/protocol/impl/TxFormats.cpp
|
||||
src/ripple/protocol/impl/TxMeta.cpp
|
||||
src/ripple/protocol/impl/UintTypes.cpp
|
||||
src/ripple/protocol/impl/digest.cpp
|
||||
src/ripple/protocol/impl/tokens.cpp
|
||||
@@ -110,26 +112,20 @@ target_sources (xrpl_core PRIVATE
|
||||
main sources:
|
||||
subdir: crypto
|
||||
#]===============================]
|
||||
src/ripple/crypto/impl/GenerateDeterministicKey.cpp
|
||||
src/ripple/crypto/impl/RFC1751.cpp
|
||||
src/ripple/crypto/impl/csprng.cpp
|
||||
src/ripple/crypto/impl/ec_key.cpp
|
||||
src/ripple/crypto/impl/openssl.cpp
|
||||
src/ripple/crypto/impl/secure_erase.cpp)
|
||||
|
||||
add_library (Ripple::xrpl_core ALIAS xrpl_core)
|
||||
target_include_directories (xrpl_core
|
||||
PUBLIC
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src/ripple>
|
||||
# this one is for beast/legacy files:
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src/beast/extras>
|
||||
$<INSTALL_INTERFACE:include>)
|
||||
|
||||
|
||||
target_compile_definitions(xrpl_core
|
||||
PUBLIC
|
||||
BOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT
|
||||
BOOST_CONTAINER_FWD_BAD_DEQUE
|
||||
HAS_UNCAUGHT_EXCEPTIONS=1)
|
||||
target_compile_options (xrpl_core
|
||||
PUBLIC
|
||||
@@ -173,16 +169,10 @@ install (
|
||||
DESTINATION include/ripple/basics)
|
||||
install (
|
||||
FILES
|
||||
src/ripple/crypto/GenerateDeterministicKey.h
|
||||
src/ripple/crypto/RFC1751.h
|
||||
src/ripple/crypto/csprng.h
|
||||
src/ripple/crypto/secure_erase.h
|
||||
DESTINATION include/ripple/crypto)
|
||||
install (
|
||||
FILES
|
||||
src/ripple/crypto/impl/ec_key.h
|
||||
src/ripple/crypto/impl/openssl.h
|
||||
DESTINATION include/ripple/crypto/impl)
|
||||
install (
|
||||
FILES
|
||||
src/ripple/json/JsonPropertyStream.h
|
||||
@@ -245,6 +235,7 @@ install (
|
||||
src/ripple/protocol/TER.h
|
||||
src/ripple/protocol/TxFlags.h
|
||||
src/ripple/protocol/TxFormats.h
|
||||
src/ripple/protocol/TxMeta.h
|
||||
src/ripple/protocol/UintTypes.h
|
||||
src/ripple/protocol/digest.h
|
||||
src/ripple/protocol/jss.h
|
||||
@@ -297,26 +288,27 @@ install (
|
||||
# WARNING!! -- horrible levelization ahead
|
||||
# (these files should be isolated or moved...but
|
||||
# unfortunately unit_test.h above creates this dependency)
|
||||
install (
|
||||
FILES
|
||||
src/beast/extras/beast/unit_test/amount.hpp
|
||||
src/beast/extras/beast/unit_test/dstream.hpp
|
||||
src/beast/extras/beast/unit_test/global_suites.hpp
|
||||
src/beast/extras/beast/unit_test/match.hpp
|
||||
src/beast/extras/beast/unit_test/recorder.hpp
|
||||
src/beast/extras/beast/unit_test/reporter.hpp
|
||||
src/beast/extras/beast/unit_test/results.hpp
|
||||
src/beast/extras/beast/unit_test/runner.hpp
|
||||
src/beast/extras/beast/unit_test/suite.hpp
|
||||
src/beast/extras/beast/unit_test/suite_info.hpp
|
||||
src/beast/extras/beast/unit_test/suite_list.hpp
|
||||
src/beast/extras/beast/unit_test/thread.hpp
|
||||
DESTINATION include/beast/unit_test)
|
||||
install (
|
||||
FILES
|
||||
src/beast/extras/beast/unit_test/detail/const_container.hpp
|
||||
DESTINATION include/beast/unit_test/detail)
|
||||
|
||||
if (tests)
|
||||
install (
|
||||
FILES
|
||||
src/ripple/beast/unit_test/amount.hpp
|
||||
src/ripple/beast/unit_test/dstream.hpp
|
||||
src/ripple/beast/unit_test/global_suites.hpp
|
||||
src/ripple/beast/unit_test/match.hpp
|
||||
src/ripple/beast/unit_test/recorder.hpp
|
||||
src/ripple/beast/unit_test/reporter.hpp
|
||||
src/ripple/beast/unit_test/results.hpp
|
||||
src/ripple/beast/unit_test/runner.hpp
|
||||
src/ripple/beast/unit_test/suite.hpp
|
||||
src/ripple/beast/unit_test/suite_info.hpp
|
||||
src/ripple/beast/unit_test/suite_list.hpp
|
||||
src/ripple/beast/unit_test/thread.hpp
|
||||
DESTINATION include/ripple/beast/extras/unit_test)
|
||||
install (
|
||||
FILES
|
||||
src/ripple/beast/unit_test/detail/const_container.hpp
|
||||
DESTINATION include/ripple/beast/unit_test/detail)
|
||||
endif () #tests
|
||||
#[===================================================================[
|
||||
rippled executable
|
||||
#]===================================================================]
|
||||
@@ -330,6 +322,9 @@ add_executable (rippled src/ripple/app/main/Application.h)
|
||||
if (unity)
|
||||
set_target_properties(rippled PROPERTIES UNITY_BUILD ON)
|
||||
endif ()
|
||||
if (tests)
|
||||
target_compile_definitions(rippled PUBLIC ENABLE_TESTS)
|
||||
endif()
|
||||
target_sources (rippled PRIVATE
|
||||
#[===============================[
|
||||
main sources:
|
||||
@@ -373,7 +368,6 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/app/main/Main.cpp
|
||||
src/ripple/app/main/NodeIdentity.cpp
|
||||
src/ripple/app/main/NodeStoreScheduler.cpp
|
||||
src/ripple/app/reporting/DBHelpers.cpp
|
||||
src/ripple/app/reporting/ReportingETL.cpp
|
||||
src/ripple/app/reporting/ETLSource.cpp
|
||||
src/ripple/app/reporting/P2pProxy.cpp
|
||||
@@ -406,6 +400,24 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/app/paths/impl/DirectStep.cpp
|
||||
src/ripple/app/paths/impl/PaySteps.cpp
|
||||
src/ripple/app/paths/impl/XRPEndpointStep.cpp
|
||||
src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp
|
||||
src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp
|
||||
src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp
|
||||
src/ripple/app/sidechain/Federator.cpp
|
||||
src/ripple/app/sidechain/FederatorEvents.cpp
|
||||
src/ripple/app/sidechain/impl/ChainListener.cpp
|
||||
src/ripple/app/sidechain/impl/DoorKeeper.cpp
|
||||
src/ripple/app/sidechain/impl/InitialSync.cpp
|
||||
src/ripple/app/sidechain/impl/MainchainListener.cpp
|
||||
src/ripple/app/sidechain/impl/SidechainListener.cpp
|
||||
src/ripple/app/sidechain/impl/SignatureCollector.cpp
|
||||
src/ripple/app/sidechain/impl/SignerList.cpp
|
||||
src/ripple/app/sidechain/impl/TicketHolder.cpp
|
||||
src/ripple/app/sidechain/impl/WebsocketClient.cpp
|
||||
src/ripple/app/tx/impl/ApplyContext.cpp
|
||||
src/ripple/app/tx/impl/BookTip.cpp
|
||||
src/ripple/app/tx/impl/CancelCheck.cpp
|
||||
@@ -437,11 +449,11 @@ target_sources (rippled PRIVATE
|
||||
#]===============================]
|
||||
src/ripple/basics/impl/Archive.cpp
|
||||
src/ripple/basics/impl/BasicConfig.cpp
|
||||
src/ripple/basics/impl/PerfLogImp.cpp
|
||||
src/ripple/basics/impl/ResolverAsio.cpp
|
||||
src/ripple/basics/impl/UptimeClock.cpp
|
||||
src/ripple/basics/impl/make_SSLContext.cpp
|
||||
src/ripple/basics/impl/mulDiv.cpp
|
||||
src/ripple/basics/impl/partitioned_unordered_map.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
subdir: conditions
|
||||
@@ -461,7 +473,6 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/core/impl/LoadMonitor.cpp
|
||||
src/ripple/core/impl/SNTPClock.cpp
|
||||
src/ripple/core/impl/SociDB.cpp
|
||||
src/ripple/core/impl/Stoppable.cpp
|
||||
src/ripple/core/impl/TimeKeeper.cpp
|
||||
src/ripple/core/impl/Workers.cpp
|
||||
src/ripple/core/Pg.cpp
|
||||
@@ -479,15 +490,12 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/ledger/impl/ApplyViewBase.cpp
|
||||
src/ripple/ledger/impl/ApplyViewImpl.cpp
|
||||
src/ripple/ledger/impl/BookDirs.cpp
|
||||
src/ripple/ledger/impl/CachedSLEs.cpp
|
||||
src/ripple/ledger/impl/CachedView.cpp
|
||||
src/ripple/ledger/impl/CashDiff.cpp
|
||||
src/ripple/ledger/impl/Directory.cpp
|
||||
src/ripple/ledger/impl/OpenView.cpp
|
||||
src/ripple/ledger/impl/PaymentSandbox.cpp
|
||||
src/ripple/ledger/impl/RawStateTable.cpp
|
||||
src/ripple/ledger/impl/ReadView.cpp
|
||||
src/ripple/ledger/impl/TxMeta.cpp
|
||||
src/ripple/ledger/impl/View.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
@@ -516,12 +524,14 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/nodestore/impl/DatabaseNodeImp.cpp
|
||||
src/ripple/nodestore/impl/DatabaseRotatingImp.cpp
|
||||
src/ripple/nodestore/impl/DatabaseShardImp.cpp
|
||||
src/ripple/nodestore/impl/DeterministicShard.cpp
|
||||
src/ripple/nodestore/impl/DecodedBlob.cpp
|
||||
src/ripple/nodestore/impl/DummyScheduler.cpp
|
||||
src/ripple/nodestore/impl/EncodedBlob.cpp
|
||||
src/ripple/nodestore/impl/ManagerImp.cpp
|
||||
src/ripple/nodestore/impl/NodeObject.cpp
|
||||
src/ripple/nodestore/impl/Shard.cpp
|
||||
src/ripple/nodestore/impl/ShardInfo.cpp
|
||||
src/ripple/nodestore/impl/TaskQueue.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
@@ -537,6 +547,7 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/overlay/impl/PeerSet.cpp
|
||||
src/ripple/overlay/impl/ProtocolVersion.cpp
|
||||
src/ripple/overlay/impl/TrafficCount.cpp
|
||||
src/ripple/overlay/impl/TxMetrics.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
subdir: peerfinder
|
||||
@@ -576,6 +587,7 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/handlers/DepositAuthorized.cpp
|
||||
src/ripple/rpc/handlers/DownloadShard.cpp
|
||||
src/ripple/rpc/handlers/Feature1.cpp
|
||||
src/ripple/rpc/handlers/FederatorInfo.cpp
|
||||
src/ripple/rpc/handlers/Fee1.cpp
|
||||
src/ripple/rpc/handlers/FetchInfo.cpp
|
||||
src/ripple/rpc/handlers/GatewayBalances.cpp
|
||||
@@ -593,6 +605,7 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/handlers/LogLevel.cpp
|
||||
src/ripple/rpc/handlers/LogRotate.cpp
|
||||
src/ripple/rpc/handlers/Manifest.cpp
|
||||
src/ripple/rpc/handlers/NodeToShard.cpp
|
||||
src/ripple/rpc/handlers/NoRippleCheck.cpp
|
||||
src/ripple/rpc/handlers/OwnerInfo.cpp
|
||||
src/ripple/rpc/handlers/PathFind.cpp
|
||||
@@ -614,6 +627,7 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/handlers/TransactionEntry.cpp
|
||||
src/ripple/rpc/handlers/Tx.cpp
|
||||
src/ripple/rpc/handlers/TxHistory.cpp
|
||||
src/ripple/rpc/handlers/TxReduceRelay.cpp
|
||||
src/ripple/rpc/handlers/UnlList.cpp
|
||||
src/ripple/rpc/handlers/Unsubscribe.cpp
|
||||
src/ripple/rpc/handlers/ValidationCreate.cpp
|
||||
@@ -633,6 +647,11 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/impl/ShardVerificationScheduler.cpp
|
||||
src/ripple/rpc/impl/Status.cpp
|
||||
src/ripple/rpc/impl/TransactionSign.cpp
|
||||
#[===============================[
|
||||
main sources:
|
||||
subdir: perflog
|
||||
#]===============================]
|
||||
src/ripple/perflog/impl/PerfLogImp.cpp
|
||||
|
||||
#[===============================[
|
||||
main sources:
|
||||
@@ -648,326 +667,327 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/shamap/impl/SHAMap.cpp
|
||||
src/ripple/shamap/impl/SHAMapDelta.cpp
|
||||
src/ripple/shamap/impl/SHAMapInnerNode.cpp
|
||||
src/ripple/shamap/impl/SHAMapItem.cpp
|
||||
src/ripple/shamap/impl/SHAMapLeafNode.cpp
|
||||
src/ripple/shamap/impl/SHAMapNodeID.cpp
|
||||
src/ripple/shamap/impl/SHAMapSync.cpp
|
||||
src/ripple/shamap/impl/SHAMapTreeNode.cpp
|
||||
src/ripple/shamap/impl/ShardFamily.cpp
|
||||
src/ripple/shamap/impl/ShardFamily.cpp)
|
||||
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: app
|
||||
#]===============================]
|
||||
src/test/app/AccountDelete_test.cpp
|
||||
src/test/app/AccountTxPaging_test.cpp
|
||||
src/test/app/AmendmentTable_test.cpp
|
||||
src/test/app/Check_test.cpp
|
||||
src/test/app/CrossingLimits_test.cpp
|
||||
src/test/app/DeliverMin_test.cpp
|
||||
src/test/app/DepositAuth_test.cpp
|
||||
src/test/app/Discrepancy_test.cpp
|
||||
src/test/app/DNS_test.cpp
|
||||
src/test/app/Escrow_test.cpp
|
||||
src/test/app/FeeVote_test.cpp
|
||||
src/test/app/Flow_test.cpp
|
||||
src/test/app/Freeze_test.cpp
|
||||
src/test/app/HashRouter_test.cpp
|
||||
src/test/app/LedgerHistory_test.cpp
|
||||
src/test/app/LedgerLoad_test.cpp
|
||||
src/test/app/LedgerReplay_test.cpp
|
||||
src/test/app/LoadFeeTrack_test.cpp
|
||||
src/test/app/Manifest_test.cpp
|
||||
src/test/app/MultiSign_test.cpp
|
||||
src/test/app/OfferStream_test.cpp
|
||||
src/test/app/Offer_test.cpp
|
||||
src/test/app/OversizeMeta_test.cpp
|
||||
src/test/app/Path_test.cpp
|
||||
src/test/app/PayChan_test.cpp
|
||||
src/test/app/PayStrand_test.cpp
|
||||
src/test/app/PseudoTx_test.cpp
|
||||
src/test/app/RCLCensorshipDetector_test.cpp
|
||||
src/test/app/RCLValidations_test.cpp
|
||||
src/test/app/Regression_test.cpp
|
||||
src/test/app/SHAMapStore_test.cpp
|
||||
src/test/app/SetAuth_test.cpp
|
||||
src/test/app/SetRegularKey_test.cpp
|
||||
src/test/app/SetTrust_test.cpp
|
||||
src/test/app/Taker_test.cpp
|
||||
src/test/app/TheoreticalQuality_test.cpp
|
||||
src/test/app/Ticket_test.cpp
|
||||
src/test/app/Transaction_ordering_test.cpp
|
||||
src/test/app/TrustAndBalance_test.cpp
|
||||
src/test/app/TxQ_test.cpp
|
||||
src/test/app/ValidatorKeys_test.cpp
|
||||
src/test/app/ValidatorList_test.cpp
|
||||
src/test/app/ValidatorSite_test.cpp
|
||||
src/test/app/tx/apply_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: basics
|
||||
#]===============================]
|
||||
src/test/basics/Buffer_test.cpp
|
||||
src/test/basics/DetectCrash_test.cpp
|
||||
src/test/basics/FileUtilities_test.cpp
|
||||
src/test/basics/IOUAmount_test.cpp
|
||||
src/test/basics/KeyCache_test.cpp
|
||||
src/test/basics/PerfLog_test.cpp
|
||||
src/test/basics/RangeSet_test.cpp
|
||||
src/test/basics/Slice_test.cpp
|
||||
src/test/basics/StringUtilities_test.cpp
|
||||
src/test/basics/TaggedCache_test.cpp
|
||||
src/test/basics/XRPAmount_test.cpp
|
||||
src/test/basics/base64_test.cpp
|
||||
src/test/basics/base_uint_test.cpp
|
||||
src/test/basics/contract_test.cpp
|
||||
src/test/basics/FeeUnits_test.cpp
|
||||
src/test/basics/hardened_hash_test.cpp
|
||||
src/test/basics/mulDiv_test.cpp
|
||||
src/test/basics/tagged_integer_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: beast
|
||||
#]===============================]
|
||||
src/test/beast/IPEndpoint_test.cpp
|
||||
src/test/beast/LexicalCast_test.cpp
|
||||
src/test/beast/SemanticVersion_test.cpp
|
||||
src/test/beast/aged_associative_container_test.cpp
|
||||
src/test/beast/beast_CurrentThreadName_test.cpp
|
||||
src/test/beast/beast_Journal_test.cpp
|
||||
src/test/beast/beast_PropertyStream_test.cpp
|
||||
src/test/beast/beast_Zero_test.cpp
|
||||
src/test/beast/beast_abstract_clock_test.cpp
|
||||
src/test/beast/beast_basic_seconds_clock_test.cpp
|
||||
src/test/beast/beast_io_latency_probe_test.cpp
|
||||
src/test/beast/define_print.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: conditions
|
||||
#]===============================]
|
||||
src/test/conditions/PreimageSha256_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: consensus
|
||||
#]===============================]
|
||||
src/test/consensus/ByzantineFailureSim_test.cpp
|
||||
src/test/consensus/Consensus_test.cpp
|
||||
src/test/consensus/DistributedValidatorsSim_test.cpp
|
||||
src/test/consensus/LedgerTiming_test.cpp
|
||||
src/test/consensus/LedgerTrie_test.cpp
|
||||
src/test/consensus/NegativeUNL_test.cpp
|
||||
src/test/consensus/ScaleFreeSim_test.cpp
|
||||
src/test/consensus/Validations_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: core
|
||||
#]===============================]
|
||||
src/test/core/ClosureCounter_test.cpp
|
||||
src/test/core/Config_test.cpp
|
||||
src/test/core/Coroutine_test.cpp
|
||||
src/test/core/CryptoPRNG_test.cpp
|
||||
src/test/core/JobQueue_test.cpp
|
||||
src/test/core/SociDB_test.cpp
|
||||
src/test/core/Stoppable_test.cpp
|
||||
src/test/core/Workers_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: crypto
|
||||
#]===============================]
|
||||
src/test/crypto/Openssl_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: csf
|
||||
#]===============================]
|
||||
src/test/csf/BasicNetwork_test.cpp
|
||||
src/test/csf/Digraph_test.cpp
|
||||
src/test/csf/Histogram_test.cpp
|
||||
src/test/csf/Scheduler_test.cpp
|
||||
src/test/csf/impl/Sim.cpp
|
||||
src/test/csf/impl/ledgers.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: json
|
||||
#]===============================]
|
||||
src/test/json/Object_test.cpp
|
||||
src/test/json/Output_test.cpp
|
||||
src/test/json/Writer_test.cpp
|
||||
src/test/json/json_value_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: jtx
|
||||
#]===============================]
|
||||
src/test/jtx/Env_test.cpp
|
||||
src/test/jtx/WSClient_test.cpp
|
||||
src/test/jtx/impl/Account.cpp
|
||||
src/test/jtx/impl/Env.cpp
|
||||
src/test/jtx/impl/JSONRPCClient.cpp
|
||||
src/test/jtx/impl/ManualTimeKeeper.cpp
|
||||
src/test/jtx/impl/WSClient.cpp
|
||||
src/test/jtx/impl/acctdelete.cpp
|
||||
src/test/jtx/impl/account_txn_id.cpp
|
||||
src/test/jtx/impl/amount.cpp
|
||||
src/test/jtx/impl/balance.cpp
|
||||
src/test/jtx/impl/check.cpp
|
||||
src/test/jtx/impl/delivermin.cpp
|
||||
src/test/jtx/impl/deposit.cpp
|
||||
src/test/jtx/impl/envconfig.cpp
|
||||
src/test/jtx/impl/fee.cpp
|
||||
src/test/jtx/impl/flags.cpp
|
||||
src/test/jtx/impl/invoice_id.cpp
|
||||
src/test/jtx/impl/jtx_json.cpp
|
||||
src/test/jtx/impl/last_ledger_sequence.cpp
|
||||
src/test/jtx/impl/memo.cpp
|
||||
src/test/jtx/impl/multisign.cpp
|
||||
src/test/jtx/impl/offer.cpp
|
||||
src/test/jtx/impl/owners.cpp
|
||||
src/test/jtx/impl/paths.cpp
|
||||
src/test/jtx/impl/pay.cpp
|
||||
src/test/jtx/impl/quality2.cpp
|
||||
src/test/jtx/impl/rate.cpp
|
||||
src/test/jtx/impl/regkey.cpp
|
||||
src/test/jtx/impl/sendmax.cpp
|
||||
src/test/jtx/impl/seq.cpp
|
||||
src/test/jtx/impl/sig.cpp
|
||||
src/test/jtx/impl/tag.cpp
|
||||
src/test/jtx/impl/ticket.cpp
|
||||
src/test/jtx/impl/trust.cpp
|
||||
src/test/jtx/impl/txflags.cpp
|
||||
src/test/jtx/impl/utility.cpp
|
||||
if (tests)
|
||||
target_sources (rippled PRIVATE
|
||||
src/test/app/AccountDelete_test.cpp
|
||||
src/test/app/AccountTxPaging_test.cpp
|
||||
src/test/app/AmendmentTable_test.cpp
|
||||
src/test/app/Check_test.cpp
|
||||
src/test/app/CrossingLimits_test.cpp
|
||||
src/test/app/DeliverMin_test.cpp
|
||||
src/test/app/DepositAuth_test.cpp
|
||||
src/test/app/Discrepancy_test.cpp
|
||||
src/test/app/DNS_test.cpp
|
||||
src/test/app/Escrow_test.cpp
|
||||
src/test/app/FeeVote_test.cpp
|
||||
src/test/app/Flow_test.cpp
|
||||
src/test/app/Freeze_test.cpp
|
||||
src/test/app/HashRouter_test.cpp
|
||||
src/test/app/LedgerHistory_test.cpp
|
||||
src/test/app/LedgerLoad_test.cpp
|
||||
src/test/app/LedgerReplay_test.cpp
|
||||
src/test/app/LoadFeeTrack_test.cpp
|
||||
src/test/app/Manifest_test.cpp
|
||||
src/test/app/MultiSign_test.cpp
|
||||
src/test/app/OfferStream_test.cpp
|
||||
src/test/app/Offer_test.cpp
|
||||
src/test/app/OversizeMeta_test.cpp
|
||||
src/test/app/Path_test.cpp
|
||||
src/test/app/PayChan_test.cpp
|
||||
src/test/app/PayStrand_test.cpp
|
||||
src/test/app/PseudoTx_test.cpp
|
||||
src/test/app/RCLCensorshipDetector_test.cpp
|
||||
src/test/app/RCLValidations_test.cpp
|
||||
src/test/app/Regression_test.cpp
|
||||
src/test/app/SHAMapStore_test.cpp
|
||||
src/test/app/SetAuth_test.cpp
|
||||
src/test/app/SetRegularKey_test.cpp
|
||||
src/test/app/SetTrust_test.cpp
|
||||
src/test/app/Taker_test.cpp
|
||||
src/test/app/TheoreticalQuality_test.cpp
|
||||
src/test/app/Ticket_test.cpp
|
||||
src/test/app/Transaction_ordering_test.cpp
|
||||
src/test/app/TrustAndBalance_test.cpp
|
||||
src/test/app/TxQ_test.cpp
|
||||
src/test/app/ValidatorKeys_test.cpp
|
||||
src/test/app/ValidatorList_test.cpp
|
||||
src/test/app/ValidatorSite_test.cpp
|
||||
src/test/app/tx/apply_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: basics
|
||||
#]===============================]
|
||||
src/test/basics/Buffer_test.cpp
|
||||
src/test/basics/DetectCrash_test.cpp
|
||||
src/test/basics/Expected_test.cpp
|
||||
src/test/basics/FileUtilities_test.cpp
|
||||
src/test/basics/IOUAmount_test.cpp
|
||||
src/test/basics/KeyCache_test.cpp
|
||||
src/test/basics/PerfLog_test.cpp
|
||||
src/test/basics/RangeSet_test.cpp
|
||||
src/test/basics/scope_test.cpp
|
||||
src/test/basics/Slice_test.cpp
|
||||
src/test/basics/StringUtilities_test.cpp
|
||||
src/test/basics/TaggedCache_test.cpp
|
||||
src/test/basics/XRPAmount_test.cpp
|
||||
src/test/basics/base64_test.cpp
|
||||
src/test/basics/base_uint_test.cpp
|
||||
src/test/basics/contract_test.cpp
|
||||
src/test/basics/FeeUnits_test.cpp
|
||||
src/test/basics/hardened_hash_test.cpp
|
||||
src/test/basics/mulDiv_test.cpp
|
||||
src/test/basics/tagged_integer_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: beast
|
||||
#]===============================]
|
||||
src/test/beast/IPEndpoint_test.cpp
|
||||
src/test/beast/LexicalCast_test.cpp
|
||||
src/test/beast/SemanticVersion_test.cpp
|
||||
src/test/beast/aged_associative_container_test.cpp
|
||||
src/test/beast/beast_CurrentThreadName_test.cpp
|
||||
src/test/beast/beast_Journal_test.cpp
|
||||
src/test/beast/beast_PropertyStream_test.cpp
|
||||
src/test/beast/beast_Zero_test.cpp
|
||||
src/test/beast/beast_abstract_clock_test.cpp
|
||||
src/test/beast/beast_basic_seconds_clock_test.cpp
|
||||
src/test/beast/beast_io_latency_probe_test.cpp
|
||||
src/test/beast/define_print.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: conditions
|
||||
#]===============================]
|
||||
src/test/conditions/PreimageSha256_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: consensus
|
||||
#]===============================]
|
||||
src/test/consensus/ByzantineFailureSim_test.cpp
|
||||
src/test/consensus/Consensus_test.cpp
|
||||
src/test/consensus/DistributedValidatorsSim_test.cpp
|
||||
src/test/consensus/LedgerTiming_test.cpp
|
||||
src/test/consensus/LedgerTrie_test.cpp
|
||||
src/test/consensus/NegativeUNL_test.cpp
|
||||
src/test/consensus/ScaleFreeSim_test.cpp
|
||||
src/test/consensus/Validations_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: core
|
||||
#]===============================]
|
||||
src/test/core/ClosureCounter_test.cpp
|
||||
src/test/core/Config_test.cpp
|
||||
src/test/core/Coroutine_test.cpp
|
||||
src/test/core/CryptoPRNG_test.cpp
|
||||
src/test/core/JobQueue_test.cpp
|
||||
src/test/core/SociDB_test.cpp
|
||||
src/test/core/Workers_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: csf
|
||||
#]===============================]
|
||||
src/test/csf/BasicNetwork_test.cpp
|
||||
src/test/csf/Digraph_test.cpp
|
||||
src/test/csf/Histogram_test.cpp
|
||||
src/test/csf/Scheduler_test.cpp
|
||||
src/test/csf/impl/Sim.cpp
|
||||
src/test/csf/impl/ledgers.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: json
|
||||
#]===============================]
|
||||
src/test/json/Object_test.cpp
|
||||
src/test/json/Output_test.cpp
|
||||
src/test/json/Writer_test.cpp
|
||||
src/test/json/json_value_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: jtx
|
||||
#]===============================]
|
||||
src/test/jtx/Env_test.cpp
|
||||
src/test/jtx/WSClient_test.cpp
|
||||
src/test/jtx/impl/Account.cpp
|
||||
src/test/jtx/impl/Env.cpp
|
||||
src/test/jtx/impl/JSONRPCClient.cpp
|
||||
src/test/jtx/impl/ManualTimeKeeper.cpp
|
||||
src/test/jtx/impl/WSClient.cpp
|
||||
src/test/jtx/impl/acctdelete.cpp
|
||||
src/test/jtx/impl/account_txn_id.cpp
|
||||
src/test/jtx/impl/amount.cpp
|
||||
src/test/jtx/impl/balance.cpp
|
||||
src/test/jtx/impl/check.cpp
|
||||
src/test/jtx/impl/delivermin.cpp
|
||||
src/test/jtx/impl/deposit.cpp
|
||||
src/test/jtx/impl/envconfig.cpp
|
||||
src/test/jtx/impl/fee.cpp
|
||||
src/test/jtx/impl/flags.cpp
|
||||
src/test/jtx/impl/invoice_id.cpp
|
||||
src/test/jtx/impl/jtx_json.cpp
|
||||
src/test/jtx/impl/last_ledger_sequence.cpp
|
||||
src/test/jtx/impl/memo.cpp
|
||||
src/test/jtx/impl/multisign.cpp
|
||||
src/test/jtx/impl/offer.cpp
|
||||
src/test/jtx/impl/owners.cpp
|
||||
src/test/jtx/impl/paths.cpp
|
||||
src/test/jtx/impl/pay.cpp
|
||||
src/test/jtx/impl/quality2.cpp
|
||||
src/test/jtx/impl/rate.cpp
|
||||
src/test/jtx/impl/regkey.cpp
|
||||
src/test/jtx/impl/sendmax.cpp
|
||||
src/test/jtx/impl/seq.cpp
|
||||
src/test/jtx/impl/sig.cpp
|
||||
src/test/jtx/impl/tag.cpp
|
||||
src/test/jtx/impl/ticket.cpp
|
||||
src/test/jtx/impl/trust.cpp
|
||||
src/test/jtx/impl/txflags.cpp
|
||||
src/test/jtx/impl/utility.cpp
|
||||
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: ledger
|
||||
#]===============================]
|
||||
src/test/ledger/BookDirs_test.cpp
|
||||
src/test/ledger/Directory_test.cpp
|
||||
src/test/ledger/Invariants_test.cpp
|
||||
src/test/ledger/PaymentSandbox_test.cpp
|
||||
src/test/ledger/PendingSaves_test.cpp
|
||||
src/test/ledger/SkipList_test.cpp
|
||||
src/test/ledger/View_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: net
|
||||
#]===============================]
|
||||
src/test/net/DatabaseDownloader_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: nodestore
|
||||
#]===============================]
|
||||
src/test/nodestore/Backend_test.cpp
|
||||
src/test/nodestore/Basics_test.cpp
|
||||
src/test/nodestore/DatabaseShard_test.cpp
|
||||
src/test/nodestore/Database_test.cpp
|
||||
src/test/nodestore/Timing_test.cpp
|
||||
src/test/nodestore/import_test.cpp
|
||||
src/test/nodestore/varint_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: overlay
|
||||
#]===============================]
|
||||
src/test/overlay/ProtocolVersion_test.cpp
|
||||
src/test/overlay/cluster_test.cpp
|
||||
src/test/overlay/short_read_test.cpp
|
||||
src/test/overlay/compression_test.cpp
|
||||
src/test/overlay/reduce_relay_test.cpp
|
||||
src/test/overlay/handshake_test.cpp
|
||||
src/test/overlay/tx_reduce_relay_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: peerfinder
|
||||
#]===============================]
|
||||
src/test/peerfinder/Livecache_test.cpp
|
||||
src/test/peerfinder/PeerFinder_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: protocol
|
||||
#]===============================]
|
||||
src/test/protocol/BuildInfo_test.cpp
|
||||
src/test/protocol/InnerObjectFormats_test.cpp
|
||||
src/test/protocol/Issue_test.cpp
|
||||
src/test/protocol/KnownFormatToGRPC_test.cpp
|
||||
src/test/protocol/PublicKey_test.cpp
|
||||
src/test/protocol/Quality_test.cpp
|
||||
src/test/protocol/STAccount_test.cpp
|
||||
src/test/protocol/STAmount_test.cpp
|
||||
src/test/protocol/STObject_test.cpp
|
||||
src/test/protocol/STTx_test.cpp
|
||||
src/test/protocol/STValidation_test.cpp
|
||||
src/test/protocol/SecretKey_test.cpp
|
||||
src/test/protocol/Seed_test.cpp
|
||||
src/test/protocol/SeqProxy_test.cpp
|
||||
src/test/protocol/TER_test.cpp
|
||||
src/test/protocol/types_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: resource
|
||||
#]===============================]
|
||||
src/test/resource/Logic_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: rpc
|
||||
#]===============================]
|
||||
src/test/rpc/AccountCurrencies_test.cpp
|
||||
src/test/rpc/AccountInfo_test.cpp
|
||||
src/test/rpc/AccountLinesRPC_test.cpp
|
||||
src/test/rpc/AccountObjects_test.cpp
|
||||
src/test/rpc/AccountOffers_test.cpp
|
||||
src/test/rpc/AccountSet_test.cpp
|
||||
src/test/rpc/AccountTx_test.cpp
|
||||
src/test/rpc/AmendmentBlocked_test.cpp
|
||||
src/test/rpc/Book_test.cpp
|
||||
src/test/rpc/DepositAuthorized_test.cpp
|
||||
src/test/rpc/DeliveredAmount_test.cpp
|
||||
src/test/rpc/Feature_test.cpp
|
||||
src/test/rpc/Fee_test.cpp
|
||||
src/test/rpc/GatewayBalances_test.cpp
|
||||
src/test/rpc/GetCounts_test.cpp
|
||||
src/test/rpc/JSONRPC_test.cpp
|
||||
src/test/rpc/KeyGeneration_test.cpp
|
||||
src/test/rpc/LedgerClosed_test.cpp
|
||||
src/test/rpc/LedgerData_test.cpp
|
||||
src/test/rpc/LedgerRPC_test.cpp
|
||||
src/test/rpc/LedgerRequestRPC_test.cpp
|
||||
src/test/rpc/ManifestRPC_test.cpp
|
||||
src/test/rpc/NodeToShardRPC_test.cpp
|
||||
src/test/rpc/NoRippleCheck_test.cpp
|
||||
src/test/rpc/NoRipple_test.cpp
|
||||
src/test/rpc/OwnerInfo_test.cpp
|
||||
src/test/rpc/Peers_test.cpp
|
||||
src/test/rpc/ReportingETL_test.cpp
|
||||
src/test/rpc/Roles_test.cpp
|
||||
src/test/rpc/RPCCall_test.cpp
|
||||
src/test/rpc/RPCOverload_test.cpp
|
||||
src/test/rpc/RobustTransaction_test.cpp
|
||||
src/test/rpc/ServerInfo_test.cpp
|
||||
src/test/rpc/ShardArchiveHandler_test.cpp
|
||||
src/test/rpc/Status_test.cpp
|
||||
src/test/rpc/Submit_test.cpp
|
||||
src/test/rpc/Subscribe_test.cpp
|
||||
src/test/rpc/Transaction_test.cpp
|
||||
src/test/rpc/TransactionEntry_test.cpp
|
||||
src/test/rpc/TransactionHistory_test.cpp
|
||||
src/test/rpc/Tx_test.cpp
|
||||
src/test/rpc/ValidatorInfo_test.cpp
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/rpc/Version_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: server
|
||||
#]===============================]
|
||||
src/test/server/ServerStatus_test.cpp
|
||||
src/test/server/Server_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: shamap
|
||||
#]===============================]
|
||||
src/test/shamap/FetchPack_test.cpp
|
||||
src/test/shamap/SHAMapSync_test.cpp
|
||||
src/test/shamap/SHAMap_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: unit_test
|
||||
#]===============================]
|
||||
src/test/unit_test/multi_runner.cpp)
|
||||
endif () #tests
|
||||
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: ledger
|
||||
#]===============================]
|
||||
src/test/ledger/BookDirs_test.cpp
|
||||
src/test/ledger/CashDiff_test.cpp
|
||||
src/test/ledger/Directory_test.cpp
|
||||
src/test/ledger/Invariants_test.cpp
|
||||
src/test/ledger/PaymentSandbox_test.cpp
|
||||
src/test/ledger/PendingSaves_test.cpp
|
||||
src/test/ledger/SkipList_test.cpp
|
||||
src/test/ledger/View_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: net
|
||||
#]===============================]
|
||||
src/test/net/DatabaseDownloader_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: nodestore
|
||||
#]===============================]
|
||||
src/test/nodestore/Backend_test.cpp
|
||||
src/test/nodestore/Basics_test.cpp
|
||||
src/test/nodestore/DatabaseShard_test.cpp
|
||||
src/test/nodestore/Database_test.cpp
|
||||
src/test/nodestore/Timing_test.cpp
|
||||
src/test/nodestore/import_test.cpp
|
||||
src/test/nodestore/varint_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: overlay
|
||||
#]===============================]
|
||||
src/test/overlay/ProtocolVersion_test.cpp
|
||||
src/test/overlay/cluster_test.cpp
|
||||
src/test/overlay/short_read_test.cpp
|
||||
src/test/overlay/compression_test.cpp
|
||||
src/test/overlay/reduce_relay_test.cpp
|
||||
src/test/overlay/handshake_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: peerfinder
|
||||
#]===============================]
|
||||
src/test/peerfinder/Livecache_test.cpp
|
||||
src/test/peerfinder/PeerFinder_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: protocol
|
||||
#]===============================]
|
||||
src/test/protocol/BuildInfo_test.cpp
|
||||
src/test/protocol/InnerObjectFormats_test.cpp
|
||||
src/test/protocol/Issue_test.cpp
|
||||
src/test/protocol/KnownFormatToGRPC_test.cpp
|
||||
src/test/protocol/PublicKey_test.cpp
|
||||
src/test/protocol/Quality_test.cpp
|
||||
src/test/protocol/STAccount_test.cpp
|
||||
src/test/protocol/STAmount_test.cpp
|
||||
src/test/protocol/STObject_test.cpp
|
||||
src/test/protocol/STTx_test.cpp
|
||||
src/test/protocol/STValidation_test.cpp
|
||||
src/test/protocol/SecretKey_test.cpp
|
||||
src/test/protocol/Seed_test.cpp
|
||||
src/test/protocol/SeqProxy_test.cpp
|
||||
src/test/protocol/TER_test.cpp
|
||||
src/test/protocol/types_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: resource
|
||||
#]===============================]
|
||||
src/test/resource/Logic_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: rpc
|
||||
#]===============================]
|
||||
src/test/rpc/AccountCurrencies_test.cpp
|
||||
src/test/rpc/AccountInfo_test.cpp
|
||||
src/test/rpc/AccountLinesRPC_test.cpp
|
||||
src/test/rpc/AccountObjects_test.cpp
|
||||
src/test/rpc/AccountOffers_test.cpp
|
||||
src/test/rpc/AccountSet_test.cpp
|
||||
src/test/rpc/AccountTx_test.cpp
|
||||
src/test/rpc/AmendmentBlocked_test.cpp
|
||||
src/test/rpc/Book_test.cpp
|
||||
src/test/rpc/DepositAuthorized_test.cpp
|
||||
src/test/rpc/DeliveredAmount_test.cpp
|
||||
src/test/rpc/Feature_test.cpp
|
||||
src/test/rpc/Fee_test.cpp
|
||||
src/test/rpc/GatewayBalances_test.cpp
|
||||
src/test/rpc/GetCounts_test.cpp
|
||||
src/test/rpc/JSONRPC_test.cpp
|
||||
src/test/rpc/KeyGeneration_test.cpp
|
||||
src/test/rpc/LedgerClosed_test.cpp
|
||||
src/test/rpc/LedgerData_test.cpp
|
||||
src/test/rpc/LedgerRPC_test.cpp
|
||||
src/test/rpc/LedgerRequestRPC_test.cpp
|
||||
src/test/rpc/ManifestRPC_test.cpp
|
||||
src/test/rpc/NoRippleCheck_test.cpp
|
||||
src/test/rpc/NoRipple_test.cpp
|
||||
src/test/rpc/OwnerInfo_test.cpp
|
||||
src/test/rpc/Peers_test.cpp
|
||||
src/test/rpc/ReportingETL_test.cpp
|
||||
src/test/rpc/Roles_test.cpp
|
||||
src/test/rpc/RPCCall_test.cpp
|
||||
src/test/rpc/RPCOverload_test.cpp
|
||||
src/test/rpc/RobustTransaction_test.cpp
|
||||
src/test/rpc/ServerInfo_test.cpp
|
||||
src/test/rpc/ShardArchiveHandler_test.cpp
|
||||
src/test/rpc/Status_test.cpp
|
||||
src/test/rpc/Submit_test.cpp
|
||||
src/test/rpc/Subscribe_test.cpp
|
||||
src/test/rpc/Transaction_test.cpp
|
||||
src/test/rpc/TransactionEntry_test.cpp
|
||||
src/test/rpc/TransactionHistory_test.cpp
|
||||
src/test/rpc/Tx_test.cpp
|
||||
src/test/rpc/ValidatorInfo_test.cpp
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/rpc/Version_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: server
|
||||
#]===============================]
|
||||
src/test/server/ServerStatus_test.cpp
|
||||
src/test/server/Server_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: shamap
|
||||
#]===============================]
|
||||
src/test/shamap/FetchPack_test.cpp
|
||||
src/test/shamap/SHAMapSync_test.cpp
|
||||
src/test/shamap/SHAMap_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: unit_test
|
||||
#]===============================]
|
||||
src/test/unit_test/multi_runner.cpp)
|
||||
target_link_libraries (rippled
|
||||
Ripple::boost
|
||||
Ripple::opts
|
||||
@@ -987,9 +1007,11 @@ endif ()
|
||||
|
||||
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.16)
|
||||
# any files that don't play well with unity should be added here
|
||||
set_source_files_properties(
|
||||
# these two seem to produce conflicts in beast teardown template methods
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/rpc/ShardArchiveHandler_test.cpp
|
||||
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
|
||||
if (tests)
|
||||
set_source_files_properties(
|
||||
# these two seem to produce conflicts in beast teardown template methods
|
||||
src/test/rpc/ValidatorRPC_test.cpp
|
||||
src/test/rpc/ShardArchiveHandler_test.cpp
|
||||
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
|
||||
endif () #tests
|
||||
endif ()
|
||||
|
||||
@@ -1,78 +1,79 @@
|
||||
#[===================================================================[
|
||||
docs target (optional)
|
||||
#]===================================================================]
|
||||
|
||||
find_package (Doxygen)
|
||||
if (NOT TARGET Doxygen::doxygen)
|
||||
message (STATUS "doxygen executable not found -- skipping docs target")
|
||||
return ()
|
||||
endif ()
|
||||
|
||||
set (doxygen_output_directory "${CMAKE_BINARY_DIR}/docs")
|
||||
set (doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src")
|
||||
set (doxygen_index_file "${doxygen_output_directory}/html/index.html")
|
||||
set (doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile")
|
||||
|
||||
file (GLOB_RECURSE doxygen_input
|
||||
docs/*.md
|
||||
src/ripple/*.h
|
||||
src/ripple/*.cpp
|
||||
src/ripple/*.md
|
||||
src/test/*.h
|
||||
src/test/*.md
|
||||
Builds/*/README.md)
|
||||
list (APPEND doxygen_input
|
||||
README.md
|
||||
RELEASENOTES.md
|
||||
src/README.md)
|
||||
set (dependencies "${doxygen_input}" "${doxyfile}")
|
||||
|
||||
function (verbose_find_path variable name)
|
||||
# find_path sets a CACHE variable, so don't try using a "local" variable.
|
||||
find_path (${variable} "${name}" ${ARGN})
|
||||
if (NOT ${variable})
|
||||
message (WARNING "could not find ${name}")
|
||||
else ()
|
||||
message (STATUS "found ${name}: ${${variable}}/${name}")
|
||||
if (tests)
|
||||
find_package (Doxygen)
|
||||
if (NOT TARGET Doxygen::doxygen)
|
||||
message (STATUS "doxygen executable not found -- skipping docs target")
|
||||
return ()
|
||||
endif ()
|
||||
endfunction ()
|
||||
|
||||
verbose_find_path (doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml)
|
||||
verbose_find_path (doxygen_dot_path dot)
|
||||
|
||||
# https://en.cppreference.com/w/Cppreference:Archives
|
||||
# https://stackoverflow.com/questions/60822559/how-to-move-a-file-download-from-configure-step-to-build-step
|
||||
set (download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake")
|
||||
file (WRITE
|
||||
"${download_script}"
|
||||
"file (DOWNLOAD \
|
||||
http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \
|
||||
${CMAKE_BINARY_DIR}/docs/cppreference.zip \
|
||||
EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \
|
||||
)\n \
|
||||
execute_process ( \
|
||||
COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \
|
||||
)\n"
|
||||
)
|
||||
set (tagfile "${CMAKE_BINARY_DIR}/docs/cppreference-doxygen-web.tag.xml")
|
||||
add_custom_command (
|
||||
OUTPUT "${tagfile}"
|
||||
COMMAND "${CMAKE_COMMAND}" -P "${download_script}"
|
||||
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs"
|
||||
)
|
||||
set (doxygen_tagfiles "${tagfile}=http://en.cppreference.com/w/")
|
||||
|
||||
add_custom_command (
|
||||
OUTPUT "${doxygen_index_file}"
|
||||
COMMAND "${CMAKE_COMMAND}" -E env
|
||||
"DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}"
|
||||
"DOXYGEN_INCLUDE_PATH=${doxygen_include_path}"
|
||||
"DOXYGEN_TAGFILES=${doxygen_tagfiles}"
|
||||
"DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}"
|
||||
"DOXYGEN_DOT_PATH=${doxygen_dot_path}"
|
||||
"${DOXYGEN_EXECUTABLE}" "${doxyfile}"
|
||||
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
DEPENDS "${dependencies}" "${tagfile}")
|
||||
add_custom_target (docs
|
||||
DEPENDS "${doxygen_index_file}"
|
||||
SOURCES "${dependencies}")
|
||||
|
||||
set (doxygen_output_directory "${CMAKE_BINARY_DIR}/docs")
|
||||
set (doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src")
|
||||
set (doxygen_index_file "${doxygen_output_directory}/html/index.html")
|
||||
set (doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile")
|
||||
|
||||
file (GLOB_RECURSE doxygen_input
|
||||
docs/*.md
|
||||
src/ripple/*.h
|
||||
src/ripple/*.cpp
|
||||
src/ripple/*.md
|
||||
src/test/*.h
|
||||
src/test/*.md
|
||||
Builds/*/README.md)
|
||||
list (APPEND doxygen_input
|
||||
README.md
|
||||
RELEASENOTES.md
|
||||
src/README.md)
|
||||
set (dependencies "${doxygen_input}" "${doxyfile}")
|
||||
|
||||
function (verbose_find_path variable name)
|
||||
# find_path sets a CACHE variable, so don't try using a "local" variable.
|
||||
find_path (${variable} "${name}" ${ARGN})
|
||||
if (NOT ${variable})
|
||||
message (WARNING "could not find ${name}")
|
||||
else ()
|
||||
message (STATUS "found ${name}: ${${variable}}/${name}")
|
||||
endif ()
|
||||
endfunction ()
|
||||
|
||||
verbose_find_path (doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml)
|
||||
verbose_find_path (doxygen_dot_path dot)
|
||||
|
||||
# https://en.cppreference.com/w/Cppreference:Archives
|
||||
# https://stackoverflow.com/questions/60822559/how-to-move-a-file-download-from-configure-step-to-build-step
|
||||
set (download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake")
|
||||
file (WRITE
|
||||
"${download_script}"
|
||||
"file (DOWNLOAD \
|
||||
http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \
|
||||
${CMAKE_BINARY_DIR}/docs/cppreference.zip \
|
||||
EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \
|
||||
)\n \
|
||||
execute_process ( \
|
||||
COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \
|
||||
)\n"
|
||||
)
|
||||
set (tagfile "${CMAKE_BINARY_DIR}/docs/cppreference-doxygen-web.tag.xml")
|
||||
add_custom_command (
|
||||
OUTPUT "${tagfile}"
|
||||
COMMAND "${CMAKE_COMMAND}" -P "${download_script}"
|
||||
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs"
|
||||
)
|
||||
set (doxygen_tagfiles "${tagfile}=http://en.cppreference.com/w/")
|
||||
|
||||
add_custom_command (
|
||||
OUTPUT "${doxygen_index_file}"
|
||||
COMMAND "${CMAKE_COMMAND}" -E env
|
||||
"DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}"
|
||||
"DOXYGEN_INCLUDE_PATH=${doxygen_include_path}"
|
||||
"DOXYGEN_TAGFILES=${doxygen_tagfiles}"
|
||||
"DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}"
|
||||
"DOXYGEN_DOT_PATH=${doxygen_dot_path}"
|
||||
"${DOXYGEN_EXECUTABLE}" "${doxyfile}"
|
||||
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
DEPENDS "${dependencies}" "${tagfile}")
|
||||
add_custom_target (docs
|
||||
DEPENDS "${doxygen_index_file}"
|
||||
SOURCES "${dependencies}")
|
||||
endif ()
|
||||
|
||||
@@ -39,14 +39,14 @@ endif ()
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
|
||||
set (is_clang TRUE)
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
|
||||
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
|
||||
message (FATAL_ERROR "This project requires clang 7 or later")
|
||||
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
|
||||
message (FATAL_ERROR "This project requires clang 8 or later")
|
||||
endif ()
|
||||
# TODO min AppleClang version check ?
|
||||
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
|
||||
set (is_gcc TRUE)
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
|
||||
message (FATAL_ERROR "This project requires GCC 7 or later")
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
|
||||
message (FATAL_ERROR "This project requires GCC 8 or later")
|
||||
endif ()
|
||||
endif ()
|
||||
if (CMAKE_GENERATOR STREQUAL "Xcode")
|
||||
|
||||
@@ -6,6 +6,8 @@ option (assert "Enables asserts, even in release builds" OFF)
|
||||
|
||||
option (reporting "Build rippled with reporting mode enabled" OFF)
|
||||
|
||||
option (tests "Build tests" ON)
|
||||
|
||||
option (unity "Creates a build using UNITY support in cmake. This is the default" ON)
|
||||
if (unity)
|
||||
if (CMAKE_VERSION VERSION_LESS 3.16)
|
||||
|
||||
@@ -2,49 +2,49 @@
|
||||
NIH dep: boost
|
||||
#]===================================================================]
|
||||
|
||||
if ((NOT DEFINED BOOST_ROOT) AND (DEFINED ENV{BOOST_ROOT}))
|
||||
set (BOOST_ROOT $ENV{BOOST_ROOT})
|
||||
endif ()
|
||||
file (TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT)
|
||||
if (WIN32 OR CYGWIN)
|
||||
if((NOT DEFINED BOOST_ROOT) AND(DEFINED ENV{BOOST_ROOT}))
|
||||
set(BOOST_ROOT $ENV{BOOST_ROOT})
|
||||
endif()
|
||||
file(TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT)
|
||||
if(WIN32 OR CYGWIN)
|
||||
# Workaround for MSVC having two boost versions - x86 and x64 on same PC in stage folders
|
||||
if (DEFINED BOOST_ROOT)
|
||||
if (IS_DIRECTORY ${BOOST_ROOT}/stage64/lib)
|
||||
set (BOOST_LIBRARYDIR ${BOOST_ROOT}/stage64/lib)
|
||||
elseif (IS_DIRECTORY ${BOOST_ROOT}/stage/lib)
|
||||
set (BOOST_LIBRARYDIR ${BOOST_ROOT}/stage/lib)
|
||||
elseif (IS_DIRECTORY ${BOOST_ROOT}/lib)
|
||||
set (BOOST_LIBRARYDIR ${BOOST_ROOT}/lib)
|
||||
else ()
|
||||
if(DEFINED BOOST_ROOT)
|
||||
if(IS_DIRECTORY ${BOOST_ROOT}/stage64/lib)
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage64/lib)
|
||||
elseif(IS_DIRECTORY ${BOOST_ROOT}/stage/lib)
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage/lib)
|
||||
elseif(IS_DIRECTORY ${BOOST_ROOT}/lib)
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT}/lib)
|
||||
else()
|
||||
message(WARNING "Did not find expected boost library dir. "
|
||||
"Defaulting to ${BOOST_ROOT}")
|
||||
set (BOOST_LIBRARYDIR ${BOOST_ROOT})
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
message (STATUS "BOOST_ROOT: ${BOOST_ROOT}")
|
||||
message (STATUS "BOOST_LIBRARYDIR: ${BOOST_LIBRARYDIR}")
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT})
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
message(STATUS "BOOST_ROOT: ${BOOST_ROOT}")
|
||||
message(STATUS "BOOST_LIBRARYDIR: ${BOOST_LIBRARYDIR}")
|
||||
|
||||
# uncomment the following as needed to debug FindBoost issues:
|
||||
#set (Boost_DEBUG ON)
|
||||
#set(Boost_DEBUG ON)
|
||||
|
||||
#[=========================================================[
|
||||
boost dynamic libraries don't trivially support @rpath
|
||||
linking right now (cmake's default), so just force
|
||||
static linking for macos, or if requested on linux by flag
|
||||
#]=========================================================]
|
||||
if (static)
|
||||
set (Boost_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
set (Boost_USE_MULTITHREADED ON)
|
||||
if (static AND NOT APPLE)
|
||||
set (Boost_USE_STATIC_RUNTIME ON)
|
||||
else ()
|
||||
set (Boost_USE_STATIC_RUNTIME OFF)
|
||||
endif ()
|
||||
if(static)
|
||||
set(Boost_USE_STATIC_LIBS ON)
|
||||
endif()
|
||||
set(Boost_USE_MULTITHREADED ON)
|
||||
if(static AND NOT APPLE)
|
||||
set(Boost_USE_STATIC_RUNTIME ON)
|
||||
else()
|
||||
set(Boost_USE_STATIC_RUNTIME OFF)
|
||||
endif()
|
||||
# TBD:
|
||||
# Boost_USE_DEBUG_RUNTIME: When ON, uses Boost libraries linked against the
|
||||
find_package (Boost 1.70 REQUIRED
|
||||
find_package(Boost 1.70 REQUIRED
|
||||
COMPONENTS
|
||||
chrono
|
||||
container
|
||||
@@ -57,16 +57,16 @@ find_package (Boost 1.70 REQUIRED
|
||||
system
|
||||
thread)
|
||||
|
||||
add_library (ripple_boost INTERFACE)
|
||||
add_library (Ripple::boost ALIAS ripple_boost)
|
||||
if (is_xcode)
|
||||
target_include_directories (ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
target_compile_options (ripple_boost INTERFACE --system-header-prefix="boost/")
|
||||
else ()
|
||||
target_include_directories (ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
add_library(ripple_boost INTERFACE)
|
||||
add_library(Ripple::boost ALIAS ripple_boost)
|
||||
if(is_xcode)
|
||||
target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
|
||||
else()
|
||||
target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
endif()
|
||||
|
||||
target_link_libraries (ripple_boost
|
||||
target_link_libraries(ripple_boost
|
||||
INTERFACE
|
||||
Boost::boost
|
||||
Boost::chrono
|
||||
@@ -78,28 +78,19 @@ target_link_libraries (ripple_boost
|
||||
Boost::regex
|
||||
Boost::system
|
||||
Boost::thread)
|
||||
if (Boost_COMPILER)
|
||||
target_link_libraries (ripple_boost INTERFACE Boost::disable_autolinking)
|
||||
endif ()
|
||||
if (san AND is_clang)
|
||||
# TODO: gcc does not support -fsanitize-blacklist...can we do something else
|
||||
if(Boost_COMPILER)
|
||||
target_link_libraries(ripple_boost INTERFACE Boost::disable_autolinking)
|
||||
endif()
|
||||
if(san AND is_clang)
|
||||
# TODO: gcc does not support -fsanitize-blacklist...can we do something else
|
||||
# for gcc ?
|
||||
if (NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
get_target_property (Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif ()
|
||||
if(NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif()
|
||||
message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
|
||||
file (WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
target_compile_options (opts
|
||||
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
target_compile_options(opts
|
||||
INTERFACE
|
||||
# ignore boost headers for sanitizing
|
||||
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
|
||||
endif ()
|
||||
|
||||
# workaround for xcode 10.2 and boost < 1.69
|
||||
# once we require Boost 1.69 or higher, this can be removed
|
||||
# see: https://github.com/boostorg/asio/commit/43874d5
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND
|
||||
CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.0.1.10010043 AND
|
||||
Boost_VERSION LESS 106900)
|
||||
target_compile_definitions (opts INTERFACE BOOST_ASIO_HAS_STD_STRING_VIEW)
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
@@ -969,7 +969,7 @@ function(_Boost_COMPONENT_DEPENDENCIES component _ret)
|
||||
set(_Boost_WAVE_DEPENDENCIES filesystem serialization thread chrono date_time atomic)
|
||||
set(_Boost_WSERIALIZATION_DEPENDENCIES serialization)
|
||||
endif()
|
||||
if(NOT Boost_VERSION_STRING VERSION_LESS 1.71.0)
|
||||
if(NOT Boost_VERSION_STRING VERSION_LESS 1.77.0)
|
||||
message(WARNING "New Boost version may have incorrect or missing dependencies and imported targets")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -22,7 +22,7 @@ if (static)
|
||||
set (OPENSSL_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
set (OPENSSL_MSVC_STATIC_RT ON)
|
||||
find_package (OpenSSL 1.0.2 REQUIRED)
|
||||
find_package (OpenSSL 1.1.1 REQUIRED)
|
||||
target_link_libraries (ripple_libs
|
||||
INTERFACE
|
||||
OpenSSL::SSL
|
||||
|
||||
@@ -1,18 +1,15 @@
|
||||
|
||||
if(reporting)
|
||||
|
||||
find_package(PostgreSQL)
|
||||
|
||||
if(NOT PostgreSQL_FOUND)
|
||||
message("find_package did not find postgres")
|
||||
message("find_package did not find postgres")
|
||||
find_library(postgres NAMES pq libpq libpq-dev pq-dev postgresql-devel)
|
||||
find_path(libpq-fe NAMES libpq-fe.h PATH_SUFFIXES postgresql pgsql include)
|
||||
|
||||
|
||||
find_library(postgres NAMES pq libpq libpq-dev pq-dev postgresql-devel)
|
||||
find_path(libpq-fe NAMES libpq-fe.h PATH_SUFFIXES postgresql pgsql include)
|
||||
if(NOT libpq-fe_FOUND OR NOT postgres_FOUND)
|
||||
message("No system installed Postgres found. Will build")
|
||||
|
||||
add_library(postgres SHARED IMPORTED GLOBAL)
|
||||
add_library(pgport SHARED IMPORTED GLOBAL)
|
||||
add_library(pgcommon SHARED IMPORTED GLOBAL)
|
||||
ExternalProject_Add(postgres_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/postgres/postgres.git
|
||||
@@ -22,47 +19,52 @@ if(reporting)
|
||||
UPDATE_COMMAND ""
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/src/interfaces/libpq/${ep_lib_prefix}pq.so
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/src/interfaces/libpq/${ep_lib_prefix}pq.a
|
||||
<BINARY_DIR>/src/common/${ep_lib_prefix}pgcommon.a
|
||||
<BINARY_DIR>/src/port/${ep_lib_prefix}pgport.a
|
||||
LOG_BUILD TRUE
|
||||
)
|
||||
|
||||
|
||||
|
||||
ExternalProject_Get_Property (postgres_src SOURCE_DIR)
|
||||
ExternalProject_Get_Property (postgres_src BINARY_DIR)
|
||||
|
||||
set (postgres_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${postgres_src_SOURCE_DIR})
|
||||
|
||||
list(APPEND INCLUDE_DIRS ${SOURCE_DIR}/src/include)
|
||||
list(APPEND INCLUDE_DIRS ${SOURCE_DIR}/src/interfaces/libpq)
|
||||
|
||||
|
||||
set_target_properties (postgres PROPERTIES
|
||||
list(APPEND INCLUDE_DIRS
|
||||
${SOURCE_DIR}/src/include
|
||||
${SOURCE_DIR}/src/interfaces/libpq
|
||||
)
|
||||
set_target_properties(postgres PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/src/interfaces/libpq/${ep_lib_prefix}pq.so
|
||||
${BINARY_DIR}/src/interfaces/libpq/${ep_lib_prefix}pq.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${INCLUDE_DIRS}")
|
||||
"${INCLUDE_DIRS}"
|
||||
)
|
||||
set_target_properties(pgcommon PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/src/common/${ep_lib_prefix}pgcommon.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${INCLUDE_DIRS}"
|
||||
)
|
||||
set_target_properties(pgport PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/src/port/${ep_lib_prefix}pgport.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${INCLUDE_DIRS}"
|
||||
)
|
||||
add_dependencies(postgres postgres_src)
|
||||
|
||||
add_dependencies(pgcommon postgres_src)
|
||||
add_dependencies(pgport postgres_src)
|
||||
file(TO_CMAKE_PATH "${postgres_src_SOURCE_DIR}" postgres_src_SOURCE_DIR)
|
||||
|
||||
target_link_libraries(ripple_libs INTERFACE postgres)
|
||||
target_link_libraries(ripple_libs INTERFACE postgres pgcommon pgport)
|
||||
else()
|
||||
|
||||
message("Found system installed Postgres via find_libary")
|
||||
|
||||
target_include_directories(ripple_libs INTERFACE ${libpq-fe})
|
||||
|
||||
target_link_libraries(ripple_libs INTERFACE ${postgres})
|
||||
endif()
|
||||
|
||||
else()
|
||||
message("Found system installed Postgres via find_package")
|
||||
|
||||
target_include_directories(ripple_libs INTERFACE ${PostgreSQL_INCLUDE_DIRS})
|
||||
|
||||
target_link_libraries(ripple_libs INTERFACE ${PostgreSQL_LIBRARIES})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
@@ -9,9 +9,21 @@ if (static)
|
||||
set (Protobuf_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
find_package (Protobuf 3.8)
|
||||
if (local_protobuf OR NOT Protobuf_FOUND)
|
||||
if (is_multiconfig)
|
||||
set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARIES})
|
||||
else ()
|
||||
string(TOUPPER ${CMAKE_BUILD_TYPE} upper_cmake_build_type)
|
||||
set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARY_${upper_cmake_build_type}})
|
||||
endif ()
|
||||
if (local_protobuf OR NOT (Protobuf_FOUND AND Protobuf_PROTOC_EXECUTABLE AND protobuf_protoc_lib))
|
||||
include (GNUInstallDirs)
|
||||
message (STATUS "using local protobuf build.")
|
||||
set(protobuf_reqs Protobuf_PROTOC_EXECUTABLE protobuf_protoc_lib)
|
||||
foreach(lib ${protobuf_reqs})
|
||||
if(NOT ${lib})
|
||||
message(STATUS "Couldn't find ${lib}")
|
||||
endif()
|
||||
endforeach()
|
||||
if (WIN32)
|
||||
# protobuf prepends lib even on windows
|
||||
set (pbuf_lib_pre "lib")
|
||||
|
||||
@@ -8,7 +8,7 @@ set_target_properties (rocksdb_lib
|
||||
|
||||
option (local_rocksdb "use local build of rocksdb." OFF)
|
||||
if (NOT local_rocksdb)
|
||||
find_package (RocksDB 6.7 QUIET CONFIG)
|
||||
find_package (RocksDB 6.27 QUIET CONFIG)
|
||||
if (TARGET RocksDB::rocksdb)
|
||||
message (STATUS "Found RocksDB using config.")
|
||||
get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION_DEBUG)
|
||||
@@ -40,7 +40,7 @@ if (NOT local_rocksdb)
|
||||
# TBD if there is some way to extract transitive deps..then:
|
||||
#set (RocksDB_USE_STATIC ON)
|
||||
else ()
|
||||
find_package (RocksDB 6.7 MODULE)
|
||||
find_package (RocksDB 6.27 MODULE)
|
||||
if (ROCKSDB_FOUND)
|
||||
if (RocksDB_LIBRARY_DEBUG)
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_DEBUG ${RocksDB_LIBRARY_DEBUG})
|
||||
@@ -60,10 +60,10 @@ if (local_rocksdb)
|
||||
ExternalProject_Add (rocksdb
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
|
||||
GIT_TAG v6.7.3
|
||||
GIT_TAG v6.27.3
|
||||
PATCH_COMMAND
|
||||
# only used by windows build
|
||||
${CMAKE_COMMAND} -E copy
|
||||
${CMAKE_COMMAND} -E copy_if_different
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc
|
||||
<SOURCE_DIR>/thirdparty.inc
|
||||
COMMAND
|
||||
@@ -96,9 +96,13 @@ if (local_rocksdb)
|
||||
-Dlz4_FOUND=ON
|
||||
-USNAPPY_*
|
||||
-Usnappy_*
|
||||
-USnappy_*
|
||||
-Dsnappy_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:snappy_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-Dsnappy_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-Dsnappy_FOUND=ON
|
||||
-DSnappy_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:snappy_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-DSnappy_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-DSnappy_FOUND=ON
|
||||
-DWITH_MD_LIBRARY=OFF
|
||||
-DWITH_RUNTIME_DEBUG=$<IF:$<CONFIG:Debug>,ON,OFF>
|
||||
-DFAIL_ON_WARNINGS=OFF
|
||||
|
||||
@@ -47,7 +47,7 @@ if(reporting)
|
||||
GIT_REPOSITORY https://github.com/krb5/krb5.git
|
||||
GIT_TAG master
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND autoreconf src && ./src/configure --enable-static --disable-shared > /dev/null
|
||||
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared > /dev/null
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND ""
|
||||
|
||||
@@ -211,6 +211,7 @@ else ()
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DgRPC_BUILD_TESTS=OFF
|
||||
-DgRPC_BENCHMARK_PROVIDER=""
|
||||
-DgRPC_BUILD_CSHARP_EXT=OFF
|
||||
-DgRPC_MSVC_STATIC_RUNTIME=ON
|
||||
-DgRPC_INSTALL=OFF
|
||||
|
||||
@@ -1,4 +1,71 @@
|
||||
#include "build_version.h"
|
||||
const char* rocksdb_build_git_sha = "rocksdb_build_git_sha: N/A";
|
||||
const char* rocksdb_build_git_date = "rocksdb_build_git_date: N/A";
|
||||
const char* rocksdb_build_compile_date = "N/A";
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "rocksdb/version.h"
|
||||
#include "util/string_util.h"
|
||||
|
||||
// The build script may replace these values with real values based
|
||||
// on whether or not GIT is available and the platform settings
|
||||
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:@GIT_SHA@";
|
||||
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:@GIT_TAG@";
|
||||
#define HAS_GIT_CHANGES @GIT_MOD@
|
||||
#if HAS_GIT_CHANGES == 0
|
||||
// If HAS_GIT_CHANGES is 0, the GIT date is used.
|
||||
// Use the time the branch/tag was last modified
|
||||
static const std::string rocksdb_build_date = "rocksdb_build_date:@GIT_DATE@";
|
||||
#else
|
||||
// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
|
||||
// Use the time the build was created.
|
||||
static const std::string rocksdb_build_date = "rocksdb_build_date:@BUILD_DATE@";
|
||||
#endif
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {
|
||||
size_t colon = name.find(":");
|
||||
if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) {
|
||||
// If we found a "@:", then this property was a build-time substitution that failed. Skip it
|
||||
size_t at = name.find("@", colon);
|
||||
if (at != colon + 1) {
|
||||
// Everything before the colon is the name, after is the value
|
||||
(*props)[name.substr(0, colon)] = name.substr(colon + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static std::unordered_map<std::string, std::string>* LoadPropertiesSet() {
|
||||
auto * properties = new std::unordered_map<std::string, std::string>();
|
||||
AddProperty(properties, rocksdb_build_git_sha);
|
||||
AddProperty(properties, rocksdb_build_git_tag);
|
||||
AddProperty(properties, rocksdb_build_date);
|
||||
return properties;
|
||||
}
|
||||
|
||||
const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
|
||||
static std::unique_ptr<std::unordered_map<std::string, std::string>> props(LoadPropertiesSet());
|
||||
return *props;
|
||||
}
|
||||
|
||||
std::string GetRocksVersionAsString(bool with_patch) {
|
||||
std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
|
||||
if (with_patch) {
|
||||
return version + "." + ToString(ROCKSDB_PATCH);
|
||||
} else {
|
||||
return version;
|
||||
}
|
||||
}
|
||||
|
||||
std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
|
||||
std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true);
|
||||
if (verbose) {
|
||||
for (const auto& it : GetRocksBuildProperties()) {
|
||||
info.append("\n ");
|
||||
info.append(it.first);
|
||||
info.append(": ");
|
||||
info.append(it.second);
|
||||
}
|
||||
}
|
||||
return info;
|
||||
}
|
||||
} // namespace ROCKSDB_NAMESPACE
|
||||
|
||||
|
||||
@@ -1,13 +1,30 @@
|
||||
# This patches unsigned-types.h in the soci official sources
|
||||
# so as to remove type range check exceptions that cause
|
||||
# us trouble when using boost::optional to select int values
|
||||
|
||||
# Some versions of CMake erroneously patch external projects on every build.
|
||||
# If the patch makes no changes, skip it. This workaround can be
|
||||
# removed once we stop supporting vulnerable versions of CMake.
|
||||
# https://gitlab.kitware.com/cmake/cmake/-/issues/21086
|
||||
file (STRINGS include/soci/unsigned-types.h sourcecode)
|
||||
# Delete the .patched file if it exists, so it doesn't end up duplicated.
|
||||
# Trying to remove a file that does not exist is not a problem.
|
||||
file (REMOVE include/soci/unsigned-types.h.patched)
|
||||
foreach (line_ ${sourcecode})
|
||||
if (line_ MATCHES "^[ \\t]+throw[ ]+soci_error[ ]*\\([ ]*\"Value outside of allowed.+$")
|
||||
set (line_ "//${CMAKE_MATCH_0}")
|
||||
endif ()
|
||||
file (APPEND include/soci/unsigned-types.h.patched "${line_}\n")
|
||||
endforeach ()
|
||||
execute_process( COMMAND ${CMAKE_COMMAND} -E compare_files
|
||||
include/soci/unsigned-types.h include/soci/unsigned-types.h.patched
|
||||
RESULT_VARIABLE compare_result
|
||||
)
|
||||
if( compare_result EQUAL 0)
|
||||
message(DEBUG "The soci source and patch files are identical. Make no changes.")
|
||||
file (REMOVE include/soci/unsigned-types.h.patched)
|
||||
return()
|
||||
endif()
|
||||
file (RENAME include/soci/unsigned-types.h include/soci/unsigned-types.h.orig)
|
||||
file (RENAME include/soci/unsigned-types.h.patched include/soci/unsigned-types.h)
|
||||
# also fix Boost.cmake so that it just returns when we override the Boost_FOUND var
|
||||
|
||||
@@ -16,7 +16,7 @@ need these software components
|
||||
|-----------|-----------------------|
|
||||
| [Visual Studio 2017](README.md#install-visual-studio-2017)| 15.5.4 |
|
||||
| [Git for Windows](README.md#install-git-for-windows)| 2.16.1 |
|
||||
| [OpenSSL Library](README.md#install-openssl) | 1.0.2n |
|
||||
| [OpenSSL Library](README.md#install-openssl) | 1.1.1L |
|
||||
| [Boost library](README.md#build-boost) | 1.70.0 |
|
||||
| [CMake for Windows](README.md#optional-install-cmake-for-windows)* | 3.12 |
|
||||
|
||||
@@ -50,17 +50,19 @@ Windows is mandatory for running the unit tests.
|
||||
|
||||
### Install OpenSSL
|
||||
|
||||
[Download OpenSSL.](http://slproweb.com/products/Win32OpenSSL.html) There will
|
||||
four `Win64` bit variants available, you want the non-light `v1.0` line. As of
|
||||
this writing, you **should** select
|
||||
[Download the latest version of
|
||||
OpenSSL.](http://slproweb.com/products/Win32OpenSSL.html) There will
|
||||
several `Win64` bit variants available, you want the non-light
|
||||
`v1.1` line. As of this writing, you **should** select
|
||||
|
||||
* Win64 OpenSSL v1.0.2n.
|
||||
* Win64 OpenSSL v1.1.1L
|
||||
|
||||
and should **not** select
|
||||
|
||||
* Win64 OpenSSL v1.0.2n light
|
||||
* Win64 OpenSSL v1.1.0g
|
||||
* Win64 OpenSSL v1.1.0g light
|
||||
* Anything with "Win32" in the name
|
||||
* Anything with "light" in the name
|
||||
* Anything with "EXPERIMENTAL" in the name
|
||||
* Anything in the 3.0 line - rippled won't currently build with this version.
|
||||
|
||||
Run the installer, and choose an appropriate location for your OpenSSL
|
||||
installation. In this guide we use `C:\lib\OpenSSL-Win64` as the destination
|
||||
@@ -146,7 +148,7 @@ If you receive an error about not having the "correct access rights" make sure
|
||||
you have Github ssh keys, as described above.
|
||||
|
||||
For a stable release, choose the `master` branch or one of the tagged releases
|
||||
listed on [rippled's GitHub page](https://github.com/ripple/rippled/releases).
|
||||
listed on [rippled's GitHub page](https://github.com/ripple/rippled/releases).
|
||||
|
||||
```
|
||||
git checkout master
|
||||
@@ -175,7 +177,7 @@ To begin, simply:
|
||||
cloned rippled folder.
|
||||
2. Right-click on `CMakeLists.txt` in the **Solution Explorer - Folder View** to
|
||||
generate a `CMakeSettings.json` file. A sample settings file is provided
|
||||
[here](/Builds/VisualStudio2017/CMakeSettings-example.json). Customize the
|
||||
[here](/Builds/VisualStudio2017/CMakeSettings-example.json). Customize the
|
||||
settings for `BOOST_ROOT`, `OPENSSL_ROOT` to match the install paths if they
|
||||
differ from those in the file.
|
||||
4. Select either the `x64-Release` or `x64-Debug` configuration from the
|
||||
@@ -221,7 +223,7 @@ Navigate to the `build\cmake` folder created above and select the `rippled.sln`
|
||||
file. You can then choose whether to build the `Debug` or `Release` solution
|
||||
configuration.
|
||||
|
||||
The executable will be in
|
||||
The executable will be in
|
||||
```
|
||||
.\build\cmake\Release\rippled.exe
|
||||
```
|
||||
@@ -233,21 +235,24 @@ These paths are relative to your cloned git repository.
|
||||
|
||||
# Unity/No-Unity Builds
|
||||
|
||||
The rippled build system defaults to using [unity source files](http://onqtam.com/programming/2018-07-07-unity-builds/)
|
||||
to improve build times. In some cases it might be desirable to disable the unity build and compile
|
||||
individual translation units. Here is how you can switch to a "no-unity" build configuration:
|
||||
The rippled build system defaults to using
|
||||
[unity source files](http://onqtam.com/programming/2018-07-07-unity-builds/)
|
||||
to improve build times. In some cases it might be desirable to disable the
|
||||
unity build and compile individual translation units. Here is how you can
|
||||
switch to a "no-unity" build configuration:
|
||||
|
||||
## Visual Studio Integrated CMake
|
||||
|
||||
Edit your `CmakeSettings.json` (described above) by adding `-Dunity=OFF` to the `cmakeCommandArgs` entry
|
||||
for each build configuration.
|
||||
|
||||
Edit your `CmakeSettings.json` (described above) by adding `-Dunity=OFF`
|
||||
to the `cmakeCommandArgs` entry for each build configuration.
|
||||
|
||||
## Standalone CMake Builds
|
||||
|
||||
When running cmake to generate the Visual Studio project files, add `-Dunity=OFF` to the
|
||||
command line options passed to cmake.
|
||||
When running cmake to generate the Visual Studio project files, add
|
||||
`-Dunity=OFF` to the command line options passed to cmake.
|
||||
|
||||
**Note:** you will need to re-run the cmake configuration step anytime you want to switch between unity/no-unity builds.
|
||||
**Note:** you will need to re-run the cmake configuration step anytime you
|
||||
want to switch between unity/no-unity builds.
|
||||
|
||||
# Unit Test (Recommended)
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@ time cmake \
|
||||
-Dpackages_only=ON \
|
||||
-Dcontainer_label="${container_tag}" \
|
||||
-Dhave_package_container=ON \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=OFF \
|
||||
-G Ninja ../..
|
||||
time cmake --build . --target ${pkgtype} -- -v
|
||||
|
||||
time cmake --build . --target ${pkgtype}
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
# can be overridden by project or group variables as needed.
|
||||
variables:
|
||||
# these containers are built manually using the rippled
|
||||
# cmake build (container targets) and tagged/pushed so they
|
||||
# cmake build (container targets) and tagged/pushed so they
|
||||
# can be used here
|
||||
RPM_CONTAINER_TAG: "2020-02-10"
|
||||
RPM_CONTAINER_NAME: "rippled-rpm-builder"
|
||||
@@ -113,7 +113,7 @@ rpm_sign:
|
||||
dependencies:
|
||||
- rpm_build
|
||||
image:
|
||||
name: centos:7
|
||||
name: artifactory.ops.ripple.com/centos:7
|
||||
<<: *only_primary
|
||||
before_script:
|
||||
- |
|
||||
@@ -142,7 +142,7 @@ dpkg_sign:
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
image:
|
||||
name: ubuntu:18.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
<<: *only_primary
|
||||
before_script:
|
||||
- |
|
||||
@@ -181,47 +181,39 @@ centos_7_smoketest:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: centos:7
|
||||
name: artifactory.ops.ripple.com/centos:7
|
||||
<<: *run_local_smoketest
|
||||
|
||||
fedora_29_smoketest:
|
||||
# TODO: Remove "allow_failure" when tests fixed
|
||||
rocky_8_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: fedora:29
|
||||
name: rockylinux/rockylinux:8
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_28_smoketest:
|
||||
fedora_34_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: fedora:28
|
||||
name: artifactory.ops.ripple.com/fedora:34
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_27_smoketest:
|
||||
fedora_35_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- rpm_build
|
||||
- rpm_sign
|
||||
image:
|
||||
name: fedora:27
|
||||
<<: *run_local_smoketest
|
||||
|
||||
## this one is not LTS, but we
|
||||
## get some extra coverage by including it
|
||||
## consider dropping it when 20.04 is ready
|
||||
ubuntu_20_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: ubuntu:20.04
|
||||
name: artifactory.ops.ripple.com/fedora:35
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
ubuntu_18_smoketest:
|
||||
stage: smoketest
|
||||
@@ -229,25 +221,54 @@ ubuntu_18_smoketest:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: ubuntu:18.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
<<: *run_local_smoketest
|
||||
|
||||
ubuntu_16_smoketest:
|
||||
ubuntu_20_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: ubuntu:16.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:20.04
|
||||
<<: *run_local_smoketest
|
||||
|
||||
# TODO: remove "allow_failure" when 22.04 released in 4/2022...
|
||||
ubuntu_22_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:22.04
|
||||
<<: *run_local_smoketest
|
||||
allow_failure: true
|
||||
|
||||
debian_9_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: debian:9
|
||||
name: artifactory.ops.ripple.com/debian:9
|
||||
<<: *run_local_smoketest
|
||||
|
||||
debian_10_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:10
|
||||
<<: *run_local_smoketest
|
||||
|
||||
debian_11_smoketest:
|
||||
stage: smoketest
|
||||
dependencies:
|
||||
- dpkg_build
|
||||
- dpkg_sign
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:11
|
||||
<<: *run_local_smoketest
|
||||
|
||||
#########################################################################
|
||||
@@ -265,7 +286,7 @@ debian_9_smoketest:
|
||||
verify_head_signed:
|
||||
stage: verify_sig
|
||||
image:
|
||||
name: ubuntu:latest
|
||||
name: artifactory.ops.ripple.com/ubuntu:latest
|
||||
<<: *only_primary
|
||||
script:
|
||||
- . ./Builds/containers/gitlab-ci/verify_head_commit.sh
|
||||
@@ -315,7 +336,7 @@ push_test:
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: alpine:latest
|
||||
name: artifactory.ops.ripple.com/alpine:latest
|
||||
artifacts:
|
||||
paths:
|
||||
- files.info
|
||||
@@ -340,44 +361,59 @@ centos_7_verify_repo_test:
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: centos:7
|
||||
name: artifactory.ops.ripple.com/centos:7
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
fedora_29_verify_repo_test:
|
||||
rocky_8_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: fedora:29
|
||||
name: rockylinux/rockylinux:8
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_28_verify_repo_test:
|
||||
fedora_34_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: fedora:28
|
||||
name: artifactory.ops.ripple.com/fedora:34
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_27_verify_repo_test:
|
||||
fedora_35_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: fedora:27
|
||||
name: artifactory.ops.ripple.com/fedora:35
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
ubuntu_18_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_20_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
@@ -385,35 +421,25 @@ ubuntu_20_verify_repo_test:
|
||||
DISTRO: "focal"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: ubuntu:20.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:20.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_18_verify_repo_test:
|
||||
# TODO: remove "allow_failure" when 22.04 released in 4/2022...
|
||||
ubuntu_22_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DISTRO: "jammy"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_16_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "xenial"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: ubuntu:16.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:22.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
debian_9_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
@@ -421,7 +447,31 @@ debian_9_verify_repo_test:
|
||||
DISTRO: "stretch"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: debian:9
|
||||
name: artifactory.ops.ripple.com/debian:9
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_10_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "buster"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:10
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_11_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
DISTRO: "bullseye"
|
||||
DEB_REPO: "rippled-deb-test-mirror"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:11
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
@@ -439,7 +489,7 @@ debian_9_verify_repo_test:
|
||||
wait_before_push_prod:
|
||||
stage: wait_approval_prod
|
||||
image:
|
||||
name: alpine:latest
|
||||
name: artifactory.ops.ripple.com/alpine:latest
|
||||
<<: *only_primary
|
||||
script:
|
||||
- echo "proceeding to next stage"
|
||||
@@ -460,7 +510,7 @@ push_prod:
|
||||
DEB_REPO: "rippled-deb"
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: alpine:latest
|
||||
name: artifactory.ops.ripple.com/alpine:latest
|
||||
stage: push_to_prod
|
||||
artifacts:
|
||||
paths:
|
||||
@@ -486,44 +536,59 @@ centos_7_verify_repo_prod:
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: centos:7
|
||||
name: artifactory.ops.ripple.com/centos:7
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
fedora_29_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
rocky_8_verify_repo_test:
|
||||
stage: verify_from_test
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
RPM_REPO: "rippled-rpm-test-mirror"
|
||||
image:
|
||||
name: fedora:29
|
||||
name: rockylinux/rockylinux:8
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_28_verify_repo_prod:
|
||||
fedora_34_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: fedora:28
|
||||
name: artifactory.ops.ripple.com/fedora:34
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
fedora_27_verify_repo_prod:
|
||||
fedora_35_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: fedora:27
|
||||
name: artifactory.ops.ripple.com/fedora:35
|
||||
dependencies:
|
||||
- rpm_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
ubuntu_18_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_20_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
@@ -531,35 +596,25 @@ ubuntu_20_verify_repo_prod:
|
||||
DISTRO: "focal"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: ubuntu:20.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:20.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_18_verify_repo_prod:
|
||||
# TODO: remove "allow_failure" when 22.04 released in 4/2022...
|
||||
ubuntu_22_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "bionic"
|
||||
DISTRO: "jammy"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: ubuntu:18.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
ubuntu_16_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "xenial"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: ubuntu:16.04
|
||||
name: artifactory.ops.ripple.com/ubuntu:22.04
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
allow_failure: true
|
||||
|
||||
debian_9_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
@@ -567,7 +622,31 @@ debian_9_verify_repo_prod:
|
||||
DISTRO: "stretch"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: debian:9
|
||||
name: artifactory.ops.ripple.com/debian:9
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_10_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "buster"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:10
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
<<: *run_repo_smoketest
|
||||
|
||||
debian_11_verify_repo_prod:
|
||||
stage: verify_from_prod
|
||||
variables:
|
||||
DISTRO: "bullseye"
|
||||
DEB_REPO: "rippled-deb"
|
||||
image:
|
||||
name: artifactory.ops.ripple.com/debian:11
|
||||
dependencies:
|
||||
- dpkg_sign
|
||||
<<: *only_primary
|
||||
@@ -587,7 +666,7 @@ get_prod_hashes:
|
||||
DEB_REPO: "rippled-deb"
|
||||
RPM_REPO: "rippled-rpm"
|
||||
image:
|
||||
name: alpine:latest
|
||||
name: artifactory.ops.ripple.com/alpine:latest
|
||||
stage: get_final_hashes
|
||||
artifacts:
|
||||
paths:
|
||||
@@ -622,5 +701,3 @@ build_ubuntu_container:
|
||||
script:
|
||||
- . ./Builds/containers/gitlab-ci/build_container.sh dpkg
|
||||
allow_failure: true
|
||||
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ RIPPLED_DBG_PKG=$(ls rippled-dbgsym_*.deb)
|
||||
# TODO - where to upload src tgz?
|
||||
RIPPLED_SRC=$(ls rippled_*.orig.tar.gz)
|
||||
DEB_MATRIX=";deb.component=${COMPONENT};deb.architecture=amd64"
|
||||
for dist in stretch buster xenial bionic disco focal ; do
|
||||
for dist in stretch buster bullseye bionic focal jammy; do
|
||||
DEB_MATRIX="${DEB_MATRIX};deb.distribution=${dist}"
|
||||
done
|
||||
echo "{ \"debs\": {" > "${TOPDIR}/files.info"
|
||||
@@ -88,4 +88,3 @@ JSON
|
||||
)
|
||||
curl ${SLACK_NOTIFY_URL} --data-urlencode "${CONTENT}"
|
||||
fi
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ case ${ID} in
|
||||
ubuntu|debian)
|
||||
pkgtype="dpkg"
|
||||
;;
|
||||
fedora|centos|rhel|scientific)
|
||||
fedora|centos|rhel|scientific|rocky)
|
||||
pkgtype="rpm"
|
||||
;;
|
||||
*)
|
||||
@@ -51,7 +51,7 @@ if [ "${pkgtype}" = "dpkg" ] ; then
|
||||
elif [ "${install_from}" = "local" ] ; then
|
||||
# cached pkg install
|
||||
updateWithRetry
|
||||
apt-get -y install libprotobuf-dev libssl-dev
|
||||
apt-get -y install libprotobuf-dev libprotoc-dev protobuf-compiler libssl-dev
|
||||
rm -f build/dpkg/packages/rippled-dbgsym*.*
|
||||
dpkg --no-debsig -i build/dpkg/packages/*.deb
|
||||
else
|
||||
@@ -76,7 +76,12 @@ else
|
||||
yum -y install ${rpm_version_release}
|
||||
elif [ "${install_from}" = "local" ] ; then
|
||||
# cached pkg install
|
||||
yum install -y yum-utils openssl-static zlib-static
|
||||
pkgs=("yum-utils openssl-static zlib-static")
|
||||
if [ "$ID" = "rocky" ]; then
|
||||
sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/Rocky-PowerTools.repo
|
||||
pkgs="${pkgs[@]/openssl-static}"
|
||||
fi
|
||||
yum install -y $pkgs
|
||||
rm -f build/rpm/packages/rippled-debug*.rpm
|
||||
rm -f build/rpm/packages/*.src.rpm
|
||||
rpm -i build/rpm/packages/*.rpm
|
||||
@@ -95,5 +100,3 @@ fi
|
||||
# run unit tests
|
||||
/opt/ripple/bin/rippled --unittest --unittest-jobs $(nproc)
|
||||
/opt/ripple/bin/validator-keys --unittest
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
opt/ripple/etc/rippled.cfg
|
||||
opt/ripple/etc/validators.txt
|
||||
etc/logrotate.d/rippled
|
||||
/opt/ripple/etc/rippled.cfg
|
||||
/opt/ripple/etc/validators.txt
|
||||
/etc/logrotate.d/rippled
|
||||
|
||||
@@ -17,5 +17,5 @@ Section: devel
|
||||
Recommends: rippled (= ${binary:Version})
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends: ${misc:Depends}, ${shlibs:Depends}, libprotobuf-dev, libssl-dev
|
||||
Depends: ${misc:Depends}, ${shlibs:Depends}, libprotobuf-dev, libprotoc-dev, protobuf-compiler
|
||||
Description: development files for applications using xrpl core library (serialize + sign)
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
README.md
|
||||
LICENSE
|
||||
LICENSE.md
|
||||
RELEASENOTES.md
|
||||
|
||||
@@ -11,6 +11,9 @@ export CXXFLAGS:=$(subst -Werror=format-security,,$(CXXFLAGS))
|
||||
%:
|
||||
dh $@ --with systemd
|
||||
|
||||
override_dh_systemd_start:
|
||||
dh_systemd_start --no-restart-on-upgrade
|
||||
|
||||
override_dh_auto_configure:
|
||||
env
|
||||
rm -rf bld
|
||||
@@ -19,17 +22,17 @@ override_dh_auto_configure:
|
||||
cmake .. -G Ninja \
|
||||
-DCMAKE_INSTALL_PREFIX=/opt/ripple \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_UNITY_BUILD_BATCH_SIZE=10 \
|
||||
-Dstatic=ON \
|
||||
-Dunity=OFF \
|
||||
-Dvalidator_keys=ON \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
||||
-DCMAKE_VERBOSE_MAKEFILE=OFF
|
||||
|
||||
override_dh_auto_build:
|
||||
cd bld && \
|
||||
cmake --build . --target rippled --target validator-keys --parallel -- -v
|
||||
cmake --build . --target rippled --target validator-keys --parallel
|
||||
|
||||
override_dh_auto_install:
|
||||
cd bld && DESTDIR=../debian/tmp cmake --build . --target install -- -v
|
||||
cd bld && DESTDIR=../debian/tmp cmake --build . --target install
|
||||
install -D bld/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
|
||||
install -D Builds/containers/shared/update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh
|
||||
install -D bin/getRippledInfo debian/tmp/opt/ripple/bin/getRippledInfo
|
||||
@@ -38,5 +41,3 @@ override_dh_auto_install:
|
||||
rm -rf debian/tmp/opt/ripple/lib64/cmake/date
|
||||
rm -rf bld
|
||||
rm -rf bld_vl
|
||||
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ rippled
|
||||
%package devel
|
||||
Summary: Files for development of applications using xrpl core library
|
||||
Group: Development/Libraries
|
||||
Requires: openssl-static, zlib-static
|
||||
Requires: zlib-static
|
||||
|
||||
%description devel
|
||||
core library for development of standalone applications that sign transactions.
|
||||
@@ -32,15 +32,15 @@ core library for development of standalone applications that sign transactions.
|
||||
cd rippled
|
||||
mkdir -p bld.release
|
||||
cd bld.release
|
||||
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -DCMAKE_UNITY_BUILD_BATCH_SIZE=10 -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=ON -Dvalidator_keys=ON
|
||||
cmake --build . --parallel --target rippled --target validator-keys -- -v
|
||||
cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dstatic=true -Dunity=OFF -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON
|
||||
cmake --build . --parallel --target rippled --target validator-keys
|
||||
|
||||
%pre
|
||||
test -e /etc/pki/tls || { mkdir -p /etc/pki; ln -s /usr/lib/ssl /etc/pki/tls; }
|
||||
|
||||
%install
|
||||
rm -rf $RPM_BUILD_ROOT
|
||||
DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install -- -v
|
||||
DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.release --target install
|
||||
rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/cmake/date
|
||||
install -d ${RPM_BUILD_ROOT}/etc/opt/ripple
|
||||
install -d ${RPM_BUILD_ROOT}/usr/local/bin
|
||||
@@ -76,7 +76,7 @@ chmod 644 /etc/logrotate.d/rippled
|
||||
chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron
|
||||
|
||||
%files
|
||||
%doc rippled/README.md rippled/LICENSE
|
||||
%doc rippled/README.md rippled/LICENSE.md
|
||||
%{_bindir}/rippled
|
||||
/usr/local/bin/rippled
|
||||
%{_bindir}/update-rippled.sh
|
||||
@@ -110,4 +110,3 @@ chown -R root:$GROUP_NAME %{_prefix}/etc/update-rippled-cron
|
||||
|
||||
* Thu Jun 02 2016 Brandon Wilson <bwilson@ripple.com>
|
||||
- Install validators.txt
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ function build_boost()
|
||||
mkdir -p /opt/local
|
||||
cd /opt/local
|
||||
BOOST_ROOT=/opt/local/boost_${boost_path}
|
||||
BOOST_URL="https://dl.bintray.com/boostorg/release/${boost_ver}/source/boost_${boost_path}.tar.bz2"
|
||||
BOOST_URL="https://boostorg.jfrog.io/artifactory/main/release/${boost_ver}/source/boost_${boost_path}.tar.gz"
|
||||
BOOST_BUILD_ALL=true
|
||||
. /tmp/install_boost.sh
|
||||
if [ "$do_link" = true ] ; then
|
||||
@@ -29,7 +29,7 @@ cd openssl-${OPENSSL_VER}
|
||||
# NOTE: add -g to the end of the following line if we want debug symbols for openssl
|
||||
SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\")
|
||||
./config -fPIC --prefix=/opt/local/openssl --openssldir=${SSLDIR} zlib shared
|
||||
make -j$(nproc)
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
cd ..
|
||||
rm -f openssl-${OPENSSL_VER}.tar.gz
|
||||
@@ -42,7 +42,7 @@ tar xzf libarchive-3.4.1.tar.gz
|
||||
cd libarchive-3.4.1
|
||||
mkdir _bld && cd _bld
|
||||
cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
make -j$(nproc)
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
cd ../..
|
||||
rm -f libarchive-3.4.1.tar.gz
|
||||
@@ -54,7 +54,7 @@ tar xf protobuf-all-3.10.1.tar.gz
|
||||
cd protobuf-3.10.1
|
||||
./autogen.sh
|
||||
./configure
|
||||
make -j$(nproc)
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
ldconfig
|
||||
cd ..
|
||||
@@ -77,7 +77,7 @@ cmake \
|
||||
-DCARES_BUILD_TESTS=OFF \
|
||||
-DCARES_BUILD_CONTAINER_TESTS=OFF \
|
||||
..
|
||||
make -j$(nproc)
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
cd ../..
|
||||
rm -f c-ares-1.15.0.tar.gz
|
||||
@@ -97,7 +97,7 @@ cmake \
|
||||
-DgRPC_PROTOBUF_PROVIDER=package \
|
||||
-DProtobuf_USE_STATIC_LIBS=ON \
|
||||
..
|
||||
make -j$(nproc)
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
cd ../..
|
||||
rm -f xf v1.25.0.tar.gz
|
||||
@@ -114,7 +114,7 @@ if [ "${CI_USE}" = true ] ; then
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -G "Unix Makefiles" ..
|
||||
make -j$(nproc)
|
||||
make -j$(nproc) >> make_output.txt 2>&1
|
||||
make install
|
||||
cd ../..
|
||||
rm -f Release_1_8_16.tar.gz
|
||||
@@ -145,4 +145,3 @@ if [ "${CI_USE}" = true ] ; then
|
||||
pip install requests
|
||||
pip install https://github.com/codecov/codecov-python/archive/master.zip
|
||||
fi
|
||||
|
||||
|
||||
@@ -16,10 +16,14 @@ reflect these rules. Whenever possible, developers should refactor any
|
||||
levelization violations they find (by moving files or individual
|
||||
classes). At the very least, don't make things worse.
|
||||
|
||||
The table below summarizes the _desired_ division of modules. The levels
|
||||
are numbered from the bottom up with the lower level, lower numbered,
|
||||
more independent modules listed first, and the higher level, higher
|
||||
numbered modules with more dependencies listed later.
|
||||
The table below summarizes the _desired_ division of modules, based on the
|
||||
state of the rippled code when it was created. The levels are numbered from
|
||||
the bottom up with the lower level, lower numbered, more independent
|
||||
modules listed first, and the higher level, higher numbered modules with
|
||||
more dependencies listed later.
|
||||
|
||||
**tl;dr:** The modules listed first are more independent than the modules
|
||||
listed later.
|
||||
|
||||
| Level / Tier | Module(s) |
|
||||
|--------------|-----------------------------------------------|
|
||||
@@ -32,12 +36,13 @@ numbered modules with more dependencies listed later.
|
||||
| 07 | ripple/shamap ripple/overlay
|
||||
| 08 | ripple/app
|
||||
| 09 | ripple/rpc
|
||||
| 10 | test/jtx test/beast test/csf
|
||||
| 11 | test/unit_test
|
||||
| 12 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay
|
||||
| 13 | test
|
||||
| 14 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore
|
||||
| 15 | test/rpc test/app
|
||||
| 10 | ripple/perflog
|
||||
| 11 | test/jtx test/beast test/csf
|
||||
| 12 | test/unit_test
|
||||
| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay
|
||||
| 14 | test
|
||||
| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore
|
||||
| 16 | test/rpc test/app
|
||||
|
||||
(Note that `test` levelization is *much* less important and *much* less
|
||||
strictly enforced than `ripple` levelization, other than the requirement
|
||||
|
||||
@@ -11,7 +11,10 @@ Loop: ripple.app ripple.nodestore
|
||||
ripple.app > ripple.nodestore
|
||||
|
||||
Loop: ripple.app ripple.overlay
|
||||
ripple.overlay ~= ripple.app
|
||||
ripple.overlay == ripple.app
|
||||
|
||||
Loop: ripple.app ripple.peerfinder
|
||||
ripple.peerfinder ~= ripple.app
|
||||
|
||||
Loop: ripple.app ripple.rpc
|
||||
ripple.rpc > ripple.app
|
||||
@@ -28,20 +31,14 @@ Loop: ripple.basics ripple.json
|
||||
Loop: ripple.basics ripple.protocol
|
||||
ripple.protocol > ripple.basics
|
||||
|
||||
Loop: ripple.basics ripple.rpc
|
||||
ripple.rpc > ripple.basics
|
||||
|
||||
Loop: ripple.core ripple.net
|
||||
ripple.net > ripple.core
|
||||
|
||||
Loop: ripple.crypto ripple.protocol
|
||||
ripple.protocol > ripple.crypto
|
||||
|
||||
Loop: ripple.net ripple.rpc
|
||||
ripple.rpc > ripple.net
|
||||
|
||||
Loop: ripple.nodestore ripple.overlay
|
||||
ripple.overlay == ripple.nodestore
|
||||
ripple.overlay ~= ripple.nodestore
|
||||
|
||||
Loop: ripple.overlay ripple.rpc
|
||||
ripple.rpc ~= ripple.overlay
|
||||
|
||||
@@ -6,6 +6,7 @@ ripple.app > ripple.crypto
|
||||
ripple.app > ripple.json
|
||||
ripple.app > ripple.protocol
|
||||
ripple.app > ripple.resource
|
||||
ripple.app > ripple.server
|
||||
ripple.app > test.unit_test
|
||||
ripple.basics > ripple.beast
|
||||
ripple.conditions > ripple.basics
|
||||
@@ -47,12 +48,21 @@ ripple.peerfinder > ripple.basics
|
||||
ripple.peerfinder > ripple.beast
|
||||
ripple.peerfinder > ripple.core
|
||||
ripple.peerfinder > ripple.protocol
|
||||
ripple.perflog > ripple.basics
|
||||
ripple.perflog > ripple.beast
|
||||
ripple.perflog > ripple.core
|
||||
ripple.perflog > ripple.json
|
||||
ripple.perflog > ripple.nodestore
|
||||
ripple.perflog > ripple.protocol
|
||||
ripple.perflog > ripple.rpc
|
||||
ripple.protocol > ripple.beast
|
||||
ripple.protocol > ripple.crypto
|
||||
ripple.protocol > ripple.json
|
||||
ripple.resource > ripple.basics
|
||||
ripple.resource > ripple.beast
|
||||
ripple.resource > ripple.json
|
||||
ripple.resource > ripple.protocol
|
||||
ripple.rpc > ripple.basics
|
||||
ripple.rpc > ripple.beast
|
||||
ripple.rpc > ripple.core
|
||||
ripple.rpc > ripple.crypto
|
||||
@@ -117,8 +127,6 @@ test.core > ripple.server
|
||||
test.core > test.jtx
|
||||
test.core > test.toplevel
|
||||
test.core > test.unit_test
|
||||
test.crypto > ripple.beast
|
||||
test.crypto > ripple.crypto
|
||||
test.csf > ripple.basics
|
||||
test.csf > ripple.beast
|
||||
test.csf > ripple.consensus
|
||||
@@ -154,6 +162,7 @@ test.nodestore > ripple.basics
|
||||
test.nodestore > ripple.beast
|
||||
test.nodestore > ripple.core
|
||||
test.nodestore > ripple.nodestore
|
||||
test.nodestore > ripple.protocol
|
||||
test.nodestore > ripple.unity
|
||||
test.nodestore > test.jtx
|
||||
test.nodestore > test.toplevel
|
||||
@@ -163,6 +172,7 @@ test.overlay > ripple.basics
|
||||
test.overlay > ripple.beast
|
||||
test.overlay > ripple.core
|
||||
test.overlay > ripple.overlay
|
||||
test.overlay > ripple.peerfinder
|
||||
test.overlay > ripple.protocol
|
||||
test.overlay > ripple.shamap
|
||||
test.overlay > test.jtx
|
||||
|
||||
@@ -13,13 +13,18 @@ management tools.
|
||||
|
||||
## Dependencies
|
||||
|
||||
gcc-7 or later is required.
|
||||
gcc-8 or later is required.
|
||||
|
||||
Use `apt-get` to install the dependencies provided by the distribution
|
||||
|
||||
```
|
||||
$ apt-get update
|
||||
$ apt-get install -y gcc g++ wget git cmake pkg-config protobuf-compiler libprotobuf-dev libssl-dev
|
||||
$ apt-get install -y gcc g++ wget git cmake pkg-config libprotoc-dev protobuf-compiler libprotobuf-dev libssl-dev
|
||||
```
|
||||
|
||||
To build the software in reporting mode, install these additional dependencies:
|
||||
```
|
||||
$ apt-get install -y autoconf flex bison
|
||||
```
|
||||
|
||||
Advanced users can choose to install newer versions of gcc, or the clang compiler.
|
||||
@@ -31,9 +36,8 @@ protobuf will give errors.
|
||||
Boost 1.70 or later is required. We recommend downloading and compiling boost
|
||||
with the following process: After changing to the directory where
|
||||
you wish to download and compile boost, run
|
||||
|
||||
```
|
||||
$ wget https://dl.bintray.com/boostorg/release/1.70.0/source/boost_1_70_0.tar.gz
|
||||
$ wget https://boostorg.jfrog.io/artifactory/main/release/1.70.0/source/boost_1_70_0.tar.gz
|
||||
$ tar -xzf boost_1_70_0.tar.gz
|
||||
$ cd boost_1_70_0
|
||||
$ ./bootstrap.sh
|
||||
@@ -139,7 +143,7 @@ testing and running.
|
||||
* `-Dsan=thread` to enable the thread sanitizer with clang
|
||||
* `-Dsan=address` to enable the address sanitizer with clang
|
||||
* `-Dstatic=ON` to enable static linking library dependencies
|
||||
* `-Dreporting=ON` to build code neccessary for reporting mode (defaults to OFF)
|
||||
* `-Dreporting=ON` to build code necessary for reporting mode (defaults to OFF)
|
||||
|
||||
Several other infrequently used options are available - run `ccmake` or
|
||||
`cmake-gui` for a list of all options.
|
||||
@@ -156,7 +160,7 @@ the `-j` parameter in this example tells the build tool to compile several
|
||||
files in parallel. This value should be chosen roughly based on the number of
|
||||
cores you have available and/or want to use for building.
|
||||
|
||||
When the build completes succesfully, you will have a `rippled` executable in
|
||||
When the build completes successfully, you will have a `rippled` executable in
|
||||
the current directory, which can be used to connect to the network (when
|
||||
properly configured) or to run unit tests.
|
||||
|
||||
@@ -235,5 +239,3 @@ change the `/opt/local` module path above to match your chosen installation pref
|
||||
`rippled` builds a set of unit tests into the server executable. To run these unit
|
||||
tests after building, pass the `--unittest` option to the compiled `rippled`
|
||||
executable. The executable will exit with summary info after running the unit tests.
|
||||
|
||||
|
||||
|
||||
@@ -1,231 +1,3 @@
|
||||
# macos Build Instructions
|
||||
|
||||
## Important
|
||||
|
||||
We don't recommend macos for rippled production use at this time. Currently, the
|
||||
Ubuntu platform has received the highest level of quality assurance and
|
||||
testing. That said, macos is suitable for many development/test tasks.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You'll need macos 10.8 or later.
|
||||
|
||||
To clone the source code repository, create branches for inspection or
|
||||
modification, build rippled using clang, and run the system tests you will need
|
||||
these software components:
|
||||
|
||||
* [XCode](https://developer.apple.com/xcode/)
|
||||
* [Homebrew](http://brew.sh/)
|
||||
* [Boost](http://boost.org/)
|
||||
* other misc utilities and libraries installed via homebrew
|
||||
|
||||
## Install Software
|
||||
|
||||
### Install XCode
|
||||
|
||||
If not already installed on your system, download and install XCode using the
|
||||
appstore or by using [this link](https://developer.apple.com/xcode/).
|
||||
|
||||
For more info, see "Step 1: Download and Install the Command Line Tools"
|
||||
[here](http://www.moncefbelyamani.com/how-to-install-xcode-homebrew-git-rvm-ruby-on-mac)
|
||||
|
||||
The command line tools can be installed through the terminal with the command:
|
||||
|
||||
```
|
||||
xcode-select --install
|
||||
```
|
||||
|
||||
### Install Homebrew
|
||||
|
||||
> "[Homebrew](http://brew.sh/) installs the stuff you need that Apple didn’t."
|
||||
|
||||
Open a terminal and type:
|
||||
|
||||
```
|
||||
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
|
||||
```
|
||||
|
||||
For more info, see "Step 2: Install Homebrew"
|
||||
[here](http://www.moncefbelyamani.com/how-to-install-xcode-homebrew-git-rvm-ruby-on-mac#step-2)
|
||||
|
||||
### Install Dependencies Using Homebrew
|
||||
|
||||
`brew` will generally install the latest stable version of any package, which
|
||||
should satisfy the the minimum version requirements for rippled.
|
||||
|
||||
```
|
||||
brew update
|
||||
brew install git cmake pkg-config protobuf openssl ninja
|
||||
```
|
||||
|
||||
### Build Boost
|
||||
|
||||
Boost 1.70 or later is required.
|
||||
|
||||
We want to compile boost with clang/libc++
|
||||
|
||||
Download [a release](https://dl.bintray.com/boostorg/release/1.70.0/source/boost_1_70_0.tar.bz2)
|
||||
|
||||
Extract it to a folder, making note of where, open a terminal, then:
|
||||
|
||||
```
|
||||
./bootstrap.sh
|
||||
./b2 cxxflags="-std=c++14" visibility=global
|
||||
```
|
||||
|
||||
Create an environment variable `BOOST_ROOT` in one of your `rc` files, pointing
|
||||
to the root of the extracted directory.
|
||||
|
||||
### Dependencies for Building Source Documentation
|
||||
|
||||
Source code documentation is not required for running/debugging rippled. That
|
||||
said, the documentation contains some helpful information about specific
|
||||
components of the application. For more information on how to install and run
|
||||
the necessary components, see [this document](../../docs/README.md)
|
||||
|
||||
## Build
|
||||
|
||||
### Clone the rippled repository
|
||||
|
||||
From a shell:
|
||||
|
||||
```
|
||||
git clone git@github.com:ripple/rippled.git
|
||||
cd rippled
|
||||
```
|
||||
|
||||
For a stable release, choose the `master` branch or one of the tagged releases
|
||||
listed on [GitHub](https://github.com/ripple/rippled/releases GitHub).
|
||||
|
||||
```
|
||||
git checkout master
|
||||
```
|
||||
|
||||
or to test the latest release candidate, choose the `release` branch.
|
||||
|
||||
```
|
||||
git checkout release
|
||||
```
|
||||
|
||||
If you are doing development work and want the latest set of untested
|
||||
features, you can consider using the `develop` branch instead.
|
||||
|
||||
```
|
||||
git checkout develop
|
||||
```
|
||||
|
||||
### Configure Library Paths
|
||||
|
||||
If you didn't persistently set the `BOOST_ROOT` environment variable to the
|
||||
root of the extracted directory above, then you should set it temporarily.
|
||||
|
||||
For example, assuming your username were `Abigail` and you extracted Boost
|
||||
1.70.0 in `/Users/Abigail/Downloads/boost_1_70_0`, you would do for any
|
||||
shell in which you want to build:
|
||||
|
||||
```
|
||||
export BOOST_ROOT=/Users/Abigail/Downloads/boost_1_70_0
|
||||
```
|
||||
|
||||
### Generate and Build
|
||||
|
||||
For simple command line building we recommend using the *Unix Makefile* or
|
||||
*Ninja* generator with cmake. All builds should be done in a separate directory
|
||||
from the source tree root (a subdirectory is fine). For example, from the root
|
||||
of the ripple source tree:
|
||||
|
||||
```
|
||||
mkdir my_build
|
||||
cd my_build
|
||||
```
|
||||
|
||||
followed by:
|
||||
|
||||
```
|
||||
cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug ..
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```
|
||||
cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Debug ..
|
||||
```
|
||||
|
||||
`CMAKE_BUILD_TYPE` can be changed as desired for `Debug` vs.
|
||||
`Release` builds (all four standard cmake build types are supported).
|
||||
|
||||
Once you have generated the build system, you can run the build via cmake:
|
||||
|
||||
```
|
||||
cmake --build . -- -j 4
|
||||
```
|
||||
|
||||
the `-j` parameter in this example tells the build tool to compile several
|
||||
files in parallel. This value should be chosen roughly based on the number of
|
||||
cores you have available and/or want to use for building.
|
||||
|
||||
When the build completes succesfully, you will have a `rippled` executable in
|
||||
the current directory, which can be used to connect to the network (when
|
||||
properly configured) or to run unit tests.
|
||||
|
||||
If you prefer to have an XCode project to use for building, ask CMake to
|
||||
generate that instead:
|
||||
|
||||
```
|
||||
cmake -GXcode ..
|
||||
```
|
||||
|
||||
After generation succeeds, the xcode project file can be opened and used to
|
||||
build/debug. However, just as with other generators, cmake knows how to build
|
||||
using the xcode project as well:
|
||||
|
||||
```
|
||||
cmake --build . -- -jobs 4
|
||||
```
|
||||
|
||||
This will invoke the `xcodebuild` utility to compile the project. See `xcodebuild
|
||||
--help` for details about build options.
|
||||
|
||||
#### Optional installation
|
||||
|
||||
If you'd like to install the artifacts of the build, we have preliminary
|
||||
support for standard CMake installation targets. We recommend explicitly
|
||||
setting the installation location when configuring, e.g.:
|
||||
|
||||
```
|
||||
cmake -DCMAKE_INSTALL_PREFIX=/opt/local ..
|
||||
```
|
||||
|
||||
(change the destination as desired), and then build the `install` target:
|
||||
|
||||
```
|
||||
cmake --build . --target install -- -jobs 4
|
||||
```
|
||||
|
||||
#### Options During Configuration:
|
||||
|
||||
The CMake file defines a number of configure-time options which can be
|
||||
examined by running `cmake-gui` or `ccmake` to generated the build. In
|
||||
particular, the `unity` option allows you to select between the unity and
|
||||
non-unity builds. `unity` builds are faster to compile since they combine
|
||||
multiple sources into a single compiliation unit - this is the default if you
|
||||
don't specify. `nounity` builds can be helpful for detecting include omissions
|
||||
or for finding other build-related issues, but aren't generally needed for
|
||||
testing and running.
|
||||
|
||||
* `-Dunity=ON` to enable/disable unity builds (defaults to ON)
|
||||
* `-Dassert=ON` to enable asserts
|
||||
* `-Djemalloc=ON` to enable jemalloc support for heap checking
|
||||
* `-Dsan=thread` to enable the thread sanitizer with clang
|
||||
* `-Dsan=address` to enable the address sanitizer with clang
|
||||
|
||||
Several other infrequently used options are available - run `ccmake` or
|
||||
`cmake-gui` for a list of all options.
|
||||
|
||||
## Unit Tests (Recommended)
|
||||
|
||||
`rippled` builds a set of unit tests into the server executable. To run these unit
|
||||
tests after building, pass the `--unittest` option to the compiled `rippled`
|
||||
executable. The executable will exit with summary info after running the unit tests.
|
||||
|
||||
# macOS Build Instructions
|
||||
|
||||
[Build and Run rippled on macOS](https://xrpl.org/build-run-rippled-macos.html)
|
||||
|
||||
@@ -1,10 +1,31 @@
|
||||
cmake_minimum_required (VERSION 3.9.0)
|
||||
cmake_minimum_required (VERSION 3.16)
|
||||
|
||||
if (POLICY CMP0074)
|
||||
cmake_policy(SET CMP0074 NEW)
|
||||
endif ()
|
||||
|
||||
project (rippled)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
|
||||
# make GIT_COMMIT_HASH define available to all sources
|
||||
find_package(Git)
|
||||
if(Git_FOUND)
|
||||
execute_process(COMMAND ${GIT_EXECUTABLE} describe --always --abbrev=40
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
|
||||
if(gch)
|
||||
set(GIT_COMMIT_HASH "${gch}")
|
||||
message(STATUS gch: ${GIT_COMMIT_HASH})
|
||||
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
|
||||
endif()
|
||||
endif() #git
|
||||
|
||||
if (thread_safety_analysis)
|
||||
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
|
||||
add_compile_options("-stdlib=libc++")
|
||||
add_link_options("-stdlib=libc++")
|
||||
endif()
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake")
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps")
|
||||
|
||||
77
LICENSE
77
LICENSE
@@ -1,77 +0,0 @@
|
||||
The accompanying files under various copyrights.
|
||||
|
||||
Copyright (c) 2012, 2013, 2014 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
The accompanying files incorporate work covered by the following copyright
|
||||
and previous license notice:
|
||||
|
||||
Copyright (c) 2011 Arthur Britto, David Schwartz, Jed McCaleb,
|
||||
Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant
|
||||
|
||||
Some code from Raw Material Software, Ltd., provided under the terms of the
|
||||
ISC License. See the corresponding source files for more details.
|
||||
Copyright (c) 2013 - Raw Material Software Ltd.
|
||||
Please visit http://www.juce.com
|
||||
|
||||
Some code from ASIO examples:
|
||||
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
Some code from Bitcoin:
|
||||
// Copyright (c) 2009-2010 Satoshi Nakamoto
|
||||
// Copyright (c) 2011 The Bitcoin developers
|
||||
// Distributed under the MIT/X11 software license, see the accompanying
|
||||
// file license.txt or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
Some code from Tom Wu:
|
||||
This software is covered under the following copyright:
|
||||
|
||||
/*
|
||||
* Copyright (c) 2003-2005 Tom Wu
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
|
||||
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
|
||||
* INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER
|
||||
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF
|
||||
* THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT
|
||||
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*
|
||||
* In addition, the following condition applies:
|
||||
*
|
||||
* All redistributions must retain an intact copy of this copyright notice
|
||||
* and disclaimer.
|
||||
*/
|
||||
|
||||
Address all questions regarding this license to:
|
||||
|
||||
Tom Wu
|
||||
tjw@cs.Stanford.EDU
|
||||
17
LICENSE.md
Normal file
17
LICENSE.md
Normal file
@@ -0,0 +1,17 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant.
|
||||
Copyright (c) 2012-2020, the XRP Ledger developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
103
README.md
103
README.md
@@ -1,18 +1,112 @@
|
||||
# XRP Ledger Side chain Branch
|
||||
|
||||
## Warning
|
||||
|
||||
This is not the main branch of the XRP Ledger. This branch supports side chains
|
||||
on the XRP ledger and integrates an implementation of side chain federators.
|
||||
This is a developer prerelease and it should not be used for production or to
|
||||
transfer real value. Consider this "alpha" quality software. There will be bugs.
|
||||
See "Status" for a fuller description.
|
||||
|
||||
The latest production code for the XRP ledger is on the "master" branch.
|
||||
|
||||
Until this branch is merged with the mainline branch, it will periodically be
|
||||
rebased on that branch and force pushed to github.
|
||||
|
||||
## What are side chains?
|
||||
|
||||
Side chains are independent ledgers. They can have their own transaction types,
|
||||
their own UNL list (or even their own consensus algorithm), and their own set of
|
||||
other unique features (like a smart contracts layer). What's special about them
|
||||
is there is a way to transfer assets from the XRP ledger to the side chain, and
|
||||
a way to return those assets back from the side chain to the XRP ledger. Both
|
||||
XRP and issued assets may be exchanged.
|
||||
|
||||
The motivation for creating a side chain is to implement an idea that may not be
|
||||
a great fit for the main XRP ledger, or may take a long time before such a
|
||||
feature is adopted by the main XRP ledger. The advantage of a side chain over a
|
||||
brand new ledger is that it allows the side chain to immediately use tokens with real
|
||||
monetary value.
|
||||
|
||||
This implementation is meant to support side chains that are similar to the XRP
|
||||
ledger and use the XRP ledger as the main chain. The idea is to develop a new
|
||||
side chain: first, this code will be forked and the new features specific to the
|
||||
new chain will be implemented.
|
||||
|
||||
## Status
|
||||
|
||||
All the functionality needed to build side chains should be complete. However,
|
||||
it has not been well tested or polished.
|
||||
|
||||
In particular, all of the following are built:
|
||||
|
||||
* Cross chain transactions for both XRP and Issued Assets
|
||||
* Refunds if transactions fail
|
||||
* Allowing federators to rejoin a network
|
||||
* Detecting and handling when federators fall too far behind in processing
|
||||
transactions
|
||||
* A python library to easily create configuration files for testing side chains
|
||||
and spin up side chains on a local machine
|
||||
* Python scripts to test side chains
|
||||
* An interactive shell to explore side chain functionality
|
||||
|
||||
The biggest missing pieces are:
|
||||
|
||||
* Testing: While the functionality is there, it has just begun to be tested.
|
||||
There will be bugs. Even horrible and embarrassing bugs. Of course, this will
|
||||
improve as testing progresses.
|
||||
|
||||
* Tooling: There is a python library and an interactive shell that was built to
|
||||
help development. However, these tools are geared to run a test network on a
|
||||
local machine. They are not geared to new users or to production systems.
|
||||
Better tooling is coming.
|
||||
|
||||
* Documentation: There is documentation that describes the technical details of
|
||||
how side chains work, how to run the python scripts to set up side chains on
|
||||
the local machine, and the changes to the configuration files. However, like
|
||||
the tooling, this is not geared to new users or production systems. Better
|
||||
documentation is coming. In particular, good documentation for how to set up a
|
||||
production side chain - or even a test net that doesn't run on a local
|
||||
machine - needs to be written.
|
||||
|
||||
## Getting Started
|
||||
|
||||
See the instructions [here](docs/sidechain/GettingStarted.md) for how to
|
||||
run an interactive shell that will spin up a set of federators on your local
|
||||
machine and allow you to transfer assets between the main chain and a side
|
||||
chain.
|
||||
|
||||
After setting things up and completing a cross chain transaction with the
|
||||
"getting started" script above, it may be useful to browse some other
|
||||
documentation:
|
||||
|
||||
* [This](bin/sidechain/python/README.md) document describes the scripts and
|
||||
python modules used to test and explore side chains on your local machine.
|
||||
|
||||
* [This](docs/sidechain/configFile.md) document describes the new stanzas in the
|
||||
config file needed for side chains.
|
||||
|
||||
* [This](docs/sidechain/federatorAccountSetup.md) document describes how to set
|
||||
up the federator accounts if not using the python scripts.
|
||||
|
||||
* [This](docs/sidechain/design.md) document describes the low-level details for
|
||||
how side chains work.
|
||||
|
||||
# The XRP Ledger
|
||||
|
||||
The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer servers. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator.
|
||||
The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator.
|
||||
|
||||
## XRP
|
||||
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. Its creators gifted 80 billion XRP to a company, now called [Ripple](https://ripple.com/), to develop the XRP Ledger and its ecosystem. Ripple uses XRP to help build the Internet of Value, ushering in a world in which money moves as fast and efficiently as information does today.
|
||||
|
||||
## rippled
|
||||
The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE). The `rippled` server is written primarily in C++ and runs on a variety of platforms.
|
||||
The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).
|
||||
|
||||
### Build from Source
|
||||
|
||||
* [Linux](Builds/linux/README.md)
|
||||
* [Mac](Builds/macos/README.md)
|
||||
* [Windows](Builds/VisualStudio2017/README.md)
|
||||
* [Mac](Builds/macos/README.md) (Not recommended for production)
|
||||
* [Windows](Builds/VisualStudio2017/README.md) (Not recommended for production)
|
||||
|
||||
## Key Features of the XRP Ledger
|
||||
|
||||
@@ -56,3 +150,4 @@ git-subtree. See those directories' README files for more details.
|
||||
* [XRP Ledger Dev Portal](https://xrpl.org/)
|
||||
* [Setup and Installation](https://xrpl.org/install-rippled.html)
|
||||
* [Source Documentation (Doxygen)](https://ripple.github.io/rippled)
|
||||
* [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi)
|
||||
|
||||
163
RELEASENOTES.md
163
RELEASENOTES.md
@@ -2,13 +2,174 @@
|
||||
|
||||

|
||||
|
||||
This document contains the release notes for `rippled`, the reference server implementation of the Ripple protocol. To learn more about how to build, run or update a `rippled` server, visit https://xrpl.org/install-rippled.html
|
||||
This document contains the release notes for `rippled`, the reference server implementation of the XRP Ledger protocol. To learn more about how to build, run or update a `rippled` server, visit https://xrpl.org/install-rippled.html
|
||||
|
||||
|
||||
Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/ripple/rippled/issues/new/choose)
|
||||
|
||||
# Change log
|
||||
|
||||
- API version 2 will now return `signer_lists` in the root of the `account_info` response, no longer nested under `account_data`.
|
||||
|
||||
# Releases
|
||||
|
||||
## Version 1.8.5
|
||||
This is the 1.8.5 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release includes fixes and updates for stability and security, and improvements to build scripts. There are no user-facing API or protocol changes in this release.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
This release contains the following bug fixes and under-the-hood improvements:
|
||||
|
||||
- **Correct TaggedPointer move constructor:** Fixes a bug in unused code for the TaggedPointer class. The old code would fail if a caller explicitly tried to remove a child that is not actually part of the node. (227a12d)
|
||||
|
||||
- **Ensure protocol buffer prerequisites are present:** The build scripts and packages now properly handle Protobuf packages and various packages. Prior to this change, building on Ubuntu 21.10 Impish Indri would fail unless the `libprotoc-dev` package was installed. (e06465f)
|
||||
|
||||
- **Improve handling of endpoints during peer discovery.** This hardens and improves handling of incoming messages on the peer protocol. (289bc0a)
|
||||
|
||||
- **Run tests on updated linux distros:** Test builds now run on Rocky Linux 8, Fedora 34 and 35, Ubuntu 18, 20, and 22, and Debian 9, 10, and 11. (a9ee802)
|
||||
|
||||
- **Avoid dereferencing empty optional in ReportingETL:** Fixes a bug in Reporting Mode that could dereference an empty optional value when throwing an error. (cdc215d)
|
||||
|
||||
- **Correctly add GIT_COMMIT_HASH into version string:** When building the server from a non-tagged release, the build files now add the commit ID in a way that follows the semantic-versioning standard, and correctly handle the case where the commit hash ID cannot be retrieved. (d23d37f)
|
||||
|
||||
- **Update RocksDB to version 6.27.3:** Updates the version of RocksDB included in the server from 6.7.3 (which was released on 2020-03-18) to 6.27.3 (released 2021-12-10).
|
||||
|
||||
|
||||
|
||||
## Version 1.8.4
|
||||
This is the 1.8.4 release of `rippled`, the reference implementation of the XRP Ledger protocol.
|
||||
|
||||
This release corrects a technical flaw introduced with 1.8.3 that may result in failures if the newly-introduced 'fast loading' is enabled. The release also adjusts default parameters used to configure the pathfinding engine to reduce resource usage.
|
||||
|
||||
### Bug Fixes
|
||||
- **Adjust mutex scope in `walkMapParallel`**: This commit corrects a technical flaw introduced with commit [7c12f0135897361398917ad2c8cda888249d42ae] that would result in undefined behavior if the server operator configured their server to use the 'fast loading' mechanism introduced with 1.8.3.
|
||||
|
||||
- **Adjust pathfinding configuration defaults**: This commit adjusts the default configuration of the pathfinding engine, to account for the size of the XRP Ledger mainnet. Unless explicitly overridden, the changes mean that pathfinding operations will return fewer, shallower paths than previous releases.
|
||||
|
||||
|
||||
## Version 1.8.3
|
||||
This is the 1.8.3 release of `rippled`, the reference implementation of the XRP Ledger protocol.
|
||||
|
||||
This release implements changes that improve the syncing performance of peers on the network, adds countermeasures to several routines involving LZ4 to defend against CVE-2021-3520, corrects a minor technical flaw that would result in the server not using a cache for nodestore operations, and adjusts tunable values to optimize disk I/O.
|
||||
|
||||
### Summary of Issues
|
||||
Recently, servers in the XRP Ledger network have been taking an increasingly long time to sync back to the network after restarting. This is one of several releases which will be made to improve on this issue.
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- **Parallel ledger loader & I/O performance improvements**: This commit makes several changes that, together, should decrease the time needed for a server to sync to the network. To make full use of this change, `rippled` needs to be using storage with high IOPS and operators need to explicitly enable this behavior by adding the following to their config file, under the `[node_db]` stanza:
|
||||
|
||||
[node_db]
|
||||
...
|
||||
fast_load=1
|
||||
|
||||
Note that when 'fast loading' is enabled the server will not open RPC and WebSocket interfaces until after the initial load is completed. Because of this, it may appear unresponsive or down.
|
||||
|
||||
- **Detect CVE-2021-3520 when decompressing using LZ4**: This commit adds code to detect LZ4 payloads that may result in out-of-bounds memory accesses.
|
||||
|
||||
- **Provide sensible default values for nodestore cache:**: The nodestore includes a built-in cache to reduce the disk I/O load but, by default, this cache was not initialized unless it was explicitly configured by the server operator. This commit introduces sensible defaults based on the server's configured node size.
|
||||
|
||||
- **Adjust the number of concurrent ledger data jobs**: Processing a large amount of data at once can effectively bottleneck a server's I/O subsystem. This commit helps optimize I/O performance by controlling how many jobs can concurrently process ledger data.
|
||||
|
||||
- **Two small SHAMapSync improvements**: This commit makes minor changes to optimize the way memory is used and control the amount of background I/O performed when attempting to fetch missing `SHAMap` nodes.
|
||||
|
||||
## Version 1.8.2
|
||||
Ripple has released version 1.8.2 of rippled, the reference server implementation of the XRP Ledger protocol. This release addresses the full transaction queues and elevated transaction fees issue observed on the XRP ledger, and also provides some optimizations and small fixes to improve the server's performance overall.
|
||||
|
||||
### Summary of Issues
|
||||
Recently, servers in the XRP Ledger network have had full transaction queues and transactions paying low fees have mostly not been able to be confirmed through the queue. After investigation, it was discovered that a large influx of transactions to the network caused it to raise the transaction costs to be proposed in the next ledger block, and defer transactions paying lower costs to later ledgers. The first part worked as designed, but deferred transactions were not being confirmed as the ledger had capacity to process them.
|
||||
|
||||
The root cause was that there were very many low-cost transactions that different servers in the network received in a different order due to incidental differences in timing or network topology, which caused validators to propose different sets of low-cost transactions from the queue. Since none of these transactions had support from a majority of validators, they were removed from the proposed transaction set. Normally, any transactions removed from a proposed transaction set are supposed to be retried in the next ledger, but servers attempted to put these deferred transactions into their transaction queues first, which had filled up. As a result, the deferred transactions were discarded, and the network was only able to confirm transactions that paid high costs.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- **Address elevated transaction fees**: This change addresses the full queue problems in two ways. First, it puts deferred transactions directly into the open ledger, rather than transaction queue. This reverts a subset of the changes from [ximinez@62127d7](https://github.com/ximinez/rippled/commit/62127d725d801641bfaa61dee7d88c95e48820c5). A transaction that is in the open ledger but doesn't get validated should stay in the open ledger so that it can be proposed again right away. Second, it changes the order in which transactions are pulled from the transaction queue to increase the overlap in servers' initial transaction consensus proposals. Like the old rules, transactions paying higher fee levels are selected first. Unlike the old rules, transactions paying the same fee level are ordered by transaction ID / hash ascending. (Previously, transactions paying the same fee level were unsorted, resulting in each server having a different order.)
|
||||
|
||||
- **Add ignore_default option to account_lines API**: This flag, if present, suppresses the output of incoming trust lines in the default state. This is primarily motivated by observing that users often have many unwanted incoming trust lines in a default state, which are not useful in the vast majority of cases. Being able to suppress those when doing `account_lines` saves bandwidth and resources. ([#3980](https://github.com/ripple/rippled/pull/3980))
|
||||
|
||||
- **Make I/O and prefetch worker threads configurable**: This commit adds the ability to specify **io_workers** and **prefetch_workers** in the config file which can be used to specify the number of threads for processing raw inbound and outbound IO and configure the number of threads for performing node store prefetching. ([#3994](https://github.com/ripple/rippled/pull/3994))
|
||||
|
||||
- **Enforce account RPC limits by objects traversed**: This changes the way the account_objects API method counts and limits the number of objects it returns. Instead of limiting results by the number of objects found, it counts by the number of objects traversed. Additionally, the default and maximum limits for non-admin connections have been decreased. This reduces the amount of work that one API call can do so that public API servers can share load more effectively. ([#4032](https://github.com/ripple/rippled/pull/4032))
|
||||
|
||||
- **Fix a crash on shutdown**: The NuDB backend class could throw an error in its destructor, resulting in a crash while the server was shutting down gracefully. This crash was harmless but resulted in false alarms and noise when tracking down other possible crashes. ([#4017](https://github.com/ripple/rippled/pull/4017))
|
||||
|
||||
- **Improve reporting of job queue in admin server_info**: The server_info command, when run with admin permissions, provides information about jobs in the server's job queue. This commit provides more descriptive names and more granular categories for many jobs that were previously all identified as "clientCommand". ([#4031](https://github.com/ripple/rippled/pull/4031))
|
||||
|
||||
- **Improve full & compressed inner node deserialization**: Remove a redundant copy operation from low-level SHAMap deserialization. ([#4004](https://github.com/ripple/rippled/pull/4004))
|
||||
|
||||
- **Reporting mode: only forward to P2P nodes that are synced**: Previously, reporting mode servers forwarded to any of their configured P2P nodes at random. This commit improves the selection so that it only chooses from P2P nodes that are fully synced with the network. ([#4028](https://github.com/ripple/rippled/pull/4028))
|
||||
|
||||
- **Improve handling of HTTP X-Forwarded-For and Forwarded headers**: Fixes the way the server handles IPv6 addresses in these HTTP headers. ([#4009](https://github.com/ripple/rippled/pull/4009), [#4030](https://github.com/ripple/rippled/pull/4030))
|
||||
|
||||
- **Other minor improvements to logging and Reporting Mode.**
|
||||
|
||||
|
||||
## Version 1.8.0
|
||||
Ripple has released version 1.8.0 of rippled, the reference server implementation of the XRP Ledger protocol. This release brings several features and improvements.
|
||||
|
||||
### New and Improved Features
|
||||
|
||||
- **Improve History Sharding**: Shards of ledger history are now assembled in a deterministic way so that any server can make a binary-identical shard for a given range of ledgers. This makes it possible to retrieve a shard from multiple sources in parallel, then verify its integrity by comparing checksums with peers' checksums for the same shard. Additionally, there's a new admin RPC command to import ledger history from the shard store, and the crawl_shards command has been expanded with more info. ([#2688](https://github.com/ripple/rippled/issues/2688), [#3726](https://github.com/ripple/rippled/pull/3726), [#3875](https://github.com/ripple/rippled/pull/3875))
|
||||
- **New CheckCashMakesTrustLine Amendment**: If enabled, this amendment will change the CheckCash transaction type so that cashing a check for an issued token automatically creates a trust line to hold the token, similar to how purchasing a token in the decentralized exchange creates a trust line to hold the token. This change provides a way for issuers to send tokens to a user before that user has set up a trust line, but without forcing anyone to hold tokens they don't want. ([#3823](https://github.com/ripple/rippled/pull/3823))
|
||||
- **Automatically determine the node size**: The server now selects an appropriate `[node_size]` configuration value by default if it is not explicitly specified. This parameter tunes various settings to the specs of the hardware that the server is running on, especially the amount of RAM and the number of CPU threads available in the system. Previously the server always chose the smallest value by default.
|
||||
- **Improve transaction relaying logic**: Previously, the server relayed every transaction to all its peers (except the one that it received the transaction from). To reduce redundant messages, the server now relays transactions to a subset of peers using a randomized algorithm. Peers can determine whether there are transactions they have not seen and can request them from a peer that has them. It is expected that this feature will further reduce the bandwidth needed to operate a server.
|
||||
- **Improve the Byzantine validator detector**: This expands the detection capabilities of the Byzantine validation detector. Previously, the server only monitored validators on its own UNL. Now, the server monitors for Byzantine behavior in all validations it sees.
|
||||
- **Experimental tx stream with history for sidechains**: Adds an experimental subscription stream for sidechain federators to track messages on the main chain in canonical order. This stream is expected to change or be replaced in future versions as work on sidechains matures.
|
||||
- **Support Debian 11 Bullseye**: This is the first release that is compatible with Debian Linux version 11.x, "Bullseye." The .deb packages now use absolute paths only, for compatibility with Bullseye's stricter package requirements. ([#3909](https://github.com/ripple/rippled/pull/3909))
|
||||
- **Improve Cache Performance**: The server uses a new storage structure for several in-memory caches for greatly improved overall performance. The process of purging old data from these caches, called "sweeping", was time-consuming and blocked other important activities necessary for maintaining ledger state and participating in consensus. The new structure divides the caches into smaller partitions that can be swept in parallel.
|
||||
- **Amendment default votes:** Introduces variable default votes per amendment. Previously the server always voted "yes" on any new amendment unless an admin explicitly configured a voting preference for that amendment. Now the server's default vote can be "yes" or "no" in the source code. This should allow a safer, more gradual roll-out of new amendments, as new releases can be configured to understand a new amendment but not vote for it by default. ([#3877](https://github.com/ripple/rippled/pull/3877))
|
||||
- **More fields in the `validations` stream:** The `validations` subscription stream in the API now reports additional fields that were added to validation messages by the HardenedValidations amendment. These fields make it easier to detect misconfigurations such as multiple servers sharing a validation key pair. ([#3865](https://github.com/ripple/rippled/pull/3865))
|
||||
- **Reporting mode supports `validations` and `manifests` streams:** In the API it is now possible to connect to these streams when connected to a server running in reporting mode. Previously, attempting to subscribe to these streams on a reporting server failed with the error `reportingUnsupported`. ([#3905](https://github.com/ripple/rippled/pull/3905))
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- **Clarify the safety of NetClock::time_point arithmetic**: * NetClock::rep is uint32_t and can be error-prone when used with subtraction. * Fixes [#3656](https://github.com/ripple/rippled/pull/3656)
|
||||
- **Fix out-of-bounds reserve, and some minor optimizations**
|
||||
- **Fix nested locks in ValidatorSite**
|
||||
- **Fix clang warnings about copies vs references**
|
||||
- **Fix reporting mode build issue**
|
||||
- **Fix potential deadlock in Validator sites**
|
||||
- **Use libsecp256k1 instead of OpenSSL for key derivation**: The deterministic key derivation code was still using calls to OpenSSL. This replaces the OpenSSL-based routines with new libsecp256k1-based implementations
|
||||
- **Improve NodeStore to ShardStore imports**: This runs the import process in a background thread while preventing online_delete from removing ledgers pending import
|
||||
- **Simplify SHAMapItem construction**: The existing class offered several constructors which were mostly unnecessary. This eliminates all existing constructors and introduces a single new one, taking a `Slice`. The internal buffer is switched from `std::vector` to `Buffer` to save a minimum of 8 bytes (plus the buffer slack that is inherent in `std::vector`) per SHAMapItem instance.
|
||||
- **Redesign stoppable objects**: Stoppable is no longer an abstract base class, but a pattern, modeled after the well-understood `std::thread`. The immediate benefits are less code, less synchronization, less runtime work, and (subjectively) more readable code. The end goal is to adhere to RAII in our object design, and this is one necessary step on that path.
|
||||
|
||||
## Version 1.7.3
|
||||
|
||||
This is the 1.7.3 release of `rippled`, the reference implementation of the XRP Ledger protocol. This release addresses an OOB memory read identified by Guido Vranken, as well as an unrelated issue identified by the Ripple C++ team that could result in incorrect use of SLEs. Additionally, this version also introduces the `NegativeUNL` amendment, which corresponds to the feature which was introduced with the 1.6.0 release.
|
||||
|
||||
## Action Required
|
||||
|
||||
If you operate an XRP Ledger server, then you should upgrade to version 1.7.3 at your earliest convenience to mitigate the issues addressed in this hotfix. If a sufficient majority of servers on the network upgrade, the `NegativeUNL` amendment may gain a majority, at which point a two week activation countdown will begin. If the `NegativeUNL` amendment activates, servers running versions of `rippled` prior to 1.7.3 will become [amendment blocked](https://xrpl.org/amendments.html#amendment-blocked).
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- **Improve SLE usage in check cashing**: Fixes a situation which could result in the incorrect use of SLEs.
|
||||
- **Address OOB in base58 decoder**: Corrects a technical flaw that could allow an out-of-bounds memory read in the Base58 decoder.
|
||||
- **Add `NegativeUNL` as a supported amendment**: Introduces an amendment for the Negative UNL feature introduced in `rippled` 1.6.0.
|
||||
|
||||
## Version 1.7.2
|
||||
|
||||
This is the 1.7.2 release of rippled, the reference server implementation of the XRP Ledger protocol. This release protects against the security issue [CVE-2021-3499](https://www.openssl.org/news/secadv/20210325.txt) affecting OpenSSL, adds an amendment to fix an issue with small offers not being properly removed from order books in some cases, and includes various other minor fixes.
|
||||
Version 1.7.2 supersedes version 1.7.1 and adds fixes for more issues that were discovered during the release cycle.
|
||||
|
||||
## Action Required
|
||||
|
||||
This release introduces a new amendment to the XRP Ledger protocol: `fixRmSmallIncreasedQOffers`. This amendment is now open for voting according to the XRP Ledger's amendment process, which enables protocol changes following two weeks of >80% support from trusted validators.
|
||||
If you operate an XRP Ledger server, then you should upgrade to version 1.7.2 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network.
|
||||
If you operate an XRP Ledger validator, please learn more about this amendment so you can make informed decisions about how your validator votes. If you take no action, your validator begins voting in favor of any new amendments as soon as it has been upgraded.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- **fixRmSmallIncreasedQOffers Amendment:** This amendment fixes an issue where certain small offers can be left at the tip of an order book without being consumed or removed when appropriate and causes some payments and Offers to fail when they should have succeeded [(#3827)](https://github.com/ripple/rippled/pull/3827).
|
||||
- **Adjust OpenSSL defaults and mitigate CVE-2021-3499:** Prior to this fix, servers compiled against a vulnerable version of OpenSSL could have a crash triggered by a malicious network connection. This fix disables renegotiation support in OpenSSL so that the rippled server is not vulnerable to this bug regardless of the OpenSSL version used to compile the server. This also removes support for deprecated TLS versions 1.0 and 1.1 and ciphers that are not part of TLS 1.2 [(#79e69da)](https://github.com/ripple/rippled/pull/3843/commits/79e69da3647019840dca49622621c3d88bc3883f).
|
||||
- **Support HTTP health check in reporting mode:** Enables the Health Check special method when running the server in the new Reporting Mode introduced in 1.7.0 [(9c8cadd)](https://github.com/ripple/rippled/pull/3843/commits/9c8caddc5a197bdd642556f8beb14f06d53cdfd3).
|
||||
- **Maintain compatibility for forwarded RPC responses:** Fixes a case in API responses from servers in Reporting Mode, where requests that were forwarded to a P2P-mode server would have the result field nested inside another result field [(8579eb0)](https://github.com/ripple/rippled/pull/3843/commits/8579eb0c191005022dcb20641444ab471e277f67).
|
||||
- **Add load_factor in reporting mode:** Adds a load_factor value to the server info method response when running the server in Reporting Mode so that the response is compatible with the format returned by servers in P2P mode (the default) [(430802c)](https://github.com/ripple/rippled/pull/3843/commits/430802c1cf6d4179f2249a30bfab9eff8e1fa748).
|
||||
- **Properly encode metadata from tx RPC command:** Fixes a problem where transaction metadata in the tx API method response would be in JSON format even when binary was requested [(7311629)](https://github.com/ripple/rippled/pull/3843/commits/73116297aa94c4acbfc74c2593d1aa2323b4cc52).
|
||||
- **Updates to Windows builds:** When building on Windows, use vcpkg 2021 by default and add compatibility with MSVC 2019 [(36fe196)](https://github.com/ripple/rippled/pull/3843/commits/36fe1966c3cd37f668693b5d9910fab59c3f8b1f), [(30fd458)](https://github.com/ripple/rippled/pull/3843/commits/30fd45890b1d3d5f372a2091d1397b1e8e29d2ca).
|
||||
|
||||
## Version 1.7.0
|
||||
|
||||
Ripple has released version 1.7.0 of `rippled`, the reference server implementation of the XRP Ledger protocol.
|
||||
|
||||
@@ -20,7 +20,7 @@ else
|
||||
if [[ -d "${VCPKG_DIR}" ]] ; then
|
||||
rm -rf "${VCPKG_DIR}"
|
||||
fi
|
||||
git clone --branch 2019.12 https://github.com/Microsoft/vcpkg.git ${VCPKG_DIR}
|
||||
git clone --branch 2021.04.30 https://github.com/Microsoft/vcpkg.git ${VCPKG_DIR}
|
||||
pushd ${VCPKG_DIR}
|
||||
BSARGS=()
|
||||
if [[ "$(uname)" == "Darwin" ]] ; then
|
||||
|
||||
183
bin/sidechain/python/README.md
Normal file
183
bin/sidechain/python/README.md
Normal file
@@ -0,0 +1,183 @@
|
||||
## Introduction
|
||||
|
||||
This directory contains python scripts to test and explore side chains.
|
||||
|
||||
See the instructions [here](docs/sidechain/GettingStarted.md) for how to install
|
||||
the necessary dependencies and run an interactive shell that will spin up a set
|
||||
of federators on your local machine and allow you to transfer assets between the
|
||||
main chain and a side chain.
|
||||
|
||||
For all these scripts, make sure the `RIPPLED_MAINCHAIN_EXE`,
|
||||
`RIPPLED_SIDECHAIN_EXE`, and `RIPPLED_SIDECHAIN_CFG_DIR` environment variables
|
||||
are correctly set, and the side chain configuration files exist. Also make sure the python
|
||||
dependencies are installed and the virtual environment is activated.
|
||||
|
||||
Note: the unit tests do not use the configuration files, so the `RIPPLED_SIDECHAIN_CFG_DIR` is
|
||||
not needed for that script.
|
||||
|
||||
## Unit tests
|
||||
|
||||
The "tests" directory contains a simple unit test. It takes several minutes to
|
||||
run, and will create the necessary configuration files, start a test main chain
|
||||
in standalone mode, and a test side chain with 5 federators, and do some simple
|
||||
cross chain transactions. Side chains do not yet have extensive tests. Testing
|
||||
is being actively worked on.
|
||||
|
||||
To run the tests, change directories to the `bin/sidechain/python/tests` directory and type:
|
||||
```
|
||||
pytest
|
||||
```
|
||||
|
||||
To capture logging information and to set the log level (to help with debugging), type this instead:
|
||||
```
|
||||
pytest --log-file=log.txt --log-file-level=info
|
||||
```
|
||||
|
||||
The response should be something like the following:
|
||||
```
|
||||
============================= test session starts ==============================
|
||||
platform linux -- Python 3.8.5, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
|
||||
rootdir: /home/swd/projs/ripple/mine/bin/sidechain/python/tests
|
||||
collected 1 item
|
||||
|
||||
simple_xchain_transfer_test.py . [100%]
|
||||
|
||||
======================== 1 passed in 215.20s (0:03:35) =========================
|
||||
|
||||
```
|
||||
|
||||
## Scripts
|
||||
### riplrepl.py
|
||||
|
||||
This is an interactive shell for experimenting with side chains. It will spin up
|
||||
a test main chain running in standalone mode, and a test side chain with five
|
||||
federators - all running on the local machine. There are commands to make
|
||||
payments within a chain, make cross chain payments, check balances, check server
|
||||
info, and check federator info. There is a simple "help" system, but more
|
||||
documentation is needed for this tool (or more likely we need to replace this
|
||||
with some web front end).
|
||||
|
||||
Note: a "repl" is another name for an interactive shell. It stands for
|
||||
"read-eval-print-loop". It is pronounced "rep-ul".
|
||||
|
||||
### create_config_file.py
|
||||
|
||||
This is a script used to create the config files needed to run a test side chain
|
||||
on your local machine. To run this, make sure rippled is built,
|
||||
`RIPPLED_MAINCHAIN_EXE`, `RIPPLED_SIDECHAIN_EXE`, and
|
||||
`RIPPLED_SIDECHAIN_CFG_DIR` environment variables are correctly set, and the
|
||||
side chain configuration files exist. Also make sure the python dependencies are
|
||||
installed and the virtual environment is activated. Running this will create
|
||||
config files in the directory specified by the `RIPPLED_SIDECHAIN_CFG_DIR`
|
||||
environment variable.
|
||||
|
||||
### log_analyzer.py
|
||||
|
||||
This is a script used to take structured log files and convert them to json for easier debugging.
|
||||
|
||||
## Python modules
|
||||
|
||||
### sidechain.py
|
||||
|
||||
A python module that can be used to write python scripts to interact with
|
||||
side chains. This is used to write unit tests and the interactive shell. To write
|
||||
a standalone script, look at how the tests are written in
|
||||
`test/simple_xchain_transfer_test.py`. The idea is to call
|
||||
`sidechain._multinode_with_callback`, which sets up the two chains, and place
|
||||
your code in the callback. For example:
|
||||
|
||||
```
|
||||
def multinode_test(params: Params):
|
||||
def callback(mc_app: App, sc_app: App):
|
||||
my_function(mc_app, sc_app, params)
|
||||
|
||||
sidechain._multinode_with_callback(params,
|
||||
callback,
|
||||
setup_user_accounts=False)
|
||||
```
|
||||
|
||||
The functions `sidechain.main_to_side_transfer` and
|
||||
`sidechain.side_to_main_transfer` can be used as convenience functions to initiate
|
||||
cross chain transfers. Of course, these transactions can also be initiated with
|
||||
a payment to the door account with the memo data set to the destination account
|
||||
on the destination chain (which is what those convenience functions do under the
|
||||
hood).
|
||||
|
||||
Transactions execute asynchonously. Use the function
|
||||
`test_utils.wait_for_balance_change` to ensure a transaction has completed.
|
||||
|
||||
### transaction.py
|
||||
|
||||
A python module for transactions. Currently there are transactions for:
|
||||
|
||||
* Payment
|
||||
* Trust (trust set)
|
||||
* SetRegularKey
|
||||
* SignerListSet
|
||||
* AccountSet
|
||||
* Offer
|
||||
* Ticket
|
||||
* Hook (experimental - useful paying with the hook amendment from XRPL Labs).
|
||||
|
||||
Typically, a transaction is submitted through the call operator on an `App` object. For example, to make a payment from the account `alice` to the account `bob` for 500 XRP:
|
||||
```
|
||||
mc_app(Payment(account=alice, dst=bob, amt=XRP(500)))
|
||||
```
|
||||
(where mc_app is an App object representing the main chain).
|
||||
|
||||
### command.py
|
||||
|
||||
A python module for RPC commands. Currently there are commands for:
|
||||
* PathFind
|
||||
* Sign
|
||||
* LedgerAccept (for standalone mode)
|
||||
* Stop
|
||||
* LogLevel
|
||||
* WalletPropose
|
||||
* ValidationCreate
|
||||
* AccountInfo
|
||||
* AccountLines
|
||||
* AccountTx
|
||||
* BookOffers
|
||||
* BookSubscription
|
||||
* ServerInfo
|
||||
* FederatorInfo
|
||||
* Subscribe
|
||||
|
||||
### common.py
|
||||
|
||||
Python module for common ledger objects, including:
|
||||
* Account
|
||||
* Asset
|
||||
* Path
|
||||
* Pathlist
|
||||
|
||||
### app.py
|
||||
|
||||
Python module for an application. An application is responsible for local
|
||||
network (or single server) and an address book that maps aliases to accounts.
|
||||
|
||||
### config_file.py
|
||||
|
||||
Python module representing a config file that is read from disk.
|
||||
|
||||
### interactive.py
|
||||
|
||||
Python module with the implementation of the RiplRepl interactive shell.
|
||||
|
||||
### ripple_client.py
|
||||
|
||||
A python module representing a rippled server.
|
||||
|
||||
### testnet.py
|
||||
|
||||
A python module representing a rippled testnet running on the local machine.
|
||||
|
||||
## Other
|
||||
### requirements.txt
|
||||
|
||||
These are the python dependencies needed by the scripts. Use `pip3 install -r
|
||||
requirements.txt` to install them. A python virtual environment is recommended.
|
||||
See the instructions [here](docs/sidechain/GettingStarted.md) for how to install
|
||||
the dependencies.
|
||||
|
||||
624
bin/sidechain/python/app.py
Normal file
624
bin/sidechain/python/app.py
Normal file
@@ -0,0 +1,624 @@
|
||||
from contextlib import contextmanager
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
import subprocess
|
||||
import time
|
||||
from typing import Callable, Dict, List, Optional, Set, Union
|
||||
|
||||
from ripple_client import RippleClient
|
||||
from common import Account, Asset, XRP
|
||||
from command import AccountInfo, AccountLines, BookOffers, Command, FederatorInfo, LedgerAccept, Sign, Submit, SubscriptionCommand, WalletPropose
|
||||
from config_file import ConfigFile
|
||||
import testnet
|
||||
from transaction import Payment, Transaction
|
||||
|
||||
|
||||
class KeyManager:
    '''Keeps track of accounts, indexed both by nickname (alias) and by
    account id, so scripts can refer to accounts by human-readable names.'''
    def __init__(self):
        self._aliases = {}  # alias -> account
        self._accounts = {}  # account id -> account

    def add(self, account: 'Account') -> None:
        '''Register an account under its id and, if set, its nickname.
        (The original was annotated `-> bool` but never returned a value.)'''
        if account.nickname:
            self._aliases[account.nickname] = account
        self._accounts[account.account_id] = account

    def is_alias(self, name: str) -> bool:
        '''True if `name` is a known nickname.'''
        return name in self._aliases

    def account_from_alias(self, name: str) -> 'Account':
        '''Look up an account by nickname. The nickname must exist.'''
        assert name in self._aliases
        return self._aliases[name]

    def known_accounts(self) -> List['Account']:
        '''All registered accounts.'''
        return list(self._accounts.values())

    def account_id_dict(self) -> Dict[str, 'Account']:
        '''The underlying account-id -> account mapping.'''
        return self._accounts

    def alias_or_account_id(self, id: Union['Account', str]) -> str:
        '''
        return the alias if it exists, otherwise return the id
        '''
        if isinstance(id, Account):
            return id.alias_or_account_id()

        if id in self._accounts:
            return self._accounts[id].nickname
        return id

    def alias_to_account_id(self, alias: str) -> Optional[str]:
        '''Return the account id for a nickname, or None if unknown.'''
        # Bug fix: the original tested `id` (the builtin) instead of the
        # `alias` parameter, so this method always returned None.
        if alias in self._aliases:
            return self._aliases[alias].account_id
        return None

    def to_string(self, nickname: Optional[str] = None):
        '''Render the alias table as text. If `nickname` is given, render a
        single row for that alias (id 'NA' if the alias is unknown).'''
        names = []
        account_ids = []
        if nickname:
            names = [nickname]
            if nickname in self._aliases:
                account_ids = [self._aliases[nickname].account_id]
            else:
                # Bug fix: the original assigned to `account_id` (typo),
                # leaving `account_ids` empty and crashing the DataFrame
                # constructor below with mismatched column lengths.
                account_ids = ['NA']
        else:
            for (k, v) in self._aliases.items():
                names.append(k)
                account_ids.append(v.account_id)
        # use a dataframe to get a nice table output
        df = pd.DataFrame(data={'name': names, 'id': account_ids})
        return f'{df.to_string(index=False)}'
|
||||
|
||||
|
||||
class AssetAliases:
    '''Maps human-readable nicknames (aliases) to Asset objects.'''
    def __init__(self):
        self._aliases = {}  # alias -> asset

    def add(self, asset: 'Asset', name: str):
        '''Register `asset` under the alias `name`.'''
        self._aliases[name] = asset

    def is_alias(self, name: str) -> bool:
        '''True if `name` is a known asset alias.'''
        return name in self._aliases

    def asset_from_alias(self, name: str) -> 'Asset':
        '''Look up an asset by alias. The alias must exist.'''
        assert name in self._aliases
        return self._aliases[name]

    def known_aliases(self) -> List[str]:
        '''All registered alias names.'''
        return list(self._aliases.keys())

    def known_assets(self) -> List['Asset']:
        '''All registered assets.'''
        return list(self._aliases.values())

    def to_string(self, nickname: Optional[str] = None):
        '''Render the alias table as text. If `nickname` is given, render a
        single row for that alias ('NA' columns if the alias is unknown).'''
        names = []
        currencies = []
        issuers = []
        if nickname:
            names = [nickname]
            if nickname in self._aliases:
                v = self._aliases[nickname]
                currencies = [v.currency]
                # (the original also computed an unused local `iss` here)
                issuers = [v.issuer if v.issuer else '']
            else:
                currencies = ['NA']
                issuers = ['NA']
        else:
            for (k, v) in self._aliases.items():
                names.append(k)
                currencies.append(v.currency)
                issuers.append(v.issuer if v.issuer else '')
        # use a dataframe to get a nice table output
        df = pd.DataFrame(data={
            'name': names,
            'currency': currencies,
            'issuer': issuers
        })
        return f'{df.to_string(index=False)}'
|
||||
|
||||
|
||||
class App:
    '''App to interact with rippled servers'''
    def __init__(self,
                 *,
                 standalone: bool,
                 network: Optional[testnet.Network] = None,
                 client: Optional[RippleClient] = None):
        '''
        Create an App backed by either a testnet (multiple servers) or a
        single client. Exactly one of `network`/`client` must be given.
        '''
        if network and client:
            raise ValueError('Cannot specify both a testnet and client in App')
        if not network and not client:
            raise ValueError('Must specify a testnet or a client in App')

        self.standalone = standalone
        self.network = network

        if client:
            self.client = client
        else:
            # Use the network's first server as the default client.
            self.client = self.network.get_client(0)

        self.key_manager = KeyManager()
        self.asset_aliases = AssetAliases()
        # Pre-register the well-known standalone-mode genesis account.
        root_account = Account(nickname='root',
                               account_id='rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh',
                               secret_key='masterpassphrase')
        self.key_manager.add(root_account)

    def shutdown(self):
        '''Shut down the whole network, or the single client.'''
        if self.network:
            self.network.shutdown()
        else:
            self.client.shutdown()

    def send_signed(self, txn: Transaction) -> dict:
        '''Sign then send the given transaction'''
        if not txn.account.secret_key:
            raise ValueError('Cannot sign transaction without secret key')
        r = self(Sign(txn.account.secret_key, txn.to_cmd_obj()))
        raw_signed = r['tx_blob']
        r = self(Submit(raw_signed))
        logging.info(f'App.send_signed: {json.dumps(r, indent=1)}')
        return r

    def send_command(self, cmd: Command) -> dict:
        '''Send the command to the rippled server'''
        r = self.client.send_command(cmd)
        logging.info(
            f'App.send_command : {cmd.cmd_name()} : {json.dumps(r, indent=1)}')
        return r

    # Need async version to close ledgers from async functions
    async def async_send_command(self, cmd: Command) -> dict:
        '''Send the command to the rippled server'''
        return await self.client.async_send_command(cmd)

    def send_subscribe_command(
            self,
            cmd: SubscriptionCommand,
            callback: Optional[Callable[[dict], None]] = None) -> dict:
        '''Send the subscription command to the rippled server. If already subscribed, it will unsubscribe'''
        return self.client.send_subscribe_command(cmd, callback)

    def get_pids(self) -> List[int]:
        '''Return the process ids of the running servers (may be empty).'''
        if self.network:
            return self.network.get_pids()
        if pid := self.client.get_pid():
            return [pid]
        # Bug fix: the original fell off the end and implicitly returned
        # None when the single client had no pid; callers expect a list.
        return []

    def get_running_status(self) -> List[bool]:
        '''Return one running-flag per server.'''
        if self.network:
            return self.network.get_running_status()
        if self.client.get_pid():
            return [True]
        else:
            return [False]

    # Get a dict of the server_state, validated_ledger_seq, and complete_ledgers
    def get_brief_server_info(self) -> dict:
        if self.network:
            return self.network.get_brief_server_info()
        else:
            # Wrap single-client values in lists so the shape matches the
            # network case (one entry per server).
            ret = {}
            for (k, v) in self.client.get_brief_server_info().items():
                ret[k] = [v]
            return ret

    def servers_start(self,
                      server_indexes: Optional[Union[Set[int],
                                                     List[int]]] = None,
                      *,
                      extra_args: Optional[List[List[str]]] = None):
        '''Start the given servers (network mode only).'''
        if self.network:
            return self.network.servers_start(server_indexes,
                                              extra_args=extra_args)
        else:
            raise ValueError('Cannot start stand alone server')

    def servers_stop(self,
                     server_indexes: Optional[Union[Set[int],
                                                    List[int]]] = None):
        '''Stop the given servers (network mode only).'''
        if self.network:
            return self.network.servers_stop(server_indexes)
        else:
            raise ValueError('Cannot stop stand alone server')

    def federator_info(self,
                       server_indexes: Optional[Union[Set[int],
                                                      List[int]]] = None):
        '''Collect federator_info from the given (running) servers.'''
        # key is server index. value is federator_info result
        result_dict = {}
        if self.network:
            if not server_indexes:
                server_indexes = [
                    i for i in range(self.network.num_clients())
                    if self.network.is_running(i)
                ]
            for i in server_indexes:
                if self.network.is_running(i):
                    result_dict[i] = self.network.get_client(i).send_command(
                        FederatorInfo())
        else:
            if 0 in server_indexes:
                result_dict[0] = self.client.send_command(FederatorInfo())
        return result_dict

    def __call__(self,
                 to_send: Union[Transaction, Command, SubscriptionCommand],
                 callback: Optional[Callable[[dict], None]] = None,
                 *,
                 insert_seq_and_fee=False) -> dict:
        '''Call `send_signed` for transactions or `send_command` for commands'''
        if isinstance(to_send, SubscriptionCommand):
            return self.send_subscribe_command(to_send, callback)
        # callback only makes sense for subscriptions
        assert callback is None
        if isinstance(to_send, Transaction):
            if insert_seq_and_fee:
                self.insert_seq_and_fee(to_send)
            return self.send_signed(to_send)
        if isinstance(to_send, Command):
            return self.send_command(to_send)
        raise ValueError(
            'Expected `to_send` to be either a Transaction, Command, or SubscriptionCommand'
        )

    def get_configs(self) -> List[str]:
        '''Return the config objects of all servers.'''
        if self.network:
            return self.network.get_configs()
        return [self.client.config]

    def create_account(self, name: str) -> Account:
        ''' Create an account. Use the name as the alias. '''
        if name == 'root':
            # Bug fix: the original returned None for 'root', despite the
            # `-> Account` annotation; return the pre-registered root
            # account instead so callers always get an Account.
            return self.key_manager.account_from_alias('root')
        assert not self.key_manager.is_alias(name)

        account = Account(nickname=name, result_dict=self(WalletPropose()))
        self.key_manager.add(account)
        return account

    def create_accounts(self,
                        names: List[str],
                        funding_account: Union[Account, str] = 'root',
                        amt: Union[int, Asset] = 1000000000) -> List[Account]:
        '''Fund the accounts with nicknames 'names' by using the funding account and amt'''
        accounts = [self.create_account(n) for n in names]
        if not isinstance(funding_account, Account):
            org_funding_account = funding_account
            funding_account = self.key_manager.account_from_alias(
                funding_account)
            if not isinstance(funding_account, Account):
                raise ValueError(
                    f'Could not find funding account {org_funding_account}')
        if not isinstance(amt, Asset):
            assert isinstance(amt, int)
            amt = Asset(value=amt)
        for a in accounts:
            p = Payment(account=funding_account, dst=a, amt=amt)
            self.send_signed(p)
        return accounts

    def maybe_ledger_accept(self):
        '''Close a ledger, but only in standalone mode.'''
        if not self.standalone:
            return
        self(LedgerAccept())

    # Need async version to close ledgers from async functions
    async def async_maybe_ledger_accept(self):
        if not self.standalone:
            return
        await self.async_send_command(LedgerAccept())

    def get_balances(
            self,
            account: Union[Account, List[Account], None] = None,
            asset: Union[Asset, List[Asset]] = Asset()
    ) -> pd.DataFrame:
        '''Return a pandas dataframe of account balances. If account is None, treat as a wildcard (use address book)'''
        if account is None:
            account = self.key_manager.known_accounts()
        if isinstance(account, list):
            result = [self.get_balances(acc, asset) for acc in account]
            return pd.concat(result, ignore_index=True)
        if isinstance(asset, list):
            result = [self.get_balances(account, ass) for ass in asset]
            return pd.concat(result, ignore_index=True)
        if asset.is_xrp():
            try:
                df = self.get_account_info(account)
            except Exception:
                # Most likely the account does not exist on the ledger. Give a balance of zero.
                df = pd.DataFrame({
                    'account': [account],
                    'balance': [0],
                    'flags': [0],
                    'owner_count': [0],
                    'previous_txn_id': ['NA'],
                    'previous_txn_lgr_seq': [-1],
                    'sequence': [-1]
                })
            df = df.assign(currency='XRP', peer='', limit='')
            return df.loc[:,
                          ['account', 'balance', 'currency', 'peer', 'limit']]
        else:
            try:
                df = self.get_trust_lines(account)
                if df.empty: return df
                df = df[(df['peer'] == asset.issuer.account_id)
                        & (df['currency'] == asset.currency)]
            except Exception:
                # Most likely the account does not exist on the ledger. Return an empty data frame
                df = pd.DataFrame({
                    'account': [],
                    'balance': [],
                    'currency': [],
                    'peer': [],
                    'limit': [],
                })
            return df.loc[:,
                          ['account', 'balance', 'currency', 'peer', 'limit']]

    def get_balance(self, account: Account, asset: Asset) -> Asset:
        '''Get a balance from a single account in a single asset'''
        try:
            df = self.get_balances(account, asset)
            return asset(df.iloc[0]['balance'])
        except Exception:
            # Best-effort: treat any lookup failure as a zero balance.
            return asset(0)

    def get_account_info(self,
                         account: Optional[Account] = None) -> pd.DataFrame:
        '''Return a pandas dataframe of account info. If account is None, treat as a wildcard (use address book)'''
        if account is None:
            known_accounts = self.key_manager.known_accounts()
            result = [self.get_account_info(acc) for acc in known_accounts]
            return pd.concat(result, ignore_index=True)
        try:
            result = self.client.send_command(AccountInfo(account))
        except Exception:
            # Most likely the account does not exist on the ledger. Give a balance of zero.
            return pd.DataFrame({
                'account': [account],
                'balance': [0],
                'flags': [0],
                'owner_count': [0],
                'previous_txn_id': ['NA'],
                'previous_txn_lgr_seq': [-1],
                'sequence': [-1]
            })
        if 'account_data' not in result:
            raise ValueError('Bad result from account_info command')
        info = result['account_data']
        # Drop fields that are not interesting as dataframe columns.
        for dk in ['LedgerEntryType', 'index']:
            del info[dk]
        df = pd.DataFrame([info])
        df.rename(columns={
            'Account': 'account',
            'Balance': 'balance',
            'Flags': 'flags',
            'OwnerCount': 'owner_count',
            'PreviousTxnID': 'previous_txn_id',
            'PreviousTxnLgrSeq': 'previous_txn_lgr_seq',
            'Sequence': 'sequence'
        },
                  inplace=True)
        df['balance'] = df['balance'].astype(int)
        return df

    def get_trust_lines(self,
                        account: Account,
                        peer: Optional[Account] = None) -> pd.DataFrame:
        '''Return a pandas dataframe account trust lines. If peer account is None, treat as a wildcard'''
        result = self.send_command(AccountLines(account, peer=peer))
        if 'lines' not in result or 'account' not in result:
            raise ValueError('Bad result from account_lines command')
        account = result['account']
        lines = result['lines']
        # account_lines reports the counterparty in 'account'; rename it to
        # 'peer' and record the queried account in 'account'.
        for d in lines:
            d['peer'] = d['account']
            d['account'] = account
        return pd.DataFrame(lines)

    def get_offers(self, taker_pays: Asset, taker_gets: Asset) -> pd.DataFrame:
        '''Return a pandas dataframe of offers'''
        result = self.send_command(BookOffers(taker_pays, taker_gets))
        if 'offers' not in result:
            raise ValueError('Bad result from book_offers command')

        offers = result['offers']
        delete_keys = [
            'BookDirectory', 'BookNode', 'LedgerEntryType', 'OwnerNode',
            'PreviousTxnID', 'PreviousTxnLgrSeq', 'Sequence', 'index'
        ]
        for d in offers:
            for dk in delete_keys:
                del d[dk]
            # Flatten {'value': ...} amounts to their bare value.
            for t in ['TakerPays', 'TakerGets', 'owner_funds']:
                if 'value' in d[t]:
                    d[t] = d[t]['value']
        df = pd.DataFrame(offers)
        df.rename(columns={
            'Account': 'account',
            'Flags': 'flags',
            'TakerGets': 'taker_gets',
            'TakerPays': 'taker_pays'
        },
                  inplace=True)
        return df

    def account_balance(self, account: Account, asset: Asset) -> Asset:
        '''get the account's balance of the asset'''
        # Not implemented in the original; see get_balance for the working
        # single-account/single-asset lookup.
        pass

    def substitute_nicknames(
            self,
            df: pd.DataFrame,
            cols: List[str] = ['account', 'peer']) -> pd.DataFrame:
        '''Return a copy of `df` with account ids replaced by nicknames.
        Note: the mutable default is safe here because `cols` is only read.'''
        result = df.copy(deep=True)
        for c in cols:
            if c not in result:
                continue
            result[c] = result[c].map(
                lambda x: self.key_manager.alias_or_account_id(x))
        return result

    def add_to_keymanager(self, account: Account):
        self.key_manager.add(account)

    def is_alias(self, name: str) -> bool:
        return self.key_manager.is_alias(name)

    def account_from_alias(self, name: str) -> Account:
        return self.key_manager.account_from_alias(name)

    def known_accounts(self) -> List[Account]:
        return self.key_manager.known_accounts()

    def known_asset_aliases(self) -> List[str]:
        return self.asset_aliases.known_aliases()

    def known_iou_assets(self) -> List[Asset]:
        return self.asset_aliases.known_assets()

    def is_asset_alias(self, name: str) -> bool:
        return self.asset_aliases.is_alias(name)

    def add_asset_alias(self, asset: Asset, name: str):
        self.asset_aliases.add(asset, name)

    def asset_from_alias(self, name: str) -> Asset:
        return self.asset_aliases.asset_from_alias(name)

    def insert_seq_and_fee(self, txn: Transaction):
        '''Fill in the transaction's sequence number and fee from the ledger.'''
        acc_info = self(AccountInfo(txn.account))
        # TODO: set better fee (Hard code a fee of 15 for now)
        txn.set_seq_and_fee(acc_info['account_data']['Sequence'], 15)

    def get_client(self) -> RippleClient:
        return self.client
|
||||
|
||||
|
||||
def balances_dataframe(chains: List['App'],
                       chain_names: List[str],
                       account_ids: Optional[List['Account']] = None,
                       assets: Optional[List['Asset']] = None,
                       in_drops: bool = False):
    '''
    Build one pandas DataFrame of balances across several chains, keyed by
    chain name (with any trailing 'chain' suffix stripped) and account alias.

    chains -- one App per chain.
    chain_names -- display names, same length as `chains`.
    account_ids -- per-chain account filters; None means all known accounts.
    assets -- per-chain asset lists; None means XRP plus all known IOU assets.
    in_drops -- if False, XRP balances are shown in XRP instead of drops.

    Fix: `assets` was annotated `List[Asset] = None`; corrected to Optional.
    '''
    def _removesuffix(self: str, suffix: str) -> str:
        # str.removesuffix requires python 3.9+; keep a local equivalent.
        if suffix and self.endswith(suffix):
            return self[:-len(suffix)]
        else:
            return self[:]

    def _balance_df(chain: 'App', acc: Optional['Account'],
                    asset: Union['Asset', List['Asset']], in_drops: bool):
        # One chain's balances, nicknamed and indexed by account.
        b = chain.get_balances(acc, asset)
        if not in_drops:
            # 1 XRP == 1,000,000 drops
            b.loc[b['currency'] == 'XRP', 'balance'] /= 1_000_000
        b = chain.substitute_nicknames(b)
        b = b.set_index('account')
        return b

    if account_ids is None:
        account_ids = [None] * len(chains)

    if assets is None:
        # XRP and all assets in the assets alias list
        assets = [[XRP(0)] + c.known_iou_assets() for c in chains]

    dfs = []
    keys = []
    for chain, chain_name, acc, asset in zip(chains, chain_names, account_ids,
                                             assets):
        dfs.append(_balance_df(chain, acc, asset, in_drops))
        keys.append(_removesuffix(chain_name, 'chain'))
    df = pd.concat(dfs, keys=keys)
    return df
|
||||
|
||||
|
||||
# Start an app with a single client
|
||||
# Start an app with a single client
@contextmanager
def single_client_app(*,
                      config: ConfigFile,
                      command_log: Optional[str] = None,
                      server_out=os.devnull,
                      run_server: bool = True,
                      exe: Optional[str] = None,
                      extra_args: Optional[List[str]] = None,
                      standalone=False):
    '''Start a ripple server and return an app'''
    # Bug fixes vs. original: `fout` and `p` are pre-initialized so the
    # finally block cannot raise NameError when startup fails early, and the
    # server log file handle is closed instead of leaked.
    fout = None
    p = None
    try:
        if extra_args is None:
            extra_args = []
        to_run = None
        app = None
        client = RippleClient(config=config, command_log=command_log, exe=exe)
        if run_server:
            to_run = [client.exe, '--conf', client.config_file_name]
            if standalone:
                # '-a' runs rippled in standalone mode (no consensus).
                to_run.append('-a')
            fout = open(server_out, 'w')
            p = subprocess.Popen(to_run + extra_args,
                                 stdout=fout,
                                 stderr=subprocess.STDOUT)
            client.set_pid(p.pid)
            print(
                f'started rippled: config: {client.config_file_name} PID: {p.pid}',
                flush=True)
            time.sleep(1.5)  # give process time to startup
        app = App(client=client, standalone=standalone)
        yield app
    finally:
        if app:
            app.shutdown()
        if run_server and to_run and p:
            # Ask the server to stop, then wait for it to exit.
            subprocess.Popen(to_run + ['stop'],
                             stdout=fout,
                             stderr=subprocess.STDOUT)
            p.wait()
        if fout:
            fout.close()
|
||||
|
||||
|
||||
def configs_for_testnet(config_file_prefix: str) -> List[ConfigFile]:
    '''Collect the `<prefix>*/rippled.cfg` config files for a local testnet,
    sorted by path. (The original also created an unused `configs` list and
    shadowed the builtins `dir` and `file`.)'''
    prefix_path = Path(config_file_prefix)
    parent_dir = prefix_path.parent
    prefix_name = prefix_path.name
    file_names = []
    for entry in os.listdir(parent_dir):
        cfg = os.path.join(parent_dir, entry, 'rippled.cfg')
        if entry.startswith(prefix_name) and os.path.exists(cfg):
            file_names.append(cfg)
    file_names.sort()
    return [ConfigFile(file_name=f) for f in file_names]
|
||||
|
||||
|
||||
# Start an app for a network with the config files matched by `config_file_prefix*/rippled.cfg`
|
||||
|
||||
|
||||
# Undocumented feature: if the environment variable RIPPLED_SIDECHAIN_RR is set, it is
|
||||
# assumed to point to the rr executable. Sidechain server 0 will then be run under rr.
|
||||
# Start an app for a network with the config files matched by `config_file_prefix*/rippled.cfg`


# Undocumented feature: if the environment variable RIPPLED_SIDECHAIN_RR is set, it is
# assumed to point to the rr executable. Sidechain server 0 will then be run under rr.
@contextmanager
def testnet_app(*,
                exe: str,
                configs: List[ConfigFile],
                command_logs: Optional[List[str]] = None,
                run_server: Optional[List[bool]] = None,
                sidechain_rr: Optional[str] = None,
                extra_args: Optional[List[List[str]]] = None):
    '''Start a ripple testnet and return an app'''
    app = None
    try:
        network = testnet.Network(exe,
                                  configs,
                                  command_logs=command_logs,
                                  run_server=run_server,
                                  with_rr=sidechain_rr,
                                  extra_args=extra_args)
        # Block until the servers agree on a validated ledger before
        # handing control back to the caller.
        network.wait_for_validated_ledger()
        app = App(network=network, standalone=False)
        yield app
    finally:
        # Tear the whole network down when the caller is done.
        if app:
            app.shutdown()
|
||||
563
bin/sidechain/python/command.py
Normal file
563
bin/sidechain/python/command.py
Normal file
@@ -0,0 +1,563 @@
|
||||
import json
|
||||
from typing import List, Optional, Union
|
||||
|
||||
from common import Account, Asset
|
||||
|
||||
|
||||
class Command:
    '''Interface for all commands sent to the server'''

    # command id useful for websocket messages
    next_cmd_id = 1

    def __init__(self):
        # Give every command instance a unique, monotonically increasing id.
        self.cmd_id = Command.next_cmd_id
        Command.next_cmd_id += 1

    def cmd_name(self) -> str:
        '''Return the command name for use in a command line'''
        # Subclasses must override.
        assert False
        return ''

    def get_command_line_list(self) -> List[str]:
        '''Return a list of strings suitable for a command line command for a rippled server'''
        # Bug fix: the original returned the bound method `self.cmd_name`
        # instead of calling it, producing a method object where the command
        # name string belongs (compare the correct override in Sign).
        return [self.cmd_name(), json.dumps(self.to_cmd_obj())]

    def get_websocket_dict(self) -> dict:
        '''Return a dictionary suitable for converting to json and sending to a rippled server using a websocket'''
        result = self.to_cmd_obj()
        return self.add_websocket_fields(result)

    def to_cmd_obj(self) -> dict:
        '''Return an object suitable for use in a command (input to json.dumps or similar)'''
        # Subclasses must override.
        assert False
        return {}

    def add_websocket_fields(self, cmd_dict: dict) -> dict:
        '''Add the id and command-name fields a websocket request needs.'''
        cmd_dict['id'] = self.cmd_id
        cmd_dict['command'] = self.cmd_name()
        return cmd_dict

    def _set_flag(self, flag_bit: int, value: bool = True):
        '''Set or clear the flag bit'''
        # Assumes the subclass defines a `flags` attribute.
        if value:
            self.flags |= flag_bit
        else:
            self.flags &= ~flag_bit
        return self
|
||||
|
||||
|
||||
class SubscriptionCommand(Command):
    '''Marker base class for commands that subscribe to server streams;
    dispatched via send_subscribe_command rather than send_command.'''
    def __init__(self):
        super().__init__()
|
||||
|
||||
|
||||
class PathFind(Command):
    '''Rippled ripple_path_find command'''
    def __init__(self,
                 *,
                 src: Account,
                 dst: Account,
                 amt: Asset,
                 send_max: Optional[Asset] = None,
                 src_currencies: Optional[List[Asset]] = None,
                 ledger_hash: Optional[str] = None,
                 ledger_index: Optional[Union[int, str]] = None):
        super().__init__()
        # Store everything; optional fields stay None when not supplied.
        self.src = src
        self.dst = dst
        self.amt = amt
        self.send_max = send_max
        self.src_currencies = src_currencies
        self.ledger_hash = ledger_hash
        self.ledger_index = ledger_index

    def cmd_name(self) -> str:
        return 'ripple_path_find'

    def add_websocket_fields(self, cmd_dict: dict) -> dict:
        # The websocket form of this command also carries a subcommand.
        cmd_dict = super().add_websocket_fields(cmd_dict)
        cmd_dict['subcommand'] = 'create'
        return cmd_dict

    def to_cmd_obj(self) -> dict:
        '''convert to transaction form (suitable for using json.dumps or similar)'''
        result = {
            'source_account': self.src.account_id,
            'destination_account': self.dst.account_id,
            'destination_amount': self.amt.to_cmd_obj()
        }
        if self.send_max is not None:
            result['send_max'] = self.send_max.to_cmd_obj()
        # Optional ledger selectors are added only when present.
        for key, value in (('ledger_hash', self.ledger_hash),
                           ('ledger_index', self.ledger_index)):
            if value is not None:
                result[key] = value
        if self.src_currencies:
            result['source_currencies'] = [{
                'currency': sc.currency,
                'issuer': sc.issuer.account_id
            } for sc in self.src_currencies]
        return result
|
||||
|
||||
|
||||
class Sign(Command):
    '''Rippled sign command'''
    def __init__(self, secret: str, tx: dict):
        super().__init__()
        self.tx = tx
        self.secret = secret

    def cmd_name(self) -> str:
        return 'sign'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: sign <secret> <tx_json>.'''
        return [self.cmd_name(), self.secret, json.dumps(self.tx)]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: the secret plus the transaction under tx_json.'''
        return self.add_websocket_fields({
            'secret': self.secret,
            'tx_json': self.tx
        })
|
||||
|
||||
|
||||
class Submit(Command):
    '''Rippled submit command'''
    def __init__(self, tx_blob: str):
        super().__init__()
        self.tx_blob = tx_blob

    def cmd_name(self) -> str:
        return 'submit'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: submit <tx_blob>.'''
        return [self.cmd_name(), self.tx_blob]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: the signed blob under tx_blob.'''
        return self.add_websocket_fields({'tx_blob': self.tx_blob})
|
||||
|
||||
|
||||
class LedgerAccept(Command):
    '''Rippled ledger_accept command'''
    def __init__(self):
        super().__init__()

    def cmd_name(self) -> str:
        return 'ledger_accept'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: just the command name.'''
        return [self.cmd_name()]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: no arguments beyond the standard fields.'''
        return self.add_websocket_fields({})
|
||||
|
||||
|
||||
class Stop(Command):
    '''Rippled stop command'''
    def __init__(self):
        super().__init__()

    def cmd_name(self) -> str:
        return 'stop'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: just the command name.'''
        return [self.cmd_name()]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: no arguments beyond the standard fields.'''
        return self.add_websocket_fields({})
|
||||
|
||||
|
||||
class LogLevel(Command):
    '''Rippled log_level command'''
    def __init__(self, severity: str, *, partition: Optional[str] = None):
        super().__init__()
        self.severity = severity
        self.partition = partition

    def cmd_name(self) -> str:
        return 'log_level'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: log_level [partition] <severity>.'''
        if self.partition is None:
            return [self.cmd_name(), self.severity]
        return [self.cmd_name(), self.partition, self.severity]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: severity plus optional partition.'''
        payload = {'severity': self.severity}
        if self.partition is not None:
            payload['partition'] = self.partition
        return self.add_websocket_fields(payload)
|
||||
|
||||
|
||||
class WalletPropose(Command):
    '''Rippled wallet_propose command'''
    def __init__(self,
                 *,
                 passphrase: Optional[str] = None,
                 seed: Optional[str] = None,
                 seed_hex: Optional[str] = None,
                 key_type: Optional[str] = None):
        super().__init__()
        self.passphrase = passphrase
        self.seed = seed
        self.seed_hex = seed_hex
        self.key_type = key_type

    def cmd_name(self) -> str:
        return 'wallet_propose'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: wallet_propose [passphrase].

        The CLI form cannot express seed/seed_hex or a non-default key type.
        '''
        assert not self.seed and not self.seed_hex and (
            not self.key_type or self.key_type == 'secp256k1')
        if not self.passphrase:
            return [self.cmd_name()]
        return [self.cmd_name(), self.passphrase]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: whichever key-material fields were supplied.'''
        payload = {}
        for key, val in (('seed', self.seed), ('seed_hex', self.seed_hex),
                         ('passphrase', self.passphrase),
                         ('key_type', self.key_type)):
            if val is not None:
                payload[key] = val
        return self.add_websocket_fields(payload)
|
||||
|
||||
|
||||
class ValidationCreate(Command):
    '''Rippled validation_create command'''
    def __init__(self, *, secret: Optional[str] = None):
        super().__init__()
        self.secret = secret

    def cmd_name(self) -> str:
        return 'validation_create'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: validation_create [secret].'''
        if not self.secret:
            return [self.cmd_name()]
        return [self.cmd_name(), self.secret]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: the optional secret.'''
        payload = {}
        if self.secret is not None:
            payload['secret'] = self.secret
        return self.add_websocket_fields(payload)
|
||||
|
||||
|
||||
class AccountInfo(Command):
    '''Rippled account_info command'''
    def __init__(self,
                 account: Account,
                 *,
                 strict: Optional[bool] = None,
                 ledger_hash: Optional[str] = None,
                 ledger_index: Optional[Union[str, int]] = None,
                 queue: Optional[bool] = None,
                 signers_list: Optional[bool] = None):
        super().__init__()
        self.account = account
        self.strict = strict
        self.ledger_hash = ledger_hash
        self.ledger_index = ledger_index
        self.queue = queue
        self.signers_list = signers_list
        # A request may pin the ledger by hash or by index, not both.
        assert not ((ledger_hash is not None) and (ledger_index is not None))

    def cmd_name(self) -> str:
        return 'account_info'

    def get_command_line_list(self) -> List[str]:
        '''Return a list of strings suitable for a command line command for a rippled server'''
        result = [self.cmd_name(), self.account.account_id]
        # BUGFIX: ledger_index may be an int and strict is a bool; the
        # declared return type is List[str] (and subprocess argv requires
        # strings), so convert before appending.
        if self.ledger_index is not None:
            result.append(str(self.ledger_index))
        if self.ledger_hash is not None:
            result.append(self.ledger_hash)
        if self.strict is not None:
            result.append(str(self.strict))
        return result

    def get_websocket_dict(self) -> dict:
        '''Return a dictionary suitable for converting to json and sending to a rippled server using a websocket'''
        result = {'account': self.account.account_id}
        if self.ledger_index is not None:
            result['ledger_index'] = self.ledger_index
        if self.ledger_hash is not None:
            result['ledger_hash'] = self.ledger_hash
        if self.strict is not None:
            result['strict'] = self.strict
        if self.queue is not None:
            result['queue'] = self.queue
        # NOTE(review): signers_list is stored but never sent in either form
        # -- confirm whether it should be added to the payload.
        return self.add_websocket_fields(result)
|
||||
|
||||
|
||||
class AccountLines(Command):
    '''Rippled account_lines command'''
    def __init__(self,
                 account: Account,
                 *,
                 peer: Optional[Account] = None,
                 ledger_hash: Optional[str] = None,
                 ledger_index: Optional[Union[str, int]] = None,
                 limit: Optional[int] = None,
                 marker=None):
        super().__init__()
        self.account = account
        self.peer = peer
        self.ledger_hash = ledger_hash
        self.ledger_index = ledger_index
        self.limit = limit
        self.marker = marker
        # A request may pin the ledger by hash or by index, not both.
        assert not ((ledger_hash is not None) and (ledger_index is not None))

    def cmd_name(self) -> str:
        return 'account_lines'

    def get_command_line_list(self) -> List[str]:
        '''Return a list of strings suitable for a command line command for a rippled server'''
        # The CLI form cannot express these optional fields.
        assert sum(x is None for x in [
            self.ledger_index, self.ledger_hash, self.limit, self.marker
        ]) == 4
        result = [self.cmd_name(), self.account.account_id]
        if self.peer is not None:
            # BUGFIX: peer is an Account; the command line needs its account
            # id string, not the Account object itself.
            result.append(self.peer.account_id)
        return result

    def get_websocket_dict(self) -> dict:
        '''Return a dictionary suitable for converting to json and sending to a rippled server using a websocket'''
        result = {'account': self.account.account_id}
        if self.peer is not None:
            # BUGFIX: send the account id string (matching how 'account' is
            # sent); an Account object is not json-serializable.
            result['peer'] = self.peer.account_id
        if self.ledger_index is not None:
            result['ledger_index'] = self.ledger_index
        if self.ledger_hash is not None:
            result['ledger_hash'] = self.ledger_hash
        if self.limit is not None:
            result['limit'] = self.limit
        if self.marker is not None:
            result['marker'] = self.marker
        return self.add_websocket_fields(result)
|
||||
|
||||
|
||||
class AccountTx(Command):
    '''Rippled account_tx command'''
    def __init__(self,
                 account: Account,
                 *,
                 limit: Optional[int] = None,
                 marker=None):
        super().__init__()
        self.account = account
        self.limit = limit
        self.marker = marker

    def cmd_name(self) -> str:
        return 'account_tx'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: account_tx <account>.'''
        return [self.cmd_name(), self.account.account_id]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: the account plus optional paging fields.'''
        payload = {'account': self.account.account_id}
        if self.limit is not None:
            payload['limit'] = self.limit
        if self.marker is not None:
            payload['marker'] = self.marker
        return self.add_websocket_fields(payload)
|
||||
|
||||
|
||||
class BookOffers(Command):
    '''Rippled book_offers command'''
    def __init__(self,
                 taker_pays: Asset,
                 taker_gets: Asset,
                 *,
                 taker: Optional[Account] = None,
                 ledger_hash: Optional[str] = None,
                 ledger_index: Optional[Union[str, int]] = None,
                 limit: Optional[int] = None,
                 marker=None):
        super().__init__()
        self.taker_pays = taker_pays
        self.taker_gets = taker_gets
        self.taker = taker
        self.ledger_hash = ledger_hash
        self.ledger_index = ledger_index
        self.limit = limit
        self.marker = marker
        # A request may pin the ledger by hash or by index, not both.
        assert not ((ledger_hash is not None) and (ledger_index is not None))

    def cmd_name(self) -> str:
        return 'book_offers'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: book_offers <taker_pays> <taker_gets>.

        The CLI form cannot express the optional fields.
        '''
        assert all(x is None for x in (self.ledger_index, self.ledger_hash,
                                       self.limit, self.marker))
        return [
            self.cmd_name(),
            self.taker_pays.cmd_str(),
            self.taker_gets.cmd_str()
        ]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: both sides of the book plus any optional fields.'''
        payload = {
            'taker_pays': self.taker_pays.to_cmd_obj(),
            'taker_gets': self.taker_gets.to_cmd_obj()
        }
        if self.taker is not None:
            payload['taker'] = self.taker.account_id
        for key, val in (('ledger_index', self.ledger_index),
                         ('ledger_hash', self.ledger_hash),
                         ('limit', self.limit), ('marker', self.marker)):
            if val is not None:
                payload[key] = val
        return self.add_websocket_fields(payload)
|
||||
|
||||
|
||||
class BookSubscription:
    '''Spec for a book in a subscribe command'''
    def __init__(self,
                 taker_pays: Asset,
                 taker_gets: Asset,
                 *,
                 taker: Optional[Account] = None,
                 snapshot: Optional[bool] = None,
                 both: Optional[bool] = None):
        self.taker_pays = taker_pays
        self.taker_gets = taker_gets
        self.taker = taker
        self.snapshot = snapshot
        self.both = both

    def to_cmd_obj(self) -> dict:
        '''Return an object suitable for use in a command'''
        book = {
            'taker_pays': self.taker_pays.to_cmd_obj(),
            'taker_gets': self.taker_gets.to_cmd_obj()
        }
        if self.taker is not None:
            book['taker'] = self.taker.account_id
        if self.snapshot is not None:
            book['snapshot'] = self.snapshot
        if self.both is not None:
            book['both'] = self.both
        return book
|
||||
|
||||
|
||||
class ServerInfo(Command):
    '''Rippled server_info command'''
    def __init__(self):
        super().__init__()

    def cmd_name(self) -> str:
        return 'server_info'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: just the command name.'''
        return [self.cmd_name()]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: no arguments beyond the standard fields.'''
        return self.add_websocket_fields({})
|
||||
|
||||
|
||||
class FederatorInfo(Command):
    '''Rippled federator_info command'''
    def __init__(self):
        super().__init__()

    def cmd_name(self) -> str:
        return 'federator_info'

    def get_command_line_list(self) -> List[str]:
        '''Command-line form: just the command name.'''
        return [self.cmd_name()]

    def get_websocket_dict(self) -> dict:
        '''Websocket form: no arguments beyond the standard fields.'''
        return self.add_websocket_fields({})
|
||||
|
||||
|
||||
class Subscribe(SubscriptionCommand):
    '''The subscribe method requests periodic notifications from the server
    when certain events happen. See: https://developers.ripple.com/subscribe.html'''
    def __init__(
            self,
            *,
            streams: Optional[List[str]] = None,
            accounts: Optional[List[Account]] = None,
            accounts_proposed: Optional[List[Account]] = None,
            account_history_account: Optional[Account] = None,
            books: Optional[
                List[BookSubscription]] = None,  # taker_pays, taker_gets
            url: Optional[str] = None,
            url_username: Optional[str] = None,
            url_password: Optional[str] = None):
        super().__init__()
        self.streams = streams
        self.accounts = accounts
        self.account_history_account = account_history_account
        self.accounts_proposed = accounts_proposed
        self.books = books
        self.url = url
        self.url_username = url_username
        self.url_password = url_password
        # Set externally once the subscription is live; used by cmd_name to
        # flip this command into an unsubscribe.
        self.websocket = None

    def cmd_name(self) -> str:
        # A live websocket means we are tearing the subscription down.
        if self.websocket:
            return 'unsubscribe'
        return 'subscribe'

    def to_cmd_obj(self) -> dict:
        # Only fields that were actually supplied are sent; Account objects
        # are flattened to their account_id strings.
        d = {}
        if self.streams is not None:
            d['streams'] = self.streams
        if self.accounts is not None:
            d['accounts'] = [a.account_id for a in self.accounts]
        if self.account_history_account is not None:
            d['account_history_tx_stream'] = {
                'account': self.account_history_account.account_id
            }
        if self.accounts_proposed is not None:
            d['accounts_proposed'] = [
                a.account_id for a in self.accounts_proposed
            ]
        if self.books is not None:
            d['books'] = [b.to_cmd_obj() for b in self.books]
        if self.url is not None:
            d['url'] = self.url
        if self.url_username is not None:
            d['url_username'] = self.url_username
        if self.url_password is not None:
            d['url_password'] = self.url_password
        return d
|
||||
256
bin/sidechain/python/common.py
Normal file
256
bin/sidechain/python/common.py
Normal file
@@ -0,0 +1,256 @@
|
||||
import binascii
|
||||
import datetime
|
||||
import logging
|
||||
from typing import List, Optional, Union
|
||||
import pandas as pd
|
||||
import pytz
|
||||
import sys
|
||||
|
||||
# Module-level switch: when False, eprint() becomes a no-op.
EPRINT_ENABLED = True
|
||||
|
||||
|
||||
def disable_eprint():
    '''Globally silence eprint() output.'''
    global EPRINT_ENABLED
    EPRINT_ENABLED = False
|
||||
|
||||
|
||||
def enable_eprint():
    '''Re-enable eprint() output after a disable_eprint().'''
    global EPRINT_ENABLED
    EPRINT_ENABLED = True
|
||||
|
||||
|
||||
def eprint(*args, **kwargs):
    '''Log args as an error and echo them to stderr (unless globally disabled).

    kwargs are forwarded to print() only, not to logging.error().
    '''
    if EPRINT_ENABLED:
        logging.error(*args)
        print(*args, file=sys.stderr, flush=True, **kwargs)
|
||||
|
||||
|
||||
def to_rippled_epoch(d: datetime.datetime) -> int:
    '''Convert from a datetime to the number of seconds since Jan 1, 2000 (rippled epoch).

    d must be timezone-aware; the result is truncated to whole seconds.
    '''
    # Use the stdlib timezone.utc instead of pytz.utc -- behaviorally
    # identical for a fixed UTC offset and drops the third-party dependency.
    start = datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc)
    return int((d - start).total_seconds())
|
||||
|
||||
|
||||
class Account:  # pylint: disable=too-few-public-methods
    '''
    Account in the ripple ledger
    '''
    def __init__(self,
                 *,
                 account_id: Optional[str] = None,
                 nickname: Optional[str] = None,
                 public_key: Optional[str] = None,
                 public_key_hex: Optional[str] = None,
                 secret_key: Optional[str] = None,
                 result_dict: Optional[dict] = None):
        self.account_id = account_id
        self.nickname = nickname
        self.public_key = public_key
        self.public_key_hex = public_key_hex
        self.secret_key = secret_key

        # A server result dict (e.g. from wallet_propose) overrides the
        # individually supplied key fields.
        if result_dict is not None:
            self.account_id = result_dict['account_id']
            self.public_key = result_dict['public_key']
            self.public_key_hex = result_dict['public_key_hex']
            self.secret_key = result_dict['master_seed']

    # Accounts are equal if they represent the same account on the ledger
    # I.e. only check the account_id field for equality.
    def __eq__(self, lhs):
        return isinstance(lhs,
                          self.__class__) and self.account_id == lhs.account_id

    def __ne__(self, lhs):
        return not self == lhs

    def __str__(self) -> str:
        return self.alias_or_account_id()

    def alias_or_account_id(self) -> str:
        '''
        return the alias if it exists, otherwise return the id
        '''
        if self.nickname is not None:
            return self.nickname
        return self.account_id

    def account_id_str_as_hex(self) -> str:
        '''Hex encoding of the account id's utf-8 bytes.'''
        return self.account_id.encode().hex()

    def to_cmd_obj(self) -> dict:
        '''Flatten all fields into a plain dict.'''
        return {
            'account_id': self.account_id,
            'nickname': self.nickname,
            'public_key': self.public_key,
            'public_key_hex': self.public_key_hex,
            'secret_key': self.secret_key
        }
|
||||
|
||||
|
||||
class Asset:
    '''An XRP or IOU value'''
    def __init__(
            self,
            *,
            value: Union[int, float, None] = None,
            currency: Optional[
                str] = None,  # Will default to 'XRP' if not specified
            issuer: Optional[Account] = None,
            from_asset=None,  # asset is of type Optional[Asset]
            # from_rpc_result is a python object resulting from an rpc command
            from_rpc_result: Optional[Union[dict, str]] = None):

        # An asset may be seeded from another asset or an rpc result, not both.
        assert from_asset is None or from_rpc_result is None

        self.value = value
        self.issuer = issuer
        self.currency = currency
        if from_asset is not None:
            # Fill any unspecified field from the template asset.
            if self.value is None:
                self.value = from_asset.value
            if self.issuer is None:
                self.issuer = from_asset.issuer
            if self.currency is None:
                self.currency = from_asset.currency
        if from_rpc_result is not None:
            if isinstance(from_rpc_result, str):
                # A bare string amount is XRP expressed in drops.
                self.value = int(from_rpc_result)
                self.currency = 'XRP'
            else:
                # BUGFIX: the original applied float() to the currency code
                # and stored the value unconverted; value is the numeric
                # field, currency is a code string.
                self.value = float(from_rpc_result['value'])
                self.currency = from_rpc_result['currency']
                self.issuer = Account(account_id=from_rpc_result['issuer'])

        if self.currency is None:
            self.currency = 'XRP'

        if isinstance(self.value, str):
            # BUGFIX: convert self.value (which may have come from
            # from_asset) rather than the raw constructor parameter, which
            # can be None here.
            if self.is_xrp():
                self.value = int(self.value)
            else:
                self.value = float(self.value)

    def __call__(self, value: Union[int, float]):
        '''Call operator useful for a terse syntax for assets in tests. I.e. USD(100)'''
        return Asset(value=value, from_asset=self)

    def __add__(self, lhs):
        # Only assets of the same currency/issuer can be combined.
        assert (self.issuer == lhs.issuer and self.currency == lhs.currency)
        return Asset(value=self.value + lhs.value,
                     currency=self.currency,
                     issuer=self.issuer)

    def __sub__(self, lhs):
        assert (self.issuer == lhs.issuer and self.currency == lhs.currency)
        return Asset(value=self.value - lhs.value,
                     currency=self.currency,
                     issuer=self.issuer)

    def __eq__(self, lhs):
        if not isinstance(lhs, self.__class__):
            return False
        return (self.value == lhs.value and self.currency == lhs.currency
                and self.issuer == lhs.issuer)

    def __ne__(self, lhs):
        return not self.__eq__(lhs)

    def __str__(self) -> str:
        # Uses the issuer's nickname/alias form (Account.__str__).
        value_part = '' if self.value is None else f'{self.value}/'
        issuer_part = '' if self.issuer is None else f'/{self.issuer}'
        return f'{value_part}{self.currency}{issuer_part}'

    def __repr__(self) -> str:
        return self.__str__()

    def is_xrp(self) -> bool:
        ''' return true if the asset represents XRP'''
        return self.currency == 'XRP'

    def cmd_str(self) -> str:
        '''Command-line form: value/currency/issuer-id (parts omitted if unset).'''
        value_part = '' if self.value is None else f'{self.value}/'
        issuer_part = '' if self.issuer is None else f'/{self.issuer.account_id}'
        return f'{value_part}{self.currency}{issuer_part}'

    def to_cmd_obj(self) -> dict:
        '''Return an object suitable for use in a command.

        XRP amounts serialize as a bare string of drops; IOUs as a dict.
        '''
        if self.currency == 'XRP':
            if self.value is not None:
                return f'{self.value}'  # must be a string
            return {'currency': self.currency}
        result = {'currency': self.currency, 'issuer': self.issuer.account_id}
        if self.value is not None:
            result['value'] = f'{self.value}'  # must be a string
        return result
|
||||
|
||||
|
||||
def XRP(v: Union[int, float]) -> Asset:
    '''Build an XRP asset from a value in XRP (stored internally as drops).'''
    return Asset(value=1_000_000 * v)
|
||||
|
||||
|
||||
def drops(v: int) -> Asset:
    '''Build an XRP asset directly from a drop count.'''
    return Asset(value=v)
|
||||
|
||||
|
||||
class Path:
    '''Payment Path'''
    def __init__(self,
                 nodes: Optional[List[Union[Account, Asset]]] = None,
                 *,
                 result_list: Optional[List[dict]] = None):
        '''Build from explicit nodes, or adopt a server-provided result_list.'''
        assert not (nodes and result_list)
        if result_list is not None:
            self.result_list = result_list
        elif nodes is None:
            self.result_list = []
        else:
            self.result_list = [
                self._create_account_path_node(n) if isinstance(n, Account)
                else self._create_currency_path_node(n) for n in nodes
            ]

    def _create_account_path_node(self, account: Account) -> dict:
        '''Path step through an account (type 1).'''
        return {
            'account': account.account_id,
            'type': 1,
            'type_hex': '0000000000000001'
        }

    def _create_currency_path_node(self, asset: Asset) -> dict:
        '''Path step through a currency (type 48); issuer only for IOUs.'''
        node = {
            'currency': asset.currency,
            'type': 48,
            'type_hex': '0000000000000030'
        }
        if not asset.is_xrp():
            node['issuer'] = asset.issuer.account_id
        return node

    def to_cmd_obj(self) -> list:
        '''Return an object suitable for use in a command'''
        return self.result_list
|
||||
|
||||
|
||||
class PathList:
    '''Collection of paths for use in payments'''
    def __init__(self,
                 path_list: Optional[List[Path]] = None,
                 *,
                 result_list: Optional[List[List[dict]]] = None):
        '''Wrap explicit Path objects, or adopt a rippled server response.'''
        # result_list can be the response from the rippled server
        assert not (path_list and result_list)
        if result_list is not None:
            self.paths = [Path(result_list=raw) for raw in result_list]
        else:
            self.paths = path_list

    def to_cmd_obj(self) -> list:
        '''Return an object suitable for use in a command'''
        return [p.to_cmd_obj() for p in self.paths]
|
||||
101
bin/sidechain/python/config_file.py
Normal file
101
bin/sidechain/python/config_file.py
Normal file
@@ -0,0 +1,101 @@
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
|
||||
class Section:
    '''One [section] of a rippled config file: free-form lines plus key=value pairs.'''
    @staticmethod
    def section_header(l: str) -> Optional[str]:
        '''
        If the line is a section header, return the section name
        otherwise return None
        '''
        # BUGFIX: this was a plain def; calling it on an instance would have
        # passed self as `l`.  @staticmethod makes both Section.section_header
        # and instance.section_header behave correctly.
        if l.startswith('[') and l.endswith(']'):
            return l[1:-1]
        return None

    def __init__(self, name: str):
        # Use the base-class __setattr__ so these bookkeeping attributes are
        # not routed into _kv_pairs by our own __setattr__ override below.
        super().__setattr__('_name', name)
        # lines contains all non key-value pairs
        super().__setattr__('_lines', [])
        super().__setattr__('_kv_pairs', {})

    def get_name(self):
        return self._name

    def add_line(self, l):
        '''Add a raw config line; "k = v" lines become key/value pairs.'''
        s = l.split('=')
        if len(s) == 2:
            self._kv_pairs[s[0].strip()] = s[1].strip()
        else:
            self._lines.append(l)

    def get_lines(self):
        return self._lines

    def get_line(self) -> Optional[str]:
        '''Return the first non key-value line, or None if there is none.'''
        if len(self._lines) > 0:
            return self._lines[0]
        return None

    def __getattr__(self, name):
        # Unknown attributes resolve to stored key-value pairs.
        try:
            return self._kv_pairs[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Attributes that already exist are set normally; anything else is
        # treated as a config key-value pair.
        if name in self.__dict__:
            super().__setattr__(name, value)
        else:
            self._kv_pairs[name] = value

    # Pickling support: plain dict of attributes, bypassing the hooks above.
    def __getstate__(self):
        return vars(self)

    def __setstate__(self, state):
        vars(self).update(state)
|
||||
|
||||
|
||||
class ConfigFile:
    '''Parsed rippled .cfg file; sections are exposed as attributes.'''
    def __init__(self, *, file_name: Optional[str] = None):
        # parse the file
        self._file_name = file_name
        self._sections = {}
        if not file_name:
            # No file: start with an empty config.
            return

        cur_section = None
        with open(file_name) as f:
            for n, l in enumerate(f):
                l = l.strip()
                # Skip comments and blank lines.
                if l.startswith('#') or not l:
                    continue
                if section_name := Section.section_header(l):
                    # A new [header] closes out the section being built.
                    if cur_section:
                        self.add_section(cur_section)
                    cur_section = Section(section_name)
                    continue
                if not cur_section:
                    # Content before any [section] header is malformed.
                    raise ValueError(
                        f'Error parsing config file: {file_name} line_num: {n} line: {l}'
                    )
                cur_section.add_line(l)

        # Flush the final section, if any.
        if cur_section:
            self.add_section(cur_section)

    def add_section(self, s: Section):
        # Replaces any existing section of the same name.
        self._sections[s.get_name()] = s

    def get_file_name(self):
        return self._file_name

    # Pickling support: plain dict of attributes, bypassing __getattr__.
    def __getstate__(self):
        return vars(self)

    def __setstate__(self, state):
        vars(self).update(state)

    def __getattr__(self, name):
        # Unknown attributes resolve to parsed sections.
        try:
            return self._sections[name]
        except KeyError:
            raise AttributeError(name)
|
||||
630
bin/sidechain/python/create_config_files.py
Executable file
630
bin/sidechain/python/create_config_files.py
Executable file
@@ -0,0 +1,630 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Generate rippled config files, each with their own ports, database paths, and validation_seeds.
|
||||
# There will be configs for shards/no_shards, main/test nets, two config files for each combination
|
||||
# (so one can run in a dogfood mode while another is tested). To avoid confusion, the directory path
|
||||
# will be $data_dir/{main | test}.{shard | no_shard}.{dog | test}
|
||||
# The config file will reside in that directory with the name rippled.cfg
|
||||
# The validators file will reside in that directory with the name validators.txt
|
||||
'''
|
||||
Script to test and debug sidechains.
|
||||
|
||||
The rippled exe location can be set through the command line or
|
||||
the environment variable RIPPLED_MAINCHAIN_EXE
|
||||
|
||||
The configs_dir (where the config files will reside) can be set through the command line
|
||||
or the environment variable RIPPLED_SIDECHAIN_CFG_DIR
|
||||
'''
|
||||
|
||||
import argparse
|
||||
from dataclasses import dataclass
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
import sys
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
from config_file import ConfigFile
|
||||
from command import ValidationCreate, WalletPropose
|
||||
from common import Account, Asset, eprint, XRP
|
||||
from app import App, single_client_app
|
||||
|
||||
# validators.txt stanza for the production XRP Ledger network.
mainnet_validators = """
[validator_list_sites]
https://vl.ripple.com

[validator_list_keys]
ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
"""

# validators.txt stanza for the altnet (test) network.
altnet_validators = """
[validator_list_sites]
https://vl.altnet.rippletest.net

[validator_list_keys]
ED264807102805220DA0F312E71FC2C69E1552C9C5790F6C25E3729DEB573D5860
"""

# rippled [node_size] value written into every generated config.
node_size = 'medium'
# Fallback data directory; presumably a developer-machine layout -- TODO
# confirm whether this should be configurable rather than hard-coded.
default_data_dir = '/home/swd/data/rippled'
|
||||
|
||||
|
||||
@dataclass
class Keypair:
    # Public half of the key, as returned by rippled.
    public_key: str
    # Seed/secret half of the key; handle with care.
    secret_key: str
    # Derived account id; None for validator keys (see generate_node_keypairs).
    account_id: Optional[str]
|
||||
|
||||
|
||||
def generate_node_keypairs(n: int, rip: App) -> List[Keypair]:
    '''
    generate keypairs suitable for validator keys

    Each keypair comes from the rippled validation_create RPC; validator keys
    have no associated account, so account_id is None.
    '''
    result = []
    for _ in range(n):  # idiom fix: loop index was unused
        keys = rip(ValidationCreate())
        result.append(
            Keypair(public_key=keys["validation_public_key"],
                    secret_key=keys["validation_seed"],
                    account_id=None))
    return result
|
||||
|
||||
|
||||
def generate_federator_keypairs(n: int, rip: App) -> List[Keypair]:
    '''
    generate keypairs suitable for federator keys

    Each keypair comes from the rippled wallet_propose RPC using ed25519 keys,
    so every federator also has an account id.
    '''
    result = []
    for _ in range(n):  # idiom fix: loop index was unused
        keys = rip(WalletPropose(key_type='ed25519'))
        result.append(
            Keypair(public_key=keys["public_key"],
                    secret_key=keys["master_seed"],
                    account_id=keys["account_id"]))
    return result
|
||||
|
||||
|
||||
class Ports:
    '''
    Port numbers for the services exposed by one rippled node.

    Ports are offset by cfg_index so configs generated with different
    indexes can run concurrently without colliding with each other.
    '''
    # Base values; a node's actual ports are offsets from these.
    peer_port_base = 51235
    http_admin_port_base = 5005
    ws_public_port_base = 6005

    def __init__(self, cfg_index: int):
        self.peer_port = Ports.peer_port_base + cfg_index
        self.http_admin_port = Ports.http_admin_port_base + cfg_index
        # Websocket ports are allocated in (public, admin) pairs, hence
        # the stride of two per config index.
        self.ws_public_port = Ports.ws_public_port_base + 2 * cfg_index
        # The admin websocket port is derived from the *public* base.
        self.ws_admin_port = self.ws_public_port + 1
||||
|
||||
|
||||
class Network:
    # A plain rippled network description: validator key pairs plus one
    # Ports allocation per node.
    def __init__(self, num_nodes: int, num_validators: int,
                 start_cfg_index: int, rip: App):
        # Signing keys for the network's validators.
        self.validator_keypairs = generate_node_keypairs(num_validators, rip)
        # Consecutive port allocations starting at start_cfg_index, one per
        # node, so multiple networks can coexist on one machine.
        self.ports = [Ports(start_cfg_index + i) for i in range(num_nodes)]
||||
|
||||
|
||||
class SidechainNetwork(Network):
    # A sidechain network: a plain Network plus federator key pairs and the
    # mainchain door account used by the federation.
    def __init__(self, num_nodes: int, num_federators: int,
                 num_validators: int, start_cfg_index: int, rip: App):
        super().__init__(num_nodes, num_validators, start_cfg_index, rip)
        # Signing keys (with account ids) for the federators.
        self.federator_keypairs = generate_federator_keypairs(
            num_federators, rip)
        # Proposed wallet for the mainchain account controlled by the
        # federation (contains "account_id" and "master_seed").
        self.main_account = rip(WalletPropose(key_type='secp256k1'))
||||
|
||||
|
||||
class XChainAsset:
    '''
    A cross-chain asset pair: the mainchain/sidechain assets exchanged at a
    fixed value ratio, plus the refund penalty charged on each chain.
    '''
    def __init__(self, main_asset: Asset, side_asset: Asset,
                 main_value: Union[int, float], side_value: Union[int, float],
                 main_refund_penalty: Union[int, float],
                 side_refund_penalty: Union[int, float]):
        # Asset objects are callable factories: calling one with a value
        # produces a concrete amount of that asset.
        self.main_asset = main_asset(main_value)
        self.main_refund_penalty = main_asset(main_refund_penalty)
        self.side_asset = side_asset(side_value)
        self.side_refund_penalty = side_asset(side_refund_penalty)
||||
|
||||
|
||||
def generate_asset_stanzas(
        assets: Optional[Dict[str, XChainAsset]] = None) -> str:
    '''
    Render the [sidechain_assets] index stanza followed by one named stanza
    per cross-chain asset. When `assets` is None, a single XRP<->XRP asset
    at a 1:1 value with a 400-drop refund penalty is used.
    '''
    if assets is None:
        # default to xrp only at a 1:1 value
        assets = {
            'xrp_xrp_sidechain_asset':
            XChainAsset(XRP(0), XRP(0), 1, 1, 400, 400)
        }

    # Index stanza lists every asset name; detail stanzas follow.
    index_stanza = """
[sidechain_assets]""" + ''.join('\n' + name for name in assets)

    def _render(name: str, asset: XChainAsset) -> str:
        return f"""
[{name}]
mainchain_asset={json.dumps(asset.main_asset.to_cmd_obj())}
sidechain_asset={json.dumps(asset.side_asset.to_cmd_obj())}
mainchain_refund_penalty={json.dumps(asset.main_refund_penalty.to_cmd_obj())}
sidechain_refund_penalty={json.dumps(asset.side_refund_penalty.to_cmd_obj())}"""

    asset_stanzas = [_render(n, a) for n, a in assets.items()]
    return index_stanza + '\n' + '\n'.join(asset_stanzas)
|
||||
|
||||
|
||||
# Returns a pair of config-file fragments:
#   [0] the sidechain stanzas for rippled.cfg
#   [1] the bootstrap stanzas for sidechain_bootstrap.cfg
def generate_sidechain_stanza(
        mainchain_ports: Ports,
        main_account: dict,
        federators: List[Keypair],
        signing_key: str,
        mainchain_cfg_file: str,
        xchain_assets: Optional[Dict[str,
                                     XChainAsset]] = None) -> Tuple[str, str]:
    '''
    Build the [sidechain]/[sidechain_federators] config fragments for one
    sidechain node, plus the matching bootstrap fragment containing the
    federator account ids and the mainchain account secret.
    '''
    mainchain_ip = "127.0.0.1"

    assets_stanzas = generate_asset_stanzas(xchain_assets)

    # One line per federator for each of the three lists.
    pub_key_lines = ''.join(f'{fed.public_key}\n' for fed in federators)
    secret_key_lines = ''.join(f'{fed.secret_key}\n' for fed in federators)
    bootstrap_lines = ''.join(
        f'{fed.public_key} {fed.account_id}\n' for fed in federators)

    federators_stanza = """
# federator signing public keys
[sidechain_federators]
""" + pub_key_lines
    federators_secrets_stanza = """
# federator signing secret keys (for standalone-mode testing only; Normally won't be in a config file)
[sidechain_federators_secrets]
""" + secret_key_lines
    bootstrap_federators_stanza = """
# first value is federator signing public key, second is the signing pk account
[sidechain_federators]
""" + bootstrap_lines

    sidechain_stanzas = f"""
[sidechain]
signing_key={signing_key}
mainchain_account={main_account["account_id"]}
mainchain_ip={mainchain_ip}
mainchain_port_ws={mainchain_ports.ws_public_port}
# mainchain config file is: {mainchain_cfg_file}

{assets_stanzas}

{federators_stanza}

{federators_secrets_stanza}
"""
    bootstrap_stanzas = f"""
[sidechain]
mainchain_secret={main_account["master_seed"]}

{bootstrap_federators_stanza}
"""
    return (sidechain_stanzas, bootstrap_stanzas)
|
||||
|
||||
|
||||
# cfg_type will typically be either 'dog' or 'test', but can be any string. It is only used
# to create the data directories.
def generate_cfg_dir(*,
                     ports: Ports,
                     with_shards: bool,
                     main_net: bool,
                     cfg_type: str,
                     sidechain_stanza: str,
                     sidechain_bootstrap_stanza: str,
                     validation_seed: Optional[str] = None,
                     validators: Optional[List[str]] = None,
                     fixed_ips: Optional[List[Ports]] = None,
                     data_dir: str,
                     full_history: bool = False,
                     with_hooks: bool = False) -> str:
    # Writes a complete rippled config directory (rippled.cfg,
    # validators.txt and, if requested, sidechain_bootstrap.cfg) under
    # data_dir and returns the path of the generated rippled.cfg.
    ips_stanza = ''
    this_ip = '127.0.0.1'
    if fixed_ips:
        # Testnet mode: peer with the other generated nodes directly.
        ips_stanza = '# Fixed ips for a testnet.\n'
        ips_stanza += '[ips_fixed]\n'
        for i, p in enumerate(fixed_ips):
            if p.peer_port == ports.peer_port:
                continue
            # rippled limits the number of connects per ip. So use the other loopback devices
            ips_stanza += f'127.0.0.{i+1} {p.peer_port}\n'
    else:
        # Public-network mode: bootstrap from a well-known hub.
        ips_stanza = '# Where to find some other servers speaking the Ripple protocol.\n'
        ips_stanza += '[ips]\n'
        if main_net:
            ips_stanza += 'r.ripple.com 51235\n'
        else:
            ips_stanza += 'r.altnet.rippletest.net 51235\n'
    # Comment markers toggle optional stanzas on/off inside the template.
    disable_shards = '' if with_shards else '# '
    disable_delete = '#' if full_history else ''
    history_line = 'full' if full_history else '256'
    earliest_seq_line = ''
    if sidechain_stanza:
        # Sidechains start their ledger history at sequence 1.
        earliest_seq_line = 'earliest_seq=1'
    hooks_line = 'Hooks' if with_hooks else ''
    validation_seed_stanza = ''
    if validation_seed:
        validation_seed_stanza = f'''
[validation_seed]
{validation_seed}
'''
    # Shadows the module-level constant of the same (identical) value.
    node_size = 'medium'
    shard_str = 'shards' if with_shards else 'no_shards'
    net_str = 'main' if main_net else 'test'
    # Directory name encodes the variant so different configs don't collide.
    if not fixed_ips:
        sub_dir = data_dir + f'/{net_str}.{shard_str}.{cfg_type}'
        if sidechain_stanza:
            sub_dir += '.sidechain'
    else:
        sub_dir = data_dir + f'/{cfg_type}'
    db_path = sub_dir + '/db'
    debug_logfile = sub_dir + '/debug.log'
    shard_db_path = sub_dir + '/shards'
    node_db_path = db_path + '/nudb'

    cfg_str = f"""
[server]
port_rpc_admin_local
port_peer
port_ws_admin_local
port_ws_public
#ssl_key = /etc/ssl/private/server.key
#ssl_cert = /etc/ssl/certs/server.crt

[port_rpc_admin_local]
port = {ports.http_admin_port}
ip = {this_ip}
admin = {this_ip}
protocol = http

[port_peer]
port = {ports.peer_port}
ip = 0.0.0.0
protocol = peer

[port_ws_admin_local]
port = {ports.ws_admin_port}
ip = {this_ip}
admin = {this_ip}
protocol = ws

[port_ws_public]
port = {ports.ws_public_port}
ip = {this_ip}
protocol = ws
# protocol = wss

[node_size]
{node_size}

[ledger_history]
{history_line}

[node_db]
type=NuDB
path={node_db_path}
open_files=2000
filter_bits=12
cache_mb=256
file_size_mb=8
file_size_mult=2
{earliest_seq_line}
{disable_delete}online_delete=256
{disable_delete}advisory_delete=0

[database_path]
{db_path}

# This needs to be an absolute directory reference, not a relative one.
# Modify this value as required.
[debug_logfile]
{debug_logfile}

[sntp_servers]
time.windows.com
time.apple.com
time.nist.gov
pool.ntp.org

{ips_stanza}

[validators_file]
validators.txt

[rpc_startup]
{{ "command": "log_level", "severity": "fatal" }}
{{ "command": "log_level", "partition": "SidechainFederator", "severity": "trace" }}

[ssl_verify]
1

{validation_seed_stanza}

{disable_shards}[shard_db]
{disable_shards}type=NuDB
{disable_shards}path={shard_db_path}
{disable_shards}max_historical_shards=6

{sidechain_stanza}

[features]
{hooks_line}
PayChan
Flow
FlowCross
TickSize
fix1368
Escrow
fix1373
EnforceInvariants
SortedDirectories
fix1201
fix1512
fix1513
fix1523
fix1528
DepositAuth
Checks
fix1571
fix1543
fix1623
DepositPreauth
fix1515
fix1578
MultiSignReserve
fixTakerDryOfferRemoval
fixMasterKeyAsRegularKey
fixCheckThreading
fixPayChanRecipientOwnerDir
DeletableAccounts
fixQualityUpperBound
RequireFullyCanonicalSig
fix1781
HardenedValidations
fixAmendmentMajorityCalc
NegativeUNL
TicketBatch
FlowSortStrands
fixSTAmountCanonicalize
fixRmSmallIncreasedQOffers
CheckCashMakesTrustLine
"""

    validators_str = ''
    # Create the directory tree before writing any files.
    for p in [sub_dir, db_path, shard_db_path]:
        Path(p).mkdir(parents=True, exist_ok=True)
    # Add the validators.txt file
    if validators:
        # Explicit validator public keys (used by generated testnets).
        validators_str = '[validators]\n'
        for k in validators:
            validators_str += f'{k}\n'
    else:
        # Fall back to the published validator lists for the public nets.
        validators_str = mainnet_validators if main_net else altnet_validators
    with open(sub_dir + "/validators.txt", "w") as f:
        f.write(validators_str)

    # add the rippled.cfg file
    with open(sub_dir + "/rippled.cfg", "w") as f:
        f.write(cfg_str)

    if sidechain_bootstrap_stanza:
        # add the bootstrap file
        with open(sub_dir + "/sidechain_bootstrap.cfg", "w") as f:
            f.write(sidechain_bootstrap_stanza)

    return sub_dir + "/rippled.cfg"
|
||||
|
||||
|
||||
def generate_multinode_net(out_dir: str,
                           mainnet: Network,
                           sidenet: SidechainNetwork,
                           xchain_assets: Optional[Dict[str,
                                                        XChainAsset]] = None):
    # Generate config directories for every mainchain node and every
    # sidechain node of a multi-node testnet under out_dir.
    mainnet_cfgs = []
    for i in range(len(mainnet.ports)):
        validator_kp = mainnet.validator_keypairs[i]
        ports = mainnet.ports[i]
        mainchain_cfg_file = generate_cfg_dir(
            ports=ports,
            with_shards=False,
            main_net=True,
            cfg_type=f'mainchain_{i}',
            sidechain_stanza='',
            sidechain_bootstrap_stanza='',
            validation_seed=validator_kp.secret_key,
            data_dir=out_dir)
        mainnet_cfgs.append(mainchain_cfg_file)

    for i in range(len(sidenet.ports)):
        validator_kp = sidenet.validator_keypairs[i]
        ports = sidenet.ports[i]

        # Distribute sidechain nodes round-robin across mainchain nodes.
        mainnet_i = i % len(mainnet.ports)
        sidechain_stanza, sidechain_bootstrap_stanza = generate_sidechain_stanza(
            mainnet.ports[mainnet_i], sidenet.main_account,
            sidenet.federator_keypairs,
            sidenet.federator_keypairs[i].secret_key, mainnet_cfgs[mainnet_i],
            xchain_assets)

        # Sidechain nodes peer with each other via fixed_ips and keep full
        # history (required because earliest_seq is reset for sidechains).
        generate_cfg_dir(
            ports=ports,
            with_shards=False,
            main_net=True,
            cfg_type=f'sidechain_{i}',
            sidechain_stanza=sidechain_stanza,
            sidechain_bootstrap_stanza=sidechain_bootstrap_stanza,
            validation_seed=validator_kp.secret_key,
            validators=[kp.public_key for kp in sidenet.validator_keypairs],
            fixed_ips=sidenet.ports,
            data_dir=out_dir,
            full_history=True,
            with_hooks=False)
|
||||
|
||||
|
||||
def parse_args():
    '''
    Parse this script's command line options, ignoring any arguments it
    does not recognize (so it can share argv with other tooling).
    '''
    parser = argparse.ArgumentParser(
        description=('Create config files for testing sidechains'))

    parser.add_argument('--exe',
                        '-e',
                        help=('path to rippled executable'))
    parser.add_argument(
        '--usd',
        '-u',
        action='store_true',
        help=('include a USD/root IOU asset for cross chain transfers'))
    parser.add_argument(
        '--cfgs_dir',
        '-c',
        help=
        ('path to configuration file dir (where the output config files will be located)'
         ))

    known_args, _unknown = parser.parse_known_args()
    return known_args
|
||||
|
||||
|
||||
class Params:
    '''
    Script parameters resolved from environment variables and command line
    arguments; a command line argument overrides the environment.
    '''
    def __init__(self):
        args = parse_args()

        # rippled executable: RIPPLED_MAINCHAIN_EXE, overridden by --exe.
        self.exe = None
        if 'RIPPLED_MAINCHAIN_EXE' in os.environ:
            self.exe = os.environ['RIPPLED_MAINCHAIN_EXE']
        if args.exe:
            self.exe = args.exe

        # Output directory: RIPPLED_SIDECHAIN_CFG_DIR, overridden by
        # --cfgs_dir.
        self.configs_dir = None
        if 'RIPPLED_SIDECHAIN_CFG_DIR' in os.environ:
            self.configs_dir = os.environ['RIPPLED_SIDECHAIN_CFG_DIR']
        if args.cfgs_dir:
            self.configs_dir = args.cfgs_dir

        # Whether to include the USD/root IOU cross-chain asset.
        self.usd = False
        if args.usd:
            self.usd = args.usd

    def check_error(self) -> str:
        '''
        Check for errors. Return `None` if no errors,
        otherwise return a string describing the error
        '''
        if not self.exe:
            # Bug fix: the actual command line switch is --exe, not the
            # previously-mentioned --exe_mainchain (which doesn't exist).
            return 'Missing exe location. Either set the env variable RIPPLED_MAINCHAIN_EXE or use the --exe command line switch'
        if not self.configs_dir:
            return 'Missing configs directory location. Either set the env variable RIPPLED_SIDECHAIN_CFG_DIR or use the --cfgs_dir command line switch'
        return None
|
||||
|
||||
|
||||
def main(params: Params,
         xchain_assets: Optional[Dict[str, XChainAsset]] = None):
    # Generate all the config variants: a non-validator bootstrap config, a
    # multi-node sidechain testnet, and single-node mainchain/sidechain
    # pairs for every (shards, net, cfg_type) combination.
    if err_str := params.check_error():
        eprint(err_str)
        sys.exit(1)
    index = 0
    # A non-validator config used to run a standalone rippled that serves
    # the key-generation RPCs below.
    nonvalidator_cfg_file_name = generate_cfg_dir(
        ports=Ports(index),
        with_shards=False,
        main_net=True,
        cfg_type='non_validator',
        sidechain_stanza='',
        sidechain_bootstrap_stanza='',
        validation_seed=None,
        data_dir=params.configs_dir)
    index = index + 1

    nonvalidator_config = ConfigFile(file_name=nonvalidator_cfg_file_name)
    with single_client_app(exe=params.exe,
                           config=nonvalidator_config,
                           standalone=True) as rip:
        # Multi-node testnet: one mainchain node, five sidechain nodes.
        mainnet = Network(num_nodes=1,
                          num_validators=1,
                          start_cfg_index=index,
                          rip=rip)
        sidenet = SidechainNetwork(num_nodes=5,
                                   num_federators=5,
                                   num_validators=5,
                                   start_cfg_index=index + 1,
                                   rip=rip)
        generate_multinode_net(
            out_dir=f'{params.configs_dir}/sidechain_testnet',
            mainnet=mainnet,
            sidenet=sidenet,
            xchain_assets=xchain_assets)
        # NOTE(review): sidenet consumed 5 cfg indexes starting at index+1,
        # so advancing by only 2 here may reuse port offsets for the configs
        # generated below — confirm this is intentional.
        index = index + 2

        (Path(params.configs_dir) / 'logs').mkdir(parents=True, exist_ok=True)

        # Single-node mainchain/sidechain config pairs for each variant.
        # Non-mainnet variants only get the 'dog' and 'test' flavors.
        for with_shards in [True, False]:
            for is_main_net in [True, False]:
                for cfg_type in ['dog', 'test', 'one', 'two']:
                    if not is_main_net and cfg_type not in ['dog', 'test']:
                        continue

                    mainnet = Network(num_nodes=1,
                                      num_validators=1,
                                      start_cfg_index=index,
                                      rip=rip)
                    mainchain_cfg_file = generate_cfg_dir(
                        data_dir=params.configs_dir,
                        ports=mainnet.ports[0],
                        with_shards=with_shards,
                        main_net=is_main_net,
                        cfg_type=cfg_type,
                        sidechain_stanza='',
                        sidechain_bootstrap_stanza='',
                        validation_seed=mainnet.validator_keypairs[0].
                        secret_key)

                    sidenet = SidechainNetwork(num_nodes=1,
                                               num_federators=5,
                                               num_validators=1,
                                               start_cfg_index=index + 1,
                                               rip=rip)
                    signing_key = sidenet.federator_keypairs[0].secret_key

                    sidechain_stanza, sizechain_bootstrap_stanza = generate_sidechain_stanza(
                        mainnet.ports[0], sidenet.main_account,
                        sidenet.federator_keypairs, signing_key,
                        mainchain_cfg_file, xchain_assets)

                    generate_cfg_dir(
                        data_dir=params.configs_dir,
                        ports=sidenet.ports[0],
                        with_shards=with_shards,
                        main_net=is_main_net,
                        cfg_type=cfg_type,
                        sidechain_stanza=sidechain_stanza,
                        sidechain_bootstrap_stanza=sizechain_bootstrap_stanza,
                        validation_seed=sidenet.validator_keypairs[0].
                        secret_key)
                    # Two nodes (one mainchain, one sidechain) per variant.
                    index = index + 2
|
||||
|
||||
|
||||
if __name__ == '__main__':
    params = Params()

    # With --usd, also configure a USD/root IOU asset (issued by the well
    # known root account) alongside the default XRP<->XRP asset.
    xchain_assets = None
    if params.usd:
        xchain_assets = {}
        xchain_assets['xrp_xrp_sidechain_asset'] = XChainAsset(
            XRP(0), XRP(0), 1, 1, 200, 200)
        root_account = Account(account_id="rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh")
        main_iou_asset = Asset(value=0, currency='USD', issuer=root_account)
        side_iou_asset = Asset(value=0, currency='USD', issuer=root_account)
        xchain_assets['iou_iou_sidechain_asset'] = XChainAsset(
            main_iou_asset, side_iou_asset, 1, 1, 0.02, 0.02)

    main(params, xchain_assets)
|
||||
1496
bin/sidechain/python/interactive.py
Normal file
1496
bin/sidechain/python/interactive.py
Normal file
File diff suppressed because it is too large
Load Diff
198
bin/sidechain/python/log_analyzer.py
Executable file
198
bin/sidechain/python/log_analyzer.py
Executable file
@@ -0,0 +1,198 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
from common import eprint
|
||||
from typing import IO, Optional
|
||||
|
||||
|
||||
class LogLine:
    '''
    One parsed rippled log record. A record may be "structured" (the message
    ends with a JSON object) or "unstructured" (plain text).
    '''
    UNSTRUCTURED_RE = re.compile(r'''(?x)
                # The x flag enables insignificant whitespace mode (allowing comments)
                ^(?P<timestamp>.*UTC)
                [\ ]
                (?P<module>[^:]*):(?P<level>[^\ ]*)
                [\ ]
                (?P<msg>.*$)
                ''')

    STRUCTURED_RE = re.compile(r'''(?x)
                # The x flag enables insignificant whitespace mode (allowing comments)
                ^(?P<timestamp>.*UTC)
                [\ ]
                (?P<module>[^:]*):(?P<level>[^\ ]*)
                [\ ]
                (?P<msg>[^{]*)
                [\ ]
                (?P<json_data>.*$)
                ''')

    def __init__(self, line: str):
        '''
        Parse `line`. On success, sets timestamp/level/module/msg and, for
        structured lines, json_data. On failure, logs the error; raw_line
        always holds the original text.
        '''
        self.raw_line = line
        self.json_data = None

        try:
            if line.endswith('}'):
                # Probably structured; fall back to unstructured if the
                # trailing braces are not valid JSON.
                m = self.STRUCTURED_RE.match(line)
                try:
                    self.json_data = json.loads(m.group('json_data'))
                except:
                    m = self.UNSTRUCTURED_RE.match(line)
            else:
                m = self.UNSTRUCTURED_RE.match(line)

            self.timestamp = m.group('timestamp')
            self.level = m.group('level')
            self.module = m.group('module')
            self.msg = m.group('msg')
        except Exception as e:
            eprint(f'init exception: {e} line: {line}')

    def to_mixed_json_str(self) -> str:
        '''
        return a pretty printed string as mixed json
        '''
        try:
            r = f'{self.timestamp} {self.module}:{self.level} {self.msg}'
            if self.json_data:
                r += '\n' + json.dumps(self.json_data, indent=1)
            return r
        except:
            eprint(f'Using raw line: {self.raw_line}')
            return self.raw_line

    def to_pure_json(self, f_id: Optional[str] = None) -> dict:
        '''
        Return this record as a json dict. If `f_id` is given it is stored
        under the 'f' key (the id of the log file the record came from).

        Bug fix: this previously accepted no f_id, so to_pure_json_str's
        call with an argument always raised and fell back to the raw line.
        '''
        result = {}
        if f_id is not None:
            result['f'] = f_id
        result['t'] = self.timestamp
        result['m'] = self.module
        result['l'] = self.level
        result['msg'] = self.msg
        if self.json_data:
            result['data'] = self.json_data
        return result

    def to_pure_json_str(self, f_id: Optional[str] = None) -> str:
        '''
        return a pretty printed string as pure json
        '''
        try:
            as_dict = self.to_pure_json(f_id)
            return json.dumps(as_dict, indent=1)
        except:
            return self.raw_line
|
||||
|
||||
|
||||
def convert_log(in_file_name: str,
                out: IO,
                *,
                as_list=False,
                pure_json=False,
                module: Optional[str] = 'SidechainFederator') -> list:
    '''
    Parse the log file `in_file_name`, merging continuation lines into the
    record they belong to.

    If as_list is True, return the records as a list of dicts (nothing is
    printed). Otherwise print each record to `out` as pure json or mixed
    json. `module`, when not None, filters records to that module only.
    '''
    result = []

    def _emit(log_line: LogLine, *, flush: bool = False):
        # Emit one completed record, honoring the module filter.
        if module and log_line.module != module:
            return
        if as_list:
            result.append(log_line.to_pure_json())
        elif pure_json:
            print(log_line.to_pure_json_str(), file=out, flush=flush)
        else:
            print(log_line.to_mixed_json_str(), file=out, flush=flush)

    try:
        prev_lines = None
        with open(in_file_name) as input:
            for l in input:
                l = l.strip()
                if not l:
                    continue
                if LogLine.UNSTRUCTURED_RE.match(l):
                    # Start of a new record; the buffered one is complete.
                    if prev_lines:
                        _emit(LogLine(prev_lines))
                    prev_lines = l
                else:
                    # Continuation of a multi-line record.
                    if not prev_lines:
                        eprint(f'Error: Expected prev_lines. Cur line: {l}')
                    assert prev_lines
                    prev_lines += f' {l}'
        if prev_lines:
            # Flush the final buffered record. Bug fix: this path previously
            # passed an undefined name `f_id` to to_pure_json_str, raising
            # NameError whenever the filter matched the last record.
            _emit(LogLine(prev_lines), flush=True)
    except Exception as e:
        eprint(f'Exception: {e}')
        raise e
    return result
|
||||
|
||||
|
||||
def convert_all(in_dir_name: str, out: IO, *, pure_json=False):
    '''
    Convert all the "debug.log" log files one directory level below
    in_dir_name into a single json document written to `out`. Each top-level
    key is the name of the subdirectory the original log file came from.
    This is useful when analyzing networks that run on the local machine.
    '''
    if not os.path.isdir(in_dir_name):
        # Bug fix: previously only printed and fell through to os.listdir,
        # which would then raise anyway; fail fast instead.
        print(f'Error: {in_dir_name} is not a directory')
        return
    files = []
    f_ids = []
    for subdir in os.listdir(in_dir_name):
        file = f'{in_dir_name}/{subdir}/debug.log'
        if not os.path.isfile(file):
            continue
        files.append(file)
        f_ids.append(subdir)

    result = {}
    for f, f_id in zip(files, f_ids):
        # module=None: include every module, not just SidechainFederator.
        l = convert_log(f, out, as_list=True, pure_json=pure_json, module=None)
        result[f_id] = l
    print(json.dumps(result, indent=1), file=out, flush=True)
|
||||
|
||||
|
||||
def parse_args():
    '''
    Parse this script's command line options, ignoring any arguments it
    does not recognize.
    '''
    parser = argparse.ArgumentParser(
        description=('python script to convert log files to json'))

    parser.add_argument(
        '--input',
        '-i',
        help=('input log file or sidechain config directory structure'))
    parser.add_argument('--output',
                        '-o',
                        help=('output log file'))

    known_args, _unknown = parser.parse_known_args()
    return known_args
|
||||
|
||||
|
||||
if __name__ == '__main__':
    try:
        args = parse_args()
        with open(args.output, "w") as out:
            # A directory input means a config tree of per-node logs;
            # otherwise treat the input as a single log file.
            if os.path.isdir(args.input):
                convert_all(args.input, out, pure_json=True)
            else:
                convert_log(args.input, out, pure_json=True)
    except Exception as e:
        # Fix: corrected 'Excption' typo; re-raise without resetting the
        # traceback.
        eprint(f'Exception: {e}')
        raise
|
||||
285
bin/sidechain/python/log_report.py
Executable file
285
bin/sidechain/python/log_report.py
Executable file
@@ -0,0 +1,285 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
from collections import defaultdict
|
||||
import datetime
|
||||
import json
|
||||
import numpy as np
|
||||
import os
|
||||
import pandas as pd
|
||||
import string
|
||||
import sys
|
||||
from typing import Dict, Set
|
||||
|
||||
from common import eprint
|
||||
import log_analyzer
|
||||
|
||||
|
||||
def _has_256bit_hex_field_other(data, result: Set[str]):
    # Fallback for types that cannot contain hex strings (ints, None, ...).
    return


# Dispatch table keyed on a value's type; unknown types fall through to the
# no-op handler above.
_has_256bit_hex_field_overloads = defaultdict(
    lambda: _has_256bit_hex_field_other)


def _has_256bit_hex_field_str(data: str, result: Set[str]):
    # 256 bits == exactly 64 hex characters (either case accepted).
    if len(data) == 64 and all(c in string.hexdigits for c in data):
        result.add(data)


_has_256bit_hex_field_overloads[str] = _has_256bit_hex_field_str


def _has_256bit_hex_field_dict(data: dict, result: Set[str]):
    # Keys whose values are hashes but not transaction ids are skipped so
    # they don't pollute the grouping.
    skip_keys = ("meta", "index", "LedgerIndex", "ledger_index",
                 "ledger_hash", "SigningPubKey", "suppression")
    for key, value in data.items():
        if key in skip_keys:
            continue
        _has_256bit_hex_field_overloads[type(value)](value, result)


_has_256bit_hex_field_overloads[dict] = _has_256bit_hex_field_dict


def _has_256bit_hex_field_list(data: list, result: Set[str]):
    for value in data:
        _has_256bit_hex_field_overloads[type(value)](value, result)


_has_256bit_hex_field_overloads[list] = _has_256bit_hex_field_list


def has_256bit_hex_field(data: dict) -> Set[str]:
    '''
    Find all the fields that are strings 64 chars long with only hex digits
    This is useful when grouping transactions by hex
    '''
    result: Set[str] = set()
    _has_256bit_hex_field_dict(data, result)
    return result
|
||||
|
||||
|
||||
def group_by_txn(data: dict) -> dict:
    '''
    return a dictionary where the key is the transaction hash, the value is
    another dictionary. In the second dictionary the key is the server id,
    and the values are a list of log items
    '''
    result = defaultdict(lambda: defaultdict(list))
    for server_id, log_list in data.items():
        for log_item in log_list:
            # Every 256-bit hex string found in the item is treated as a
            # txn hash; the item is filed under each of them.
            for txn_hash in has_256bit_hex_field(log_item):
                result[txn_hash][server_id].append(log_item)
    return result
|
||||
|
||||
|
||||
def _rekey_dict_by_txn_date(hash_to_timestamp: dict,
|
||||
grouped_by_txn: dict) -> dict:
|
||||
'''
|
||||
hash_to_timestamp is a dictionary with a key of the txn hash and a value of the timestamp.
|
||||
grouped_by_txn is a dictionary with a key of the txn and an unspecified value.
|
||||
the keys in hash_to_timestamp are a superset of the keys in grouped_by_txn
|
||||
This function returns a new grouped_by_txn dictionary with the transactions sorted by date.
|
||||
'''
|
||||
known_txns = [
|
||||
k for k, v in sorted(hash_to_timestamp.items(), key=lambda x: x[1])
|
||||
]
|
||||
result = {}
|
||||
for k, v in grouped_by_txn.items():
|
||||
if k not in known_txns:
|
||||
result[k] = v
|
||||
for h in known_txns:
|
||||
result[h] = grouped_by_txn[h]
|
||||
return result
|
||||
|
||||
|
||||
def _to_timestamp(str_time: str) -> datetime.datetime:
|
||||
return datetime.datetime.strptime(
|
||||
str_time.split('.')[0], "%Y-%b-%d %H:%M:%S")
|
||||
|
||||
|
||||
class Report:
    '''
    Build a log report from a sidechain config directory structure: combines
    the per-node debug.log files, groups records by transaction hash, and
    writes json plus an org-mode count table into out_dir.
    '''
    def __init__(self, in_dir, out_dir):
        self.in_dir = in_dir
        self.out_dir = out_dir

        # Output file locations.
        self.combined_logs_file_name = f'{self.out_dir}/combined_logs.json'
        self.grouped_by_txn_file_name = f'{self.out_dir}/grouped_by_txn.json'
        self.counts_by_txn_and_server_file_name = f'{self.out_dir}/counts_by_txn_and_server.org'
        self.data = None  # combined logs

        # grouped_by_txn is a dictionary where the key is the server id. mainchain servers
        # have a key of `mainchain_#` and sidechain servers have a key of
        # `sidechain_#`, where `#` is a number.
        self.grouped_by_txn = None

        if not os.path.isdir(in_dir):
            eprint(f'The input {self.in_dir} must be an existing directory')
            sys.exit(1)

        if os.path.exists(self.out_dir):
            if not os.path.isdir(self.out_dir):
                eprint(
                    f'The output: {self.out_dir} exists and is not a directory'
                )
                sys.exit(1)
        else:
            os.makedirs(self.out_dir)

        self.combine_logs()
        with open(self.combined_logs_file_name) as f:
            self.data = json.load(f)
        self.grouped_by_txn = group_by_txn(self.data)

        # counts_by_txn_and_server is a dictionary where the key is the txn_hash
        # and the value is a pandas df with a row for every server and a column for every message
        # the value is a count of how many times that message appears for that server.
        counts_by_txn_and_server = {}
        # dict where the key is a transaction hash and the value is the transaction
        hash_to_txn = {}
        # dict where the key is a transaction hash and the value is earliest timestamp in a log file
        hash_to_timestamp = {}
        for txn_hash, server_dict in self.grouped_by_txn.items():
            message_set = set()
            # message list is ordered by when it appears in the log
            message_list = []
            for server_id, messages in server_dict.items():
                for m in messages:
                    try:
                        # Best-effort extraction of the full transaction
                        # object whenever one of its fields carries it.
                        d = m['data']
                        if 'msg' in d and 'transaction' in d['msg']:
                            t = d['msg']['transaction']
                        elif 'tx_json' in d:
                            t = d['tx_json']
                        if t['hash'] == txn_hash:
                            hash_to_txn[txn_hash] = t
                    except:
                        pass
                    msg = m['msg']
                    t = _to_timestamp(m['t'])
                    # Track the earliest timestamp seen for this txn.
                    if txn_hash not in hash_to_timestamp:
                        hash_to_timestamp[txn_hash] = t
                    elif hash_to_timestamp[txn_hash] > t:
                        hash_to_timestamp[txn_hash] = t
                    if msg not in message_set:
                        message_set.add(msg)
                        message_list.append(msg)
            df = pd.DataFrame(0,
                              index=server_dict.keys(),
                              columns=message_list)
            for server_id, messages in server_dict.items():
                for m in messages:
                    df[m['msg']][server_id] += 1
            counts_by_txn_and_server[txn_hash] = df

        # sort the transactions by timestamp, but the txns with unknown timestamp at the beginning
        self.grouped_by_txn = _rekey_dict_by_txn_date(hash_to_timestamp,
                                                      self.grouped_by_txn)
        counts_by_txn_and_server = _rekey_dict_by_txn_date(
            hash_to_timestamp, counts_by_txn_and_server)

        with open(self.grouped_by_txn_file_name, 'w') as out:
            print(json.dumps(self.grouped_by_txn, indent=1), file=out)

        with open(self.counts_by_txn_and_server_file_name, 'w') as out:
            for txn_hash, df in counts_by_txn_and_server.items():
                print(f'\n\n* Txn: {txn_hash}', file=out)
                if txn_hash in hash_to_txn:
                    print(json.dumps(hash_to_txn[txn_hash], indent=1),
                          file=out)
                # Long message columns are renamed to single letters, with a
                # legend printed above the table.
                rename_dict = {}
                for column, renamed_column in zip(df.columns.array,
                                                  string.ascii_uppercase):
                    print(f'{renamed_column} = {column}', file=out)
                    rename_dict[column] = renamed_column
                df.rename(columns=rename_dict, inplace=True)
                print(f'\n{df}', file=out)

    def combine_logs(self):
        '''Combine the per-node debug.log files into combined_logs.json.'''
        try:
            with open(self.combined_logs_file_name, "w") as out:
                # Bug fix: this previously read `args.input`, a module-level
                # global that only exists when log_report runs as a script;
                # use the directory this Report was constructed with.
                log_analyzer.convert_all(self.in_dir, out, pure_json=True)
        except Exception as e:
            eprint(f'Exception: {e}')
            raise e
|
||||
|
||||
|
||||
def main(input_dir_name: str, output_dir_name: str):
    # Build the report; all the work happens in Report.__init__.
    r = Report(input_dir_name, output_dir_name)

    # Values are a list of log lines formatted as json. There are five fields:
    # `t` is the timestamp.
    # `m` is the module.
    # `l` is the log level.
    # `msg` is the message.
    # `data` is the data.
    # For example:
    #
    # {
    #  "t": "2021-Oct-08 21:33:41.731371562 UTC",
    #  "m": "SidechainFederator",
    #  "l": "TRC",
    #  "msg": "no last xchain txn with result",
    #  "data": {
    #   "needsOtherChainLastXChainTxn": true,
    #   "isMainchain": false,
    #   "jlogId": 121
    #  }
    # },

    # TODO(review): the analyses sketched below are not implemented yet.
    # Lifecycle of a transaction
    # For each federator record:
    # Transaction detected: amount, seq, destination, chain, hash
    # Signature received: hash, seq
    # Signature sent: hash, seq, federator dst
    # Transaction submitted
    # Result received, and detect if error
    # Detect any field that doesn't match

    # Lifecycle of initialization

    # Chain listener messages
|
||||
def parse_args():
    '''Build the report generator's argument parser and return the
    recognized command line arguments (unknown arguments are ignored).
    '''
    description = (
        'python script to generate a log report from a sidechain config '
        'directory structure containing the logs')
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument(
        '--input',
        '-i',
        help='directory with sidechain config directory structure')

    parser.add_argument(
        '--output',
        '-o',
        help='output directory for report files')

    known_args, _unknown = parser.parse_known_args()
    return known_args
|
||||
|
||||
|
||||
if __name__ == '__main__':
    try:
        # `args` stays module-level: Report.combine_logs reads args.input.
        args = parse_args()
        main(args.input, args.output)
    except Exception as e:
        eprint(f'Exception: {e}')  # fixed typo: was 'Excption'
        raise  # bare raise preserves the original traceback (was `raise e`)
|
||||
14
bin/sidechain/python/requirements.txt
Normal file
14
bin/sidechain/python/requirements.txt
Normal file
@@ -0,0 +1,14 @@
|
||||
attrs==21.2.0
|
||||
iniconfig==1.1.1
|
||||
numpy==1.21.2
|
||||
packaging==21.0
|
||||
pandas==1.3.3
|
||||
pluggy==1.0.0
|
||||
py==1.10.0
|
||||
pyparsing==2.4.7
|
||||
pytest==6.2.5
|
||||
python-dateutil==2.8.2
|
||||
pytz==2021.1
|
||||
six==1.16.0
|
||||
toml==0.10.2
|
||||
websockets==8.1
|
||||
35
bin/sidechain/python/riplrepl.py
Executable file
35
bin/sidechain/python/riplrepl.py
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env python3
|
||||
'''
|
||||
Script to run an interactive shell to test sidechains.
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from common import disable_eprint, eprint
|
||||
import interactive
|
||||
import sidechain
|
||||
|
||||
|
||||
def main():
    '''Entry point for the interactive sidechain shell (riplrepl).

    Builds the run parameters, validates them, configures error
    printing, then starts the standalone or multinode repl.
    '''
    params = sidechain.Params()
    params.interactive = True

    interactive.set_hooks_dir(params.hooks_dir)

    err_str = params.check_error()
    if err_str:
        eprint(err_str)
        sys.exit(1)

    if not params.verbose:
        disable_eprint()
    else:
        print("eprint enabled")

    repl = (sidechain.standalone_interactive_repl
            if params.standalone else sidechain.multinode_interactive_repl)
    repl(params)


if __name__ == '__main__':
    main()
|
||||
193
bin/sidechain/python/ripple_client.py
Normal file
193
bin/sidechain/python/ripple_client.py
Normal file
@@ -0,0 +1,193 @@
|
||||
import asyncio
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
from os.path import expanduser
|
||||
import subprocess
|
||||
import sys
|
||||
from typing import Callable, List, Optional, Union
|
||||
import time
|
||||
import websockets
|
||||
|
||||
from command import Command, ServerInfo, SubscriptionCommand
|
||||
from common import eprint
|
||||
from config_file import ConfigFile
|
||||
|
||||
|
||||
class RippleClient:
|
||||
'''Client to send commands to the rippled server'''
|
||||
def __init__(self,
|
||||
*,
|
||||
config: ConfigFile,
|
||||
exe: str,
|
||||
command_log: Optional[str] = None):
|
||||
self.config = config
|
||||
self.exe = exe
|
||||
self.command_log = command_log
|
||||
section = config.port_ws_admin_local
|
||||
self.websocket_uri = f'{section.protocol}://{section.ip}:{section.port}'
|
||||
self.subscription_websockets = []
|
||||
self.tasks = []
|
||||
self.pid = None
|
||||
if command_log:
|
||||
with open(self.command_log, 'w') as f:
|
||||
f.write(f'# Start \n')
|
||||
|
||||
@property
|
||||
def config_file_name(self):
|
||||
return self.config.get_file_name()
|
||||
|
||||
def shutdown(self):
|
||||
try:
|
||||
group = asyncio.gather(*self.tasks, return_exceptions=True)
|
||||
group.cancel()
|
||||
asyncio.get_event_loop().run_until_complete(group)
|
||||
for ws in self.subscription_websockets:
|
||||
asyncio.get_event_loop().run_until_complete(ws.close())
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
def set_pid(self, pid: int):
|
||||
self.pid = pid
|
||||
|
||||
def get_pid(self) -> Optional[int]:
|
||||
return self.pid
|
||||
|
||||
def get_config(self) -> ConfigFile:
|
||||
return self.config
|
||||
|
||||
# Get a dict of the server_state, validated_ledger_seq, and complete_ledgers
def get_brief_server_info(self) -> dict:
    # Placeholder 'NA' values are returned when the server process is
    # not running (pid unset or -1) or when the server_info response
    # carries no 'info' payload.
    ret = {
        'server_state': 'NA',
        'ledger_seq': 'NA',
        'complete_ledgers': 'NA'
    }
    if not self.pid or self.pid == -1:
        return ret
    r = self.send_command(ServerInfo())
    if 'info' not in r:
        return ret
    r = r['info']
    # Copy only the fields that are present; missing fields keep 'NA'.
    for f in ['server_state', 'complete_ledgers']:
        if f in r:
            ret[f] = r[f]
    if 'validated_ledger' in r:
        ret['ledger_seq'] = r['validated_ledger']['seq']
    return ret
|
||||
|
||||
def _write_command_log_command(self, cmd: str, cmd_index: int) -> None:
|
||||
if not self.command_log:
|
||||
return
|
||||
with open(self.command_log, 'a') as f:
|
||||
f.write(f'\n\n# command {cmd_index}\n')
|
||||
f.write(f'{cmd}')
|
||||
|
||||
def _write_command_log_result(self, result: str, cmd_index: int) -> None:
|
||||
if not self.command_log:
|
||||
return
|
||||
with open(self.command_log, 'a') as f:
|
||||
f.write(f'\n\n# result {cmd_index}\n')
|
||||
f.write(f'{result}')
|
||||
|
||||
def _send_command_line_command(self, cmd_id: int, *args) -> dict:
    '''Send the command to the rippled server using the command line interface'''
    # '-q' suppresses banner output; '--' separates rippled options from
    # the command and its arguments.
    to_run = [self.exe, '-q', '--conf', self.config_file_name, '--']
    to_run.extend(args)
    # NOTE(review): `to_run` is a list here, so the command log records
    # its repr rather than a shell-style string — confirm intended.
    self._write_command_log_command(to_run, cmd_id)
    max_retries = 4
    # Retry transient failures (e.g. server still starting up); the
    # final attempt re-raises instead of retrying.
    for retry_count in range(0, max_retries + 1):
        try:
            r = subprocess.check_output(to_run)
            self._write_command_log_result(r, cmd_id)
            # rippled wraps the payload under the 'result' key.
            return json.loads(r.decode('utf-8'))['result']
        except Exception as e:
            if retry_count == max_retries:
                raise
            eprint(
                f'Got exception: {str(e)}\nretrying..{retry_count+1} of {max_retries}'
            )
            time.sleep(1)  # give process time to startup
|
||||
|
||||
async def _send_websock_command(
|
||||
self,
|
||||
cmd: Command,
|
||||
conn: Optional[websockets.client.Connect] = None) -> dict:
|
||||
assert self.websocket_uri
|
||||
if conn is None:
|
||||
async with websockets.connect(self.websocket_uri) as ws:
|
||||
return await self._send_websock_command(cmd, ws)
|
||||
|
||||
to_send = json.dumps(cmd.get_websocket_dict())
|
||||
self._write_command_log_command(to_send, cmd.cmd_id)
|
||||
await conn.send(to_send)
|
||||
r = await conn.recv()
|
||||
self._write_command_log_result(r, cmd.cmd_id)
|
||||
j = json.loads(r)
|
||||
if not 'result' in j:
|
||||
eprint(
|
||||
f'Error sending websocket command: {json.dumps(cmd.get_websocket_dict(), indent=1)}'
|
||||
)
|
||||
eprint(f'Result: {json.dumps(j, indent=1)}')
|
||||
raise ValueError('Error sending websocket command')
|
||||
return j['result']
|
||||
|
||||
def send_command(self, cmd: Command) -> dict:
|
||||
'''Send the command to the rippled server'''
|
||||
if self.websocket_uri:
|
||||
return asyncio.get_event_loop().run_until_complete(
|
||||
self._send_websock_command(cmd))
|
||||
return self._send_command_line_command(cmd.cmd_id,
|
||||
*cmd.get_command_line_list())
|
||||
|
||||
# Need async version to close ledgers from async functions
|
||||
async def async_send_command(self, cmd: Command) -> dict:
|
||||
'''Send the command to the rippled server'''
|
||||
if self.websocket_uri:
|
||||
return await self._send_websock_command(cmd)
|
||||
return self._send_command_line_command(cmd.cmd_id,
|
||||
*cmd.get_command_line_list())
|
||||
|
||||
def send_subscribe_command(
|
||||
self,
|
||||
cmd: SubscriptionCommand,
|
||||
callback: Optional[Callable[[dict], None]] = None) -> dict:
|
||||
'''Send the command to the rippled server'''
|
||||
assert self.websocket_uri
|
||||
ws = cmd.websocket
|
||||
if ws is None:
|
||||
# subscribe
|
||||
assert callback
|
||||
ws = asyncio.get_event_loop().run_until_complete(
|
||||
websockets.connect(self.websocket_uri))
|
||||
self.subscription_websockets.append(ws)
|
||||
result = asyncio.get_event_loop().run_until_complete(
|
||||
self._send_websock_command(cmd, ws))
|
||||
if cmd.websocket is not None:
|
||||
# unsubscribed. close the websocket
|
||||
self.subscription_websockets.remove(cmd.websocket)
|
||||
cmd.websocket.close()
|
||||
cmd.websocket = None
|
||||
else:
|
||||
# setup a task to read the websocket
|
||||
cmd.websocket = ws # must be set after the _send_websock_command or will unsubscribe
|
||||
|
||||
async def subscribe_callback(ws: websockets.client.Connect,
|
||||
cb: Callable[[dict], None]):
|
||||
while True:
|
||||
r = await ws.recv()
|
||||
d = json.loads(r)
|
||||
cb(d)
|
||||
|
||||
task = asyncio.get_event_loop().create_task(
|
||||
subscribe_callback(cmd.websocket, callback))
|
||||
self.tasks.append(task)
|
||||
return result
|
||||
|
||||
def stop(self):
    '''Stop the server.'''
    # Fixed: `Stop` is not among this module's imports from `command`
    # (only Command, ServerInfo, SubscriptionCommand are), so this call
    # raised a NameError. Import it locally to resolve the name.
    from command import Stop
    return self.send_command(Stop())
|
||||
|
||||
def set_log_level(self, severity: str, *, partition: Optional[str] = None):
    '''Set the server log level.

    severity: new log severity (e.g. 'fatal', 'trace').
    partition: optional log partition to restrict the change to.
    '''
    # Fixed: `LogLevel` is not imported at module level in this file.
    from command import LogLevel
    # Fixed: both the keyword and the variable were misspelled
    # `parition`, raising a NameError whenever this method was called.
    return self.send_command(LogLevel(severity, partition=partition))
|
||||
583
bin/sidechain/python/sidechain.py
Executable file
583
bin/sidechain/python/sidechain.py
Executable file
@@ -0,0 +1,583 @@
|
||||
#!/usr/bin/env python3
|
||||
'''
|
||||
Script to test and debug sidechains.
|
||||
|
||||
The mainchain exe location can be set through the command line or
|
||||
the environment variable RIPPLED_MAINCHAIN_EXE
|
||||
|
||||
The sidechain exe location can be set through the command line or
|
||||
the environment variable RIPPLED_SIDECHAIN_EXE
|
||||
|
||||
The configs_dir (generated with create_config_files.py) can be set through the command line
|
||||
or the environment variable RIPPLED_SIDECHAIN_CFG_DIR
|
||||
'''
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from multiprocessing import Process, Value
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from typing import Callable, Dict, List, Optional
|
||||
|
||||
from app import App, single_client_app, testnet_app, configs_for_testnet
|
||||
from command import AccountInfo, AccountTx, LedgerAccept, LogLevel, Subscribe
|
||||
from common import Account, Asset, eprint, disable_eprint, XRP
|
||||
from config_file import ConfigFile
|
||||
import interactive
|
||||
from log_analyzer import convert_log
|
||||
from test_utils import mc_wait_for_payment_detect, sc_wait_for_payment_detect, mc_connect_subscription, sc_connect_subscription
|
||||
from transaction import AccountSet, Payment, SignerListSet, SetRegularKey, Ticket, Trust
|
||||
|
||||
|
||||
def parse_args_helper(parser: argparse.ArgumentParser):
    '''Add the sidechain test/debug command line options to `parser`.'''

    parser.add_argument(
        '--debug_sidechain',
        '-ds',
        action='store_true',
        help=('Mode to debug sidechain (prompt to run sidechain in gdb)'),
    )

    parser.add_argument(
        '--debug_mainchain',
        '-dm',
        # Fixed help text: previously said "sidechain" for this option.
        action='store_true',
        help=('Mode to debug mainchain (prompt to run mainchain in gdb)'),
    )

    parser.add_argument(
        '--exe_mainchain',
        '-em',
        help=('path to mainchain rippled executable'),
    )

    parser.add_argument(
        '--exe_sidechain',
        '-es',
        # Fixed help text: was a copy-paste of the mainchain help.
        help=('path to sidechain rippled executable'),
    )

    parser.add_argument(
        '--cfgs_dir',
        '-c',
        help=
        ('path to configuration file dir (generated with create_config_files.py)'
         ),
    )

    parser.add_argument(
        '--standalone',
        '-a',
        action='store_true',
        help=('run standalone tests'),
    )

    parser.add_argument(
        '--interactive',
        '-i',
        action='store_true',
        help=('run interactive repl'),
    )

    parser.add_argument(
        '--quiet',
        '-q',
        action='store_true',
        help=('Disable printing errors (eprint disabled)'),
    )

    parser.add_argument(
        '--verbose',
        '-v',
        action='store_true',
        help=('Enable printing errors (eprint enabled)'),
    )

    # Pauses are used for attaching debuggers and looking at logs at known checkpoints
    parser.add_argument(
        '--with_pauses',
        '-p',
        action='store_true',
        help=
        ('Add pauses at certain checkpoints in tests until "enter" key is hit'
         ),
    )

    parser.add_argument(
        '--hooks_dir',
        help=('path to hooks dir'),
    )
|
||||
|
||||
|
||||
def parse_args():
    '''Build the sidechain test argument parser and return the known
    command line arguments (unknown arguments are ignored).
    '''
    parser = argparse.ArgumentParser(description='Test and debug sidechains')
    parse_args_helper(parser)
    known, _unknown = parser.parse_known_args()
    return known
|
||||
|
||||
|
||||
class Params:
|
||||
def __init__(self, *, configs_dir: Optional[str] = None):
    '''Gather all run parameters for a sidechain test session.

    Values are resolved, lowest to highest precedence, from the
    environment, the command line, and the `configs_dir` keyword
    argument. When no configs directory can be determined, the config
    and account fields are left as `None` (check_error reports this).
    '''
    args = parse_args()

    self.debug_sidechain = False
    if args.debug_sidechain:
        self.debug_sidechain = args.debug_sidechain
    self.debug_mainchain = False
    if args.debug_mainchain:
        # Fixed: previously read the undefined name `arts`, raising a
        # NameError whenever --debug_mainchain was specified.
        self.debug_mainchain = args.debug_mainchain

    # Undocumented feature: if the environment variable RIPPLED_SIDECHAIN_RR is set, it is
    # assumed to point to the rr executable. Sidechain server 0 will then be run under rr.
    self.sidechain_rr = None
    if 'RIPPLED_SIDECHAIN_RR' in os.environ:
        self.sidechain_rr = os.environ['RIPPLED_SIDECHAIN_RR']

    self.standalone = args.standalone
    self.with_pauses = args.with_pauses
    self.interactive = args.interactive
    self.quiet = args.quiet
    self.verbose = args.verbose

    # Executable locations: env var first, command line overrides.
    self.mainchain_exe = None
    if 'RIPPLED_MAINCHAIN_EXE' in os.environ:
        self.mainchain_exe = os.environ['RIPPLED_MAINCHAIN_EXE']
    if args.exe_mainchain:
        self.mainchain_exe = args.exe_mainchain

    self.sidechain_exe = None
    if 'RIPPLED_SIDECHAIN_EXE' in os.environ:
        self.sidechain_exe = os.environ['RIPPLED_SIDECHAIN_EXE']
    if args.exe_sidechain:
        self.sidechain_exe = args.exe_sidechain

    # Configs dir: env var < command line < constructor keyword.
    self.configs_dir = None
    if 'RIPPLED_SIDECHAIN_CFG_DIR' in os.environ:
        self.configs_dir = os.environ['RIPPLED_SIDECHAIN_CFG_DIR']
    if args.cfgs_dir:
        self.configs_dir = args.cfgs_dir
    if configs_dir is not None:
        self.configs_dir = configs_dir

    self.hooks_dir = None
    if 'RIPPLED_SIDECHAIN_HOOKS_DIR' in os.environ:
        self.hooks_dir = os.environ['RIPPLED_SIDECHAIN_HOOKS_DIR']
    if args.hooks_dir:
        self.hooks_dir = args.hooks_dir

    if not self.configs_dir:
        # Nothing else can be resolved without a configs dir; leave the
        # remaining fields unset and let check_error report the problem.
        self.mainchain_config = None
        self.sidechain_config = None
        self.sidechain_bootstrap_config = None
        self.genesis_account = None
        self.mc_door_account = None
        self.user_account = None
        self.sc_door_account = None
        self.federators = None
        return

    if self.standalone:
        self.mainchain_config = ConfigFile(
            file_name=f'{self.configs_dir}/main.no_shards.dog/rippled.cfg')
        self.sidechain_config = ConfigFile(
            file_name=
            f'{self.configs_dir}/main.no_shards.dog.sidechain/rippled.cfg')
        self.sidechain_bootstrap_config = ConfigFile(
            file_name=
            f'{self.configs_dir}/main.no_shards.dog.sidechain/sidechain_bootstrap.cfg'
        )
    else:
        self.mainchain_config = ConfigFile(
            file_name=
            f'{self.configs_dir}/sidechain_testnet/main.no_shards.mainchain_0/rippled.cfg'
        )
        self.sidechain_config = ConfigFile(
            file_name=
            f'{self.configs_dir}/sidechain_testnet/sidechain_0/rippled.cfg')
        self.sidechain_bootstrap_config = ConfigFile(
            file_name=
            f'{self.configs_dir}/sidechain_testnet/sidechain_0/sidechain_bootstrap.cfg'
        )

    self.genesis_account = Account(
        account_id='rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh',
        secret_key='masterpassphrase',
        nickname='genesis')
    self.mc_door_account = Account(
        account_id=self.sidechain_config.sidechain.mainchain_account,
        secret_key=self.sidechain_bootstrap_config.sidechain.mainchain_secret,
        nickname='door')
    self.user_account = Account(
        account_id='rJynXY96Vuq6B58pST9K5Ak5KgJ2JcRsQy',
        secret_key='snVsJfrr2MbVpniNiUU6EDMGBbtzN',
        nickname='alice')

    self.sc_door_account = Account(
        account_id='rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh',
        secret_key='masterpassphrase',
        nickname='door')
    # One federator public key per line; second whitespace-separated
    # field of each line in the bootstrap config's federators section.
    self.federators = [
        l.split()[1].strip() for l in
        self.sidechain_bootstrap_config.sidechain_federators.get_lines()
    ]
|
||||
|
||||
def check_error(self) -> str:
    '''Return a description of the first configuration problem found,
    or `None` when the parameters are usable.
    '''
    problems = [
        (not self.mainchain_exe,
         'Missing mainchain_exe location. Either set the env variable RIPPLED_MAINCHAIN_EXE or use the --exe_mainchain command line switch'
         ),
        (not self.sidechain_exe,
         'Missing sidechain_exe location. Either set the env variable RIPPLED_SIDECHAIN_EXE or use the --exe_sidechain command line switch'
         ),
        (not self.configs_dir,
         'Missing configs directory location. Either set the env variable RIPPLED_SIDECHAIN_CFG_DIR or use the --cfgs_dir command line switch'
         ),
        (self.verbose and self.quiet,
         'Cannot specify both verbose and quiet options at the same time'),
    ]
    for is_error, message in problems:
        if is_error:
            return message
|
||||
|
||||
|
||||
mainDoorKeeper = 0
|
||||
sideDoorKeeper = 1
|
||||
updateSignerList = 2
|
||||
|
||||
|
||||
def setup_mainchain(mc_app: App,
|
||||
params: Params,
|
||||
setup_user_accounts: bool = True):
|
||||
mc_app.add_to_keymanager(params.mc_door_account)
|
||||
if setup_user_accounts:
|
||||
mc_app.add_to_keymanager(params.user_account)
|
||||
|
||||
mc_app(LogLevel('fatal'))
|
||||
|
||||
# Allow rippling through the genesis account
|
||||
mc_app(AccountSet(account=params.genesis_account).set_default_ripple(True))
|
||||
mc_app.maybe_ledger_accept()
|
||||
|
||||
# Create and fund the mc door account
|
||||
mc_app(
|
||||
Payment(account=params.genesis_account,
|
||||
dst=params.mc_door_account,
|
||||
amt=XRP(10_000)))
|
||||
mc_app.maybe_ledger_accept()
|
||||
|
||||
# Create a trust line so USD/root account ious can be sent cross chain
|
||||
mc_app(
|
||||
Trust(account=params.mc_door_account,
|
||||
limit_amt=Asset(value=1_000_000,
|
||||
currency='USD',
|
||||
issuer=params.genesis_account)))
|
||||
|
||||
# set the chain's signer list and disable the master key
|
||||
divide = 4 * len(params.federators)
|
||||
by = 5
|
||||
quorum = (divide + by - 1) // by
|
||||
mc_app(
|
||||
SignerListSet(account=params.mc_door_account,
|
||||
quorum=quorum,
|
||||
keys=params.federators))
|
||||
mc_app.maybe_ledger_accept()
|
||||
r = mc_app(Ticket(account=params.mc_door_account, src_tag=mainDoorKeeper))
|
||||
mc_app.maybe_ledger_accept()
|
||||
mc_app(Ticket(account=params.mc_door_account, src_tag=sideDoorKeeper))
|
||||
mc_app.maybe_ledger_accept()
|
||||
mc_app(Ticket(account=params.mc_door_account, src_tag=updateSignerList))
|
||||
mc_app.maybe_ledger_accept()
|
||||
mc_app(AccountSet(account=params.mc_door_account).set_disable_master())
|
||||
mc_app.maybe_ledger_accept()
|
||||
|
||||
if setup_user_accounts:
|
||||
# Create and fund a regular user account
|
||||
mc_app(
|
||||
Payment(account=params.genesis_account,
|
||||
dst=params.user_account,
|
||||
amt=XRP(2_000)))
|
||||
mc_app.maybe_ledger_accept()
|
||||
|
||||
|
||||
def setup_sidechain(sc_app: App,
|
||||
params: Params,
|
||||
setup_user_accounts: bool = True):
|
||||
sc_app.add_to_keymanager(params.sc_door_account)
|
||||
if setup_user_accounts:
|
||||
sc_app.add_to_keymanager(params.user_account)
|
||||
|
||||
sc_app(LogLevel('fatal'))
|
||||
sc_app(LogLevel('trace', partition='SidechainFederator'))
|
||||
|
||||
# set the chain's signer list and disable the master key
|
||||
divide = 4 * len(params.federators)
|
||||
by = 5
|
||||
quorum = (divide + by - 1) // by
|
||||
sc_app(
|
||||
SignerListSet(account=params.genesis_account,
|
||||
quorum=quorum,
|
||||
keys=params.federators))
|
||||
sc_app.maybe_ledger_accept()
|
||||
sc_app(Ticket(account=params.genesis_account, src_tag=mainDoorKeeper))
|
||||
sc_app.maybe_ledger_accept()
|
||||
sc_app(Ticket(account=params.genesis_account, src_tag=sideDoorKeeper))
|
||||
sc_app.maybe_ledger_accept()
|
||||
sc_app(Ticket(account=params.genesis_account, src_tag=updateSignerList))
|
||||
sc_app.maybe_ledger_accept()
|
||||
sc_app(AccountSet(account=params.genesis_account).set_disable_master())
|
||||
sc_app.maybe_ledger_accept()
|
||||
|
||||
|
||||
def _xchain_transfer(from_chain: App, to_chain: App, src: Account,
                     dst: Account, amt: Asset, from_chain_door: Account,
                     to_chain_door: Account):
    # Move `amt` cross-chain by paying the source chain's door account,
    # with the destination account id attached as a hex-encoded memo
    # (presumably consumed by the federators — verify against the
    # federator implementation).
    # NOTE(review): `to_chain_door` is currently unused in this body.
    memos = [{'Memo': {'MemoData': dst.account_id_str_as_hex()}}]
    from_chain(Payment(account=src, dst=from_chain_door, amt=amt, memos=memos))
    from_chain.maybe_ledger_accept()
    if to_chain.standalone:
        # from_chain (side chain) sends a txn, but won't close the to_chain (main chain) ledger
        time.sleep(1)
        to_chain.maybe_ledger_accept()
|
||||
|
||||
|
||||
def main_to_side_transfer(mc_app: App, sc_app: App, src: Account, dst: Account,
|
||||
amt: Asset, params: Params):
|
||||
_xchain_transfer(mc_app, sc_app, src, dst, amt, params.mc_door_account,
|
||||
params.sc_door_account)
|
||||
|
||||
|
||||
def side_to_main_transfer(mc_app: App, sc_app: App, src: Account, dst: Account,
|
||||
amt: Asset, params: Params):
|
||||
_xchain_transfer(sc_app, mc_app, src, dst, amt, params.sc_door_account,
|
||||
params.mc_door_account)
|
||||
|
||||
|
||||
def simple_test(mc_app: App, sc_app: App, params: Params):
|
||||
try:
|
||||
bob = sc_app.create_account('bob')
|
||||
main_to_side_transfer(mc_app, sc_app, params.user_account, bob,
|
||||
XRP(200), params)
|
||||
main_to_side_transfer(mc_app, sc_app, params.user_account, bob,
|
||||
XRP(60), params)
|
||||
|
||||
if params.with_pauses:
|
||||
_convert_log_files_to_json(
|
||||
mc_app.get_configs() + sc_app.get_configs(),
|
||||
'checkpoint1.json')
|
||||
input(
|
||||
"Pausing to check for main -> side txns (press enter to continue)"
|
||||
)
|
||||
|
||||
side_to_main_transfer(mc_app, sc_app, bob, params.user_account, XRP(9),
|
||||
params)
|
||||
side_to_main_transfer(mc_app, sc_app, bob, params.user_account,
|
||||
XRP(11), params)
|
||||
|
||||
if params.with_pauses:
|
||||
input(
|
||||
"Pausing to check for side -> main txns (press enter to continue)"
|
||||
)
|
||||
finally:
|
||||
_convert_log_files_to_json(mc_app.get_configs() + sc_app.get_configs(),
|
||||
'final.json')
|
||||
|
||||
|
||||
def _rm_debug_log(config: ConfigFile):
    '''Best-effort removal of the debug log file named in `config`.

    Failures (missing config entry, file already gone, permissions) are
    deliberately ignored: a stale or absent log must never stop a run.
    '''
    try:
        debug_log = config.debug_logfile.get_line()
        if debug_log:
            print(f'removing debug file: {debug_log}', flush=True)
            os.remove(debug_log)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
|
||||
|
||||
|
||||
def _standalone_with_callback(params: Params,
|
||||
callback: Callable[[App, App], None],
|
||||
setup_user_accounts: bool = True):
|
||||
|
||||
if (params.debug_mainchain):
|
||||
input("Start mainchain server and press enter to continue: ")
|
||||
else:
|
||||
_rm_debug_log(params.mainchain_config)
|
||||
with single_client_app(config=params.mainchain_config,
|
||||
exe=params.mainchain_exe,
|
||||
standalone=True,
|
||||
run_server=not params.debug_mainchain) as mc_app:
|
||||
|
||||
mc_connect_subscription(mc_app, params.mc_door_account)
|
||||
setup_mainchain(mc_app, params, setup_user_accounts)
|
||||
|
||||
if (params.debug_sidechain):
|
||||
input("Start sidechain server and press enter to continue: ")
|
||||
else:
|
||||
_rm_debug_log(params.sidechain_config)
|
||||
with single_client_app(
|
||||
config=params.sidechain_config,
|
||||
exe=params.sidechain_exe,
|
||||
standalone=True,
|
||||
run_server=not params.debug_sidechain) as sc_app:
|
||||
|
||||
sc_connect_subscription(sc_app, params.sc_door_account)
|
||||
setup_sidechain(sc_app, params, setup_user_accounts)
|
||||
callback(mc_app, sc_app)
|
||||
|
||||
|
||||
def _convert_log_files_to_json(to_convert: List[ConfigFile], suffix: str):
    '''
    Convert the log file to json

    For every config in `to_convert`, converts its debug log file to a
    json file named `<debug_log>.<suffix>`, replacing any previous
    conversion. Each config is processed independently; one failure
    does not stop the rest.
    '''
    for c in to_convert:
        try:
            debug_log = c.debug_logfile.get_line()
            if not os.path.exists(debug_log):
                continue
            converted_log = f'{debug_log}.{suffix}'
            if os.path.exists(converted_log):
                os.remove(converted_log)
            print(f'Converting log {debug_log} to {converted_log}', flush=True)
            convert_log(debug_log, converted_log, pure_json=True)
        except Exception as e:
            # Narrowed from a bare `except:`; the diagnostic now includes
            # the error detail (was an f-string with no placeholders).
            eprint(f'Exception converting log: {e}')
|
||||
|
||||
|
||||
def _multinode_with_callback(params: Params,
|
||||
callback: Callable[[App, App], None],
|
||||
setup_user_accounts: bool = True):
|
||||
|
||||
mainchain_cfg = ConfigFile(
|
||||
file_name=
|
||||
f'{params.configs_dir}/sidechain_testnet/main.no_shards.mainchain_0/rippled.cfg'
|
||||
)
|
||||
_rm_debug_log(mainchain_cfg)
|
||||
if params.debug_mainchain:
|
||||
input("Start mainchain server and press enter to continue: ")
|
||||
with single_client_app(config=mainchain_cfg,
|
||||
exe=params.mainchain_exe,
|
||||
standalone=True,
|
||||
run_server=not params.debug_mainchain) as mc_app:
|
||||
|
||||
if params.with_pauses:
|
||||
input("Pausing after mainchain start (press enter to continue)")
|
||||
|
||||
mc_connect_subscription(mc_app, params.mc_door_account)
|
||||
setup_mainchain(mc_app, params, setup_user_accounts)
|
||||
if params.with_pauses:
|
||||
input("Pausing after mainchain setup (press enter to continue)")
|
||||
|
||||
testnet_configs = configs_for_testnet(
|
||||
f'{params.configs_dir}/sidechain_testnet/sidechain_')
|
||||
for c in testnet_configs:
|
||||
_rm_debug_log(c)
|
||||
|
||||
run_server_list = [True] * len(testnet_configs)
|
||||
if params.debug_sidechain:
|
||||
run_server_list[0] = False
|
||||
input(
|
||||
f'Start testnet server {testnet_configs[0].get_file_name()} and press enter to continue: '
|
||||
)
|
||||
|
||||
with testnet_app(exe=params.sidechain_exe,
|
||||
configs=testnet_configs,
|
||||
run_server=run_server_list,
|
||||
sidechain_rr=params.sidechain_rr) as n_app:
|
||||
|
||||
if params.with_pauses:
|
||||
input("Pausing after testnet start (press enter to continue)")
|
||||
|
||||
sc_connect_subscription(n_app, params.sc_door_account)
|
||||
setup_sidechain(n_app, params, setup_user_accounts)
|
||||
if params.with_pauses:
|
||||
input(
|
||||
"Pausing after sidechain setup (press enter to continue)")
|
||||
callback(mc_app, n_app)
|
||||
|
||||
|
||||
def standalone_test(params: Params):
|
||||
def callback(mc_app: App, sc_app: App):
|
||||
simple_test(mc_app, sc_app, params)
|
||||
|
||||
_standalone_with_callback(params, callback)
|
||||
|
||||
|
||||
def multinode_test(params: Params):
|
||||
def callback(mc_app: App, sc_app: App):
|
||||
simple_test(mc_app, sc_app, params)
|
||||
|
||||
_multinode_with_callback(params, callback)
|
||||
|
||||
|
||||
# The mainchain runs in standalone mode. Most operations - like cross chain
# payments - will automatically close ledgers. However, some operations, like
# refunds need an extra close. This loop automatically closes ledgers.
def close_mainchain_ledgers(stop_token: Value, params: Params, sleep_time=4):
    # Intended to run in a child process (see the interactive repl
    # callbacks); polls every `sleep_time` seconds until the parent
    # zeroes `stop_token`.
    # run_server=False: attach a client to the already-running server
    # rather than starting a new one.
    with single_client_app(config=params.mainchain_config,
                          exe=params.mainchain_exe,
                          standalone=True,
                          run_server=False) as mc_app:
        while stop_token.value != 0:
            mc_app.maybe_ledger_accept()
            time.sleep(sleep_time)
|
||||
|
||||
|
||||
def standalone_interactive_repl(params: Params):
|
||||
def callback(mc_app: App, sc_app: App):
|
||||
# process will run while stop token is non-zero
|
||||
stop_token = Value('i', 1)
|
||||
p = None
|
||||
if mc_app.standalone:
|
||||
p = Process(target=close_mainchain_ledgers,
|
||||
args=(stop_token, params))
|
||||
p.start()
|
||||
try:
|
||||
interactive.repl(mc_app, sc_app)
|
||||
finally:
|
||||
if p:
|
||||
stop_token.value = 0
|
||||
p.join()
|
||||
|
||||
_standalone_with_callback(params, callback, setup_user_accounts=False)
|
||||
|
||||
|
||||
def multinode_interactive_repl(params: Params):
|
||||
def callback(mc_app: App, sc_app: App):
|
||||
# process will run while stop token is non-zero
|
||||
stop_token = Value('i', 1)
|
||||
p = None
|
||||
if mc_app.standalone:
|
||||
p = Process(target=close_mainchain_ledgers,
|
||||
args=(stop_token, params))
|
||||
p.start()
|
||||
try:
|
||||
interactive.repl(mc_app, sc_app)
|
||||
finally:
|
||||
if p:
|
||||
stop_token.value = 0
|
||||
p.join()
|
||||
|
||||
_multinode_with_callback(params, callback, setup_user_accounts=False)
|
||||
|
||||
|
||||
def main():
    '''Entry point: run the sidechain tests or the interactive repl.

    Validates parameters, optionally silences eprint, then dispatches
    on the (interactive, standalone) flags.
    '''
    params = Params()
    interactive.set_hooks_dir(params.hooks_dir)

    err_str = params.check_error()
    if err_str:
        eprint(err_str)
        sys.exit(1)

    if params.quiet:
        print("Disabling eprint")
        disable_eprint()

    if params.interactive and params.standalone:
        standalone_interactive_repl(params)
    elif params.interactive:
        multinode_interactive_repl(params)
    elif params.standalone:
        standalone_test(params)
    else:
        multinode_test(params)


if __name__ == '__main__':
    main()
|
||||
176
bin/sidechain/python/test_utils.py
Normal file
176
bin/sidechain/python/test_utils.py
Normal file
@@ -0,0 +1,176 @@
|
||||
import asyncio
|
||||
import collections
|
||||
from contextlib import contextmanager
|
||||
import json
|
||||
import logging
|
||||
import pprint
|
||||
import time
|
||||
from typing import Callable, Dict, List, Optional
|
||||
|
||||
from app import App, balances_dataframe
|
||||
from common import Account, Asset, XRP, eprint
|
||||
from command import Subscribe
|
||||
|
||||
MC_SUBSCRIBE_QUEUE = []
|
||||
SC_SUBSCRIBE_QUEUE = []
|
||||
|
||||
|
||||
def _mc_subscribe_callback(v: dict):
|
||||
MC_SUBSCRIBE_QUEUE.append(v)
|
||||
logging.info(f'mc subscribe_callback:\n{json.dumps(v, indent=1)}')
|
||||
|
||||
|
||||
def _sc_subscribe_callback(v: dict):
|
||||
SC_SUBSCRIBE_QUEUE.append(v)
|
||||
logging.info(f'sc subscribe_callback:\n{json.dumps(v, indent=1)}')
|
||||
|
||||
|
||||
def mc_connect_subscription(app: App, door_account: Account):
|
||||
app(Subscribe(account_history_account=door_account),
|
||||
_mc_subscribe_callback)
|
||||
|
||||
|
||||
def sc_connect_subscription(app: App, door_account: Account):
|
||||
app(Subscribe(account_history_account=door_account),
|
||||
_sc_subscribe_callback)
|
||||
|
||||
|
||||
# This pops elements off the subscribe_queue until the transaction is found
|
||||
# It mofifies the queue in place.
|
||||
async def async_wait_for_payment_detect(app: App, subscribe_queue: List[dict],
|
||||
src: Account, dst: Account,
|
||||
amt_asset: Asset):
|
||||
logging.info(
|
||||
f'Wait for payment {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
n_txns = 10 # keep this many txn in a circular buffer.
|
||||
# If the payment is not detected, write them to the log.
|
||||
last_n_paytxns = collections.deque(maxlen=n_txns)
|
||||
for i in range(30):
|
||||
while subscribe_queue:
|
||||
d = subscribe_queue.pop(0)
|
||||
if 'transaction' not in d:
|
||||
continue
|
||||
txn = d['transaction']
|
||||
if txn['TransactionType'] != 'Payment':
|
||||
continue
|
||||
|
||||
txn_asset = Asset(from_rpc_result=txn['Amount'])
|
||||
if txn['Account'] == src.account_id and txn[
|
||||
'Destination'] == dst.account_id and txn_asset == amt_asset:
|
||||
if d['engine_result_code'] == 0:
|
||||
logging.info(
|
||||
f'Found payment {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
return
|
||||
else:
|
||||
logging.error(
|
||||
f'Expected payment failed {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
raise ValueError(
|
||||
f'Expected payment failed {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
else:
|
||||
last_n_paytxns.append(txn)
|
||||
if i > 0 and not (i % 5):
|
||||
logging.warning(
|
||||
f'Waiting for txn detect {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
# side chain can send transactions to the main chain, but won't close the ledger
|
||||
# We don't know when the transaction will be sent, so may need to close the ledger here
|
||||
await app.async_maybe_ledger_accept()
|
||||
await asyncio.sleep(2)
|
||||
logging.warning(
|
||||
f'Last {len(last_n_paytxns)} pay txns while waiting for payment detect'
|
||||
)
|
||||
for t in last_n_paytxns:
|
||||
logging.warning(
|
||||
f'Detected pay transaction while waiting for payment: {t}')
|
||||
logging.error(
|
||||
f'Expected txn detect {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
raise ValueError(
|
||||
f'Expected txn detect {src.account_id = } {dst.account_id = } {amt_asset = }'
|
||||
)
|
||||
|
||||
|
||||
def mc_wait_for_payment_detect(app: App, src: Account, dst: Account,
                               amt_asset: Asset):
    '''Block until the given payment is detected on the mainchain.

    Thin synchronous wrapper around `async_wait_for_payment_detect` using
    the mainchain subscription queue.
    '''
    logging.info(f'mainchain waiting for payment detect')
    coro = async_wait_for_payment_detect(app, MC_SUBSCRIBE_QUEUE, src, dst,
                                         amt_asset)
    return asyncio.get_event_loop().run_until_complete(coro)
|
||||
|
||||
|
||||
def sc_wait_for_payment_detect(app: App, src: Account, dst: Account,
                               amt_asset: Asset):
    '''Block until the given payment is detected on the sidechain.

    Thin synchronous wrapper around `async_wait_for_payment_detect` using
    the sidechain subscription queue.
    '''
    logging.info(f'sidechain waiting for payment detect')
    coro = async_wait_for_payment_detect(app, SC_SUBSCRIBE_QUEUE, src, dst,
                                         amt_asset)
    return asyncio.get_event_loop().run_until_complete(coro)
|
||||
|
||||
|
||||
def wait_for_balance_change(app: App,
                            acc: Account,
                            pre_balance: Asset,
                            expected_diff: Optional[Asset] = None):
    '''Poll until `acc`'s balance moves away from `pre_balance`.

    If `expected_diff` is given, keep polling until the observed change
    equals it exactly. Polls roughly every two seconds for up to thirty
    attempts, closing a ledger between attempts; raises ValueError if no
    (matching) change is ever observed.
    '''
    logging.info(
        f'waiting for balance change {acc.account_id = } {pre_balance = } {expected_diff = }'
    )
    for attempt in range(30):
        new_bal = app.get_balance(acc, pre_balance(0))
        diff = new_bal - pre_balance
        changed = new_bal != pre_balance
        if changed:
            logging.info(
                f'Balance changed {acc.account_id = } {pre_balance = } {new_bal = } {diff = } {expected_diff = }'
            )
            # No expectation means any change is enough; otherwise keep
            # waiting until the change matches exactly.
            if expected_diff is None or diff == expected_diff:
                return
        app.maybe_ledger_accept()
        time.sleep(2)
        if attempt and attempt % 5 == 0:
            logging.warning(
                f'Waiting for balance to change {acc.account_id = } {pre_balance = }'
            )
    logging.error(
        f'Expected balance to change {acc.account_id = } {pre_balance = } {new_bal = } {diff = } {expected_diff = }'
    )
    raise ValueError(
        f'Expected balance to change {acc.account_id = } {pre_balance = } {new_bal = } {diff = } {expected_diff = }'
    )
|
||||
|
||||
|
||||
def log_chain_state(mc_app, sc_app, log, msg='Chain State'):
    '''Write account balances and federator info for both chains via `log`.

    `log` is a logging callable such as `logging.info` or `logging.error`.
    '''
    chain_pair = [mc_app, sc_app]
    names = ['mainchain', 'sidechain']
    table = balances_dataframe(chain_pair, names).to_string(
        float_format=lambda x: f'{x:,.6f}')
    log(f'{msg} Balances: \n{table}')
    info = sc_app.federator_info()
    log(f'{msg} Federator Info: \n{pprint.pformat(info)}')
|
||||
|
||||
|
||||
# Tests can set this to True to help debug test failures by showing account
# balances in the log before the test runs (read by `test_context` when its
# `verbose_logging` argument is None).
test_context_verbose_logging = False
|
||||
|
||||
|
||||
@contextmanager
|
||||
def test_context(mc_app, sc_app, verbose_logging: Optional[bool] = None):
|
||||
'''Write extra context info to the log on test failure'''
|
||||
global test_context_verbose_logging
|
||||
if verbose_logging is None:
|
||||
verbose_logging = test_context_verbose_logging
|
||||
try:
|
||||
if verbose_logging:
|
||||
log_chain_state(mc_app, sc_app, logging.info)
|
||||
start_time = time.monotonic()
|
||||
yield
|
||||
except:
|
||||
log_chain_state(mc_app, sc_app, logging.error)
|
||||
raise
|
||||
finally:
|
||||
elapased_time = time.monotonic() - start_time
|
||||
logging.info(f'Test elapsed time: {elapased_time}')
|
||||
if verbose_logging:
|
||||
log_chain_state(mc_app, sc_app, logging.info)
|
||||
216
bin/sidechain/python/testnet.py
Normal file
216
bin/sidechain/python/testnet.py
Normal file
@@ -0,0 +1,216 @@
|
||||
'''
|
||||
Bring up a rippled test network from a set of config files with fixed IPs.
|
||||
'''
|
||||
|
||||
from contextlib import contextmanager
|
||||
import glob
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from typing import Callable, List, Optional, Set, Union
|
||||
|
||||
from command import ServerInfo
|
||||
from config_file import ConfigFile
|
||||
from ripple_client import RippleClient
|
||||
|
||||
|
||||
class Network:
    '''A local rippled test network driven from a list of config files.

    Starts one rippled process per config, tracks which servers are
    running, and provides helpers to wait for synchronization and to
    stop/restart individual servers.
    '''

    # If run_server is None, run all the servers.
    # This is useful to help debugging
    def __init__(
            self,
            exe: str,
            configs: List[ConfigFile],
            *,
            command_logs: Optional[List[str]] = None,
            run_server: Optional[List[bool]] = None,
            # undocumented feature. If with_rr is not None, assume it points
            # to the rr debugger executable and run server 0 under rr
            with_rr: Optional[str] = None,
            extra_args: Optional[List[List[str]]] = None):

        self.with_rr = with_rr
        if not configs:
            raise ValueError(f'Must specify at least one config')

        if run_server and len(run_server) != len(configs):
            raise ValueError(
                f'run_server length must match number of configs (or be None): {len(configs) = } {len(run_server) = }'
            )

        self.configs = configs
        self.clients = []
        self.running_server_indexes = set()
        self.processes = {}

        # Pad run_server with True so every config has an entry.
        if not run_server:
            run_server = []
        run_server += [True] * (len(configs) - len(run_server))

        self.run_server = run_server

        # Pad command_logs with None so every config has an entry.
        if not command_logs:
            command_logs = []
        command_logs += [None] * (len(configs) - len(command_logs))

        self.command_logs = command_logs

        # remove the old database directories.
        # we want tests to start from the same empty state every time
        for config in self.configs:
            db_path = config.database_path.get_line()
            if db_path and os.path.isdir(db_path):
                files = glob.glob(f'{db_path}/**', recursive=True)
                for f in files:
                    if os.path.isdir(f):
                        continue
                    os.unlink(f)

        for config, log in zip(self.configs, self.command_logs):
            client = RippleClient(config=config, command_log=log, exe=exe)
            self.clients.append(client)

        self.servers_start(extra_args=extra_args)

    def shutdown(self):
        '''Shut down all clients and stop all running server processes.'''
        for a in self.clients:
            a.shutdown()

        self.servers_stop()

    def num_clients(self) -> int:
        '''Number of clients (one per config).'''
        return len(self.clients)

    def get_client(self, i: int) -> RippleClient:
        '''Return the client for server `i`.'''
        return self.clients[i]

    def get_configs(self) -> List[ConfigFile]:
        '''Return the config objects held by the clients.'''
        return [c.config for c in self.clients]

    def get_pids(self) -> List[int]:
        '''Return the PIDs of all servers that have one.'''
        return [c.get_pid() for c in self.clients if c.get_pid() is not None]

    # Get a dict of the server_state, validated_ledger_seq, and complete_ledgers
    def get_brief_server_info(self) -> dict:
        ret = {'server_state': [], 'ledger_seq': [], 'complete_ledgers': []}
        for c in self.clients:
            r = c.get_brief_server_info()
            for (k, v) in r.items():
                ret[k].append(v)
        return ret

    # returns true if the server is running, false if not. Note, this relies on
    # servers being shut down through the `servers_stop` interface. If a server
    # crashes, or is started or stopped through other means, an incorrect status
    # may be reported.
    def get_running_status(self) -> List[bool]:
        return [
            i in self.running_server_indexes for i in range(len(self.clients))
        ]

    def is_running(self, index: int) -> bool:
        '''True if server `index` was started and not stopped via this class.'''
        return index in self.running_server_indexes

    def wait_for_validated_ledger(self, server_index: Optional[int] = None):
        '''
        Don't return until the network has at least one validated ledger
        '''

        if server_index is None:
            for i in range(len(self.configs)):
                self.wait_for_validated_ledger(i)
            return

        client = self.clients[server_index]
        # Phase 1: wait (up to ~10 minutes) for the server to be proposing.
        for i in range(600):
            r = client.send_command(ServerInfo())
            state = None
            if 'info' in r:
                state = r['info']['server_state']
                if state == 'proposing':
                    print(f'Synced: {server_index} : {state}', flush=True)
                    break
            if not i % 10:
                print(f'Waiting for sync: {server_index} : {state}',
                      flush=True)
            time.sleep(1)

        # Phase 2: wait for at least one complete ledger range.
        for i in range(600):
            r = client.send_command(ServerInfo())
            state = None
            # Bug fix: initialize so the progress print below cannot hit an
            # UnboundLocalError when 'info' is missing from the response.
            complete_ledgers = None
            if 'info' in r:
                complete_ledgers = r['info']['complete_ledgers']
                if complete_ledgers and complete_ledgers != 'empty':
                    print(f'Have complete ledgers: {server_index} : {state}',
                          flush=True)
                    return
            if not i % 10:
                print(
                    f'Waiting for complete_ledgers: {server_index} : {complete_ledgers}',
                    flush=True)
            time.sleep(1)

        # Bug fix: this message was missing its f-string prefix.
        raise ValueError(f'Could not sync server {client.config_file_name}')

    def servers_start(self,
                      server_indexes: Optional[Union[Set[int],
                                                     List[int]]] = None,
                      *,
                      extra_args: Optional[List[List[str]]] = None):
        '''Start the given servers (all of them when `server_indexes` is
        None), honoring the `run_server` flags from the constructor.'''
        if server_indexes is None:
            server_indexes = [i for i in range(len(self.clients))]

        if extra_args is None:
            extra_args = []
        extra_args += [list()] * (len(self.configs) - len(extra_args))

        for i in server_indexes:
            if i in self.running_server_indexes or not self.run_server[i]:
                continue

            client = self.clients[i]
            to_run = [client.exe, '--conf', client.config_file_name]
            if self.with_rr and i == 0:
                to_run = [self.with_rr, 'record'] + to_run
                print(f'Starting server with rr {client.config_file_name}')
            else:
                print(f'Starting server {client.config_file_name}')
            # Use DEVNULL rather than leaking an open(os.devnull) handle.
            p = subprocess.Popen(to_run + extra_args[i],
                                 stdout=subprocess.DEVNULL,
                                 stderr=subprocess.STDOUT)
            client.set_pid(p.pid)
            print(
                f'started rippled: config: {client.config_file_name} PID: {p.pid}',
                flush=True)
            self.running_server_indexes.add(i)
            self.processes[i] = p

        time.sleep(2)  # give servers time to start

    def servers_stop(self,
                     server_indexes: Optional[Union[Set[int],
                                                    List[int]]] = None):
        '''Stop the given servers (all running servers when None) via the
        rippled `stop` command, then reap the processes.'''
        if server_indexes is None:
            server_indexes = self.running_server_indexes.copy()

        if 0 in server_indexes:
            print(
                f'WARNING: Server 0 is being stopped. RPC commands cannot be sent until this is restarted.'
            )

        for i in server_indexes:
            if i not in self.running_server_indexes:
                continue
            client = self.clients[i]
            to_run = [client.exe, '--conf', client.config_file_name]
            # Use DEVNULL rather than leaking an open(os.devnull) handle.
            subprocess.Popen(to_run + ['stop'],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.STDOUT)
            self.running_server_indexes.discard(i)

        for i in server_indexes:
            self.processes[i].wait()
            del self.processes[i]
            self.get_client(i).set_pid(-1)
|
||||
64
bin/sidechain/python/tests/conftest.py
Normal file
64
bin/sidechain/python/tests/conftest.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# Add parent directory to module path
|
||||
import os, sys
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from common import Account, Asset, XRP
|
||||
import create_config_files
|
||||
import sidechain
|
||||
|
||||
import pytest
|
||||
'''
|
||||
Sidechains uses argparse.ArgumentParser to add command line options.
|
||||
The function call to add an argument is `add_argument`. pytest uses `addoption`.
|
||||
This wrapper class changes calls from `add_argument` to calls to `addoption`.
|
||||
To avoid conflicts between pytest and sidechains, all sidechain arguments have
|
||||
the suffix `_sc` appended to them. I.e. `--verbose` is for pytest, `--verbose_sc`
|
||||
is for sidechains.
|
||||
'''
|
||||
|
||||
|
||||
class ArgumentParserWrapper:
    '''Adapts pytest's option parser to argparse's `add_argument` interface.

    Only long options (those starting with `--`) are forwarded; each is
    given a `_sc` suffix so sidechain options never clash with pytest's.
    '''
    def __init__(self, wrapped):
        self.wrapped = wrapped

    def add_argument(self, *args, **kwargs):
        long_opts = (opt for opt in args if opt.startswith('--'))
        for opt in long_opts:
            self.wrapped.addoption(f'{opt}_sc', **kwargs)
|
||||
|
||||
|
||||
def pytest_addoption(parser):
    '''pytest hook: register the sidechain command line options (suffixed
    with `_sc`) on pytest's own parser.'''
    sidechain.parse_args_helper(ArgumentParserWrapper(parser))
|
||||
|
||||
|
||||
def _xchain_assets(ratio: int = 1):
    '''Build the cross-chain asset table used to generate config files.

    `ratio` scales the sidechain side of each asset pair relative to the
    mainchain side.
    '''
    root_account = Account(account_id="rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh")
    main_iou_asset = Asset(value=0, currency='USD', issuer=root_account)
    side_iou_asset = Asset(value=0, currency='USD', issuer=root_account)
    return {
        'xrp_xrp_sidechain_asset':
        create_config_files.XChainAsset(XRP(0), XRP(0), 1, 1 * ratio, 200,
                                        200 * ratio),
        'iou_iou_sidechain_asset':
        create_config_files.XChainAsset(main_iou_asset, side_iou_asset, 1,
                                        1 * ratio, 0.02, 0.02 * ratio),
    }
|
||||
|
||||
|
||||
# Dictionary of config dirs. Key is the cross-chain asset ratio.
# Populated lazily by the `configs_dirs_dict` fixture below.
_config_dirs = None
|
||||
|
||||
|
||||
@pytest.fixture
def configs_dirs_dict(tmp_path):
    '''Return a dict of generated config directories, keyed by asset ratio.

    The result is cached in the module-global `_config_dirs`, so the config
    files are generated only once per test session.
    NOTE(review): because of the global cache, the `tmp_path` of the first
    test that uses this fixture is reused by all later tests — confirm this
    is intended.
    '''
    global _config_dirs
    if not _config_dirs:
        params = create_config_files.Params()
        _config_dirs = {}
        # Generate one config tree per supported ratio.
        for ratio in (1, 2):
            params.configs_dir = str(tmp_path / f'test_config_files_{ratio}')
            create_config_files.main(params, _xchain_assets(ratio))
            _config_dirs[ratio] = params.configs_dir

    return _config_dirs
|
||||
117
bin/sidechain/python/tests/door_test.py
Normal file
117
bin/sidechain/python/tests/door_test.py
Normal file
@@ -0,0 +1,117 @@
|
||||
from typing import Dict
|
||||
from app import App
|
||||
from common import XRP
|
||||
from sidechain import Params
|
||||
import sidechain
|
||||
import test_utils
|
||||
import time
|
||||
from transaction import Payment
|
||||
import tst_common
|
||||
|
||||
# Number of accounts created on each chain for the batch door test —
# presumably sized to generate enough traffic to force the door account
# through a close/reopen cycle (see door_test); confirm before lowering.
batch_test_num_accounts = 200
|
||||
|
||||
|
||||
def door_test(mc_app: App, sc_app: App, params: Params):
    '''Exercise the federators' door open/close handling.

    Creates `batch_test_num_accounts` accounts on each chain, then fires a
    cross-chain payment from every mainchain account at once. The test
    expects the mainchain door account to leave the 'open' state and later
    reopen, and finally verifies that a single transfer still works in each
    direction afterwards.
    '''
    # setup, create accounts on both chains
    for i in range(batch_test_num_accounts):
        name = "m_" + str(i)
        account_main = mc_app.create_account(name)
        name = "s_" + str(i)
        account_side = sc_app.create_account(name)
        # Fund only the mainchain account; the sidechain account is funded
        # later by the cross-chain payment.
        mc_app(
            Payment(account=params.genesis_account,
                    dst=account_main,
                    amt=XRP(20_000)))
        mc_app.maybe_ledger_accept()
    # Waiting on the last account implies the earlier ones are funded too.
    account_main_last = mc_app.account_from_alias("m_" +
                                                  str(batch_test_num_accounts -
                                                      1))
    test_utils.wait_for_balance_change(mc_app, account_main_last, XRP(0),
                                       XRP(20_000))

    # test
    to_side_xrp = XRP(1000)
    to_main_xrp = XRP(100)
    last_tx_xrp = XRP(343)
    with test_utils.test_context(mc_app, sc_app, True):
        # send xchain payment to open accounts on sidechain
        for i in range(batch_test_num_accounts):
            name_main = "m_" + str(i)
            account_main = mc_app.account_from_alias(name_main)
            name_side = "s_" + str(i)
            account_side = sc_app.account_from_alias(name_side)
            # The memo carries the sidechain destination for the federators.
            memos = [{
                'Memo': {
                    'MemoData': account_side.account_id_str_as_hex()
                }
            }]
            mc_app(
                Payment(account=account_main,
                        dst=params.mc_door_account,
                        amt=to_side_xrp,
                        memos=memos))

        # Wait until every federator's listeners are back in 'normal' state.
        while 1:
            federator_info = sc_app.federator_info()
            should_loop = False
            for v in federator_info.values():
                for c in ['mainchain', 'sidechain']:
                    state = v['info'][c]['listener_info']['state']
                    if state != 'normal':
                        should_loop = True
            if not should_loop:
                break
            time.sleep(1)

        # wait some time for the door to change
        door_closing = False
        door_reopened = False
        for i in range(batch_test_num_accounts * 2 + 40):
            server_index = [0]
            federator_info = sc_app.federator_info(server_index)
            for v in federator_info.values():
                door_status = v['info']['mainchain']['door_status']['status']
                # First watch for the door leaving 'open', then for it
                # returning to 'open'.
                if not door_closing:
                    if door_status != 'open':
                        door_closing = True
                else:
                    if door_status == 'open':
                        door_reopened = True

            if not door_reopened:
                time.sleep(1)
                mc_app.maybe_ledger_accept()
            else:
                break

        if not door_reopened:
            raise ValueError('Expected door status changes did not happen')

        # wait for accounts created on sidechain
        for i in range(batch_test_num_accounts):
            name_side = "s_" + str(i)
            account_side = sc_app.account_from_alias(name_side)
            test_utils.wait_for_balance_change(sc_app, account_side, XRP(0),
                                               to_side_xrp)

        # # try one xchain payment, each direction
        name_main = "m_" + str(0)
        account_main = mc_app.account_from_alias(name_main)
        name_side = "s_" + str(0)
        account_side = sc_app.account_from_alias(name_side)

        pre_bal = mc_app.get_balance(account_main, XRP(0))
        sidechain.side_to_main_transfer(mc_app, sc_app, account_side,
                                        account_main, to_main_xrp, params)
        test_utils.wait_for_balance_change(mc_app, account_main, pre_bal,
                                           to_main_xrp)

        pre_bal = sc_app.get_balance(account_side, XRP(0))
        sidechain.main_to_side_transfer(mc_app, sc_app, account_main,
                                        account_side, last_tx_xrp, params)
        test_utils.wait_for_balance_change(sc_app, account_side, pre_bal,
                                           last_tx_xrp)
|
||||
|
||||
|
||||
def test_door_operations(configs_dirs_dict: Dict[int, str]):
    '''pytest entry point: run `door_test` through the common harness.'''
    tst_common.test_start(configs_dirs_dict, door_test)
|
||||
151
bin/sidechain/python/tests/simple_xchain_transfer_test.py
Normal file
151
bin/sidechain/python/tests/simple_xchain_transfer_test.py
Normal file
@@ -0,0 +1,151 @@
|
||||
import logging
|
||||
import pprint
|
||||
import pytest
|
||||
from multiprocessing import Process, Value
|
||||
from typing import Dict
|
||||
import sys
|
||||
|
||||
from app import App
|
||||
from common import Asset, eprint, disable_eprint, drops, XRP
|
||||
import interactive
|
||||
from sidechain import Params
|
||||
import sidechain
|
||||
import test_utils
|
||||
import time
|
||||
from transaction import Payment, Trust
|
||||
import tst_common
|
||||
|
||||
|
||||
def simple_xrp_test(mc_app: App, sc_app: App, params: Params):
    '''Round-trip XRP across the bridge in both directions.

    A large initial transfer funds adam's sidechain account, then small
    drop-level amounts are sent main→side (even values) and side→main
    (odd values), twice.
    '''
    alice = mc_app.account_from_alias('alice')
    adam = sc_app.account_from_alias('adam')
    mc_door = mc_app.account_from_alias('door')
    # NOTE(review): sc_door is currently unused in this test.
    sc_door = sc_app.account_from_alias('door')

    # main to side
    # First txn funds the side chain account
    with test_utils.test_context(mc_app, sc_app):
        to_send_asset = XRP(9999)
        mc_pre_bal = mc_app.get_balance(mc_door, to_send_asset)
        sc_pre_bal = sc_app.get_balance(adam, to_send_asset)
        sidechain.main_to_side_transfer(mc_app, sc_app, alice, adam,
                                        to_send_asset, params)
        # The door account on the mainchain should collect the funds …
        test_utils.wait_for_balance_change(mc_app, mc_door, mc_pre_bal,
                                           to_send_asset)
        # … and adam should receive the matching amount on the sidechain.
        test_utils.wait_for_balance_change(sc_app, adam, sc_pre_bal,
                                           to_send_asset)

    for i in range(2):
        # even amounts for main to side
        for value in range(20, 30, 2):
            with test_utils.test_context(mc_app, sc_app):
                to_send_asset = drops(value)
                mc_pre_bal = mc_app.get_balance(mc_door, to_send_asset)
                sc_pre_bal = sc_app.get_balance(adam, to_send_asset)
                sidechain.main_to_side_transfer(mc_app, sc_app, alice, adam,
                                                to_send_asset, params)
                test_utils.wait_for_balance_change(mc_app, mc_door, mc_pre_bal,
                                                   to_send_asset)
                test_utils.wait_for_balance_change(sc_app, adam, sc_pre_bal,
                                                   to_send_asset)

        # side to main
        # odd amounts for side to main
        for value in range(19, 29, 2):
            with test_utils.test_context(mc_app, sc_app):
                to_send_asset = drops(value)
                pre_bal = mc_app.get_balance(alice, to_send_asset)
                sidechain.side_to_main_transfer(mc_app, sc_app, adam, alice,
                                                to_send_asset, params)
                test_utils.wait_for_balance_change(mc_app, alice, pre_bal,
                                                   to_send_asset)
|
||||
|
||||
|
||||
def simple_iou_test(mc_app: App, sc_app: App, params: Params):
    '''Round-trip a USD IOU across the bridge in both directions.

    The mainchain IOU is issued by 'root'; its sidechain counterpart is
    issued by the sidechain door account. Even values go main→side, odd
    values side→main, twice.
    '''
    alice = mc_app.account_from_alias('alice')
    adam = sc_app.account_from_alias('adam')

    mc_asset = Asset(value=0,
                     currency='USD',
                     issuer=mc_app.account_from_alias('root'))
    sc_asset = Asset(value=0,
                     currency='USD',
                     issuer=sc_app.account_from_alias('door'))
    mc_app.add_asset_alias(mc_asset, 'mcd')  # main chain dollar
    sc_app.add_asset_alias(sc_asset, 'scd')  # side chain dollar
    mc_app(Trust(account=alice, limit_amt=mc_asset(1_000_000)))

    ## make sure adam account on the side chain exists and set the trust line
    with test_utils.test_context(mc_app, sc_app):
        sidechain.main_to_side_transfer(mc_app, sc_app, alice, adam, XRP(300),
                                        params)

    # create a trust line to alice and pay her USD/root
    # NOTE(review): this TrustSet repeats the identical one above —
    # presumably a harmless no-op; confirm whether one can be dropped.
    mc_app(Trust(account=alice, limit_amt=mc_asset(1_000_000)))
    mc_app.maybe_ledger_accept()
    mc_app(
        Payment(account=mc_app.account_from_alias('root'),
                dst=alice,
                amt=mc_asset(10_000)))
    mc_app.maybe_ledger_accept()

    # create a trust line for adam
    sc_app(Trust(account=adam, limit_amt=sc_asset(1_000_000)))

    for i in range(2):
        # even amounts for main to side
        for value in range(10, 20, 2):
            with test_utils.test_context(mc_app, sc_app):
                to_send_asset = mc_asset(value)
                rcv_asset = sc_asset(value)
                pre_bal = sc_app.get_balance(adam, rcv_asset)
                sidechain.main_to_side_transfer(mc_app, sc_app, alice, adam,
                                                to_send_asset, params)
                test_utils.wait_for_balance_change(sc_app, adam, pre_bal,
                                                   rcv_asset)

        # side to main
        # odd amounts for side to main
        for value in range(9, 19, 2):
            with test_utils.test_context(mc_app, sc_app):
                to_send_asset = sc_asset(value)
                rcv_asset = mc_asset(value)
                pre_bal = mc_app.get_balance(alice, to_send_asset)
                sidechain.side_to_main_transfer(mc_app, sc_app, adam, alice,
                                                to_send_asset, params)
                test_utils.wait_for_balance_change(mc_app, alice, pre_bal,
                                                   rcv_asset)
|
||||
|
||||
|
||||
def setup_accounts(mc_app: App, sc_app: App, params: Params):
    '''Create the test accounts on both chains.

    Typical female names are addresses on the mainchain and typical male
    names are addresses on the sidechain. Only 'alice' is funded (from the
    genesis account); all other accounts start unfunded.
    '''
    mainchain_names = ('alice', 'beth', 'carol', 'deb', 'ella')
    mc_accounts = {n: mc_app.create_account(n) for n in mainchain_names}
    mc_app(
        Payment(account=params.genesis_account,
                dst=mc_accounts['alice'],
                amt=XRP(20_000)))
    mc_app.maybe_ledger_accept()

    for name in ('adam', 'bob', 'charlie', 'dan', 'ed'):
        sc_app.create_account(name)
|
||||
|
||||
|
||||
def run_all(mc_app: App, sc_app: App, params: Params):
    '''Set up the test accounts, log both key managers, then run the XRP
    and IOU transfer tests.'''
    setup_accounts(mc_app, sc_app, params)
    for label, chain in (('mainchain', mc_app), ('sidechain', sc_app)):
        logging.info(f'{label}:\n{chain.key_manager.to_string()}')
    simple_xrp_test(mc_app, sc_app, params)
    simple_iou_test(mc_app, sc_app, params)
|
||||
|
||||
|
||||
def test_simple_xchain(configs_dirs_dict: Dict[int, str]):
    '''pytest entry point: run all simple transfer tests via the harness.'''
    tst_common.test_start(configs_dirs_dict, run_all)
|
||||
74
bin/sidechain/python/tests/tst_common.py
Normal file
74
bin/sidechain/python/tests/tst_common.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import logging
|
||||
import pprint
|
||||
import pytest
|
||||
from multiprocessing import Process, Value
|
||||
from typing import Callable, Dict
|
||||
import sys
|
||||
|
||||
from app import App
|
||||
from common import eprint, disable_eprint, XRP
|
||||
from sidechain import Params
|
||||
import sidechain
|
||||
import test_utils
|
||||
import time
|
||||
|
||||
|
||||
def run(mc_app: App, sc_app: App, params: Params,
        test_case: Callable[[App, App, Params], None]):
    '''Run `test_case` against the two chains.

    When the mainchain runs standalone, a background process keeps closing
    mainchain ledgers while the test executes; it is signalled to stop (and
    joined) afterwards. Log files are converted to JSON on the way out,
    whether or not the test passed.
    '''
    # process will run while stop token is non-zero
    stop_token = Value('i', 1)
    p = None
    if mc_app.standalone:
        p = Process(target=sidechain.close_mainchain_ledgers,
                    args=(stop_token, params))
        p.start()
    try:
        test_case(mc_app, sc_app, params)
    finally:
        if p:
            # Clearing the token tells the ledger-closing process to exit.
            stop_token.value = 0
            p.join()
        sidechain._convert_log_files_to_json(
            mc_app.get_configs() + sc_app.get_configs(), 'final.json')
|
||||
|
||||
|
||||
def standalone_test(params: Params, test_case: Callable[[App, App, Params],
                                                        None]):
    '''Run `test_case` against a standalone-mode chain pair.'''
    sidechain._standalone_with_callback(
        params,
        lambda mc_app, sc_app: run(mc_app, sc_app, params, test_case),
        setup_user_accounts=False)
|
||||
|
||||
|
||||
def multinode_test(params: Params, test_case: Callable[[App, App, Params],
                                                       None]):
    '''Run `test_case` against a multi-node chain pair.'''
    sidechain._multinode_with_callback(
        params,
        lambda mc_app, sc_app: run(mc_app, sc_app, params, test_case),
        setup_user_accounts=False)
|
||||
|
||||
|
||||
def test_start(configs_dirs_dict: Dict[int, str],
               test_case: Callable[[App, App, Params], None]):
    '''Common test entry point: build sidechain params from the generated
    configs, then dispatch to the standalone or multinode harness.'''
    params = sidechain.Params(configs_dir=configs_dirs_dict[1])

    err_str = params.check_error()
    if err_str:
        eprint(err_str)
        sys.exit(1)

    if params.verbose:
        print("eprint enabled")
    else:
        disable_eprint()

    # Set to true to help debug tests
    test_utils.test_context_verbose_logging = True

    harness = standalone_test if params.standalone else multinode_test
    harness(params, test_case)
|
||||
366
bin/sidechain/python/transaction.py
Normal file
366
bin/sidechain/python/transaction.py
Normal file
@@ -0,0 +1,366 @@
|
||||
import datetime
|
||||
import json
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
from command import Command
|
||||
from common import Account, Asset, Path, PathList, to_rippled_epoch
|
||||
|
||||
|
||||
class Transaction(Command):
    '''Interface for all transactions.

    Holds the fields common to every rippled transaction; subclasses add
    their own fields by extending `to_cmd_obj`.
    '''
    def __init__(
        self,
        *,
        account: Account,
        flags: Optional[int] = None,
        fee: Optional[Union[Asset, int]] = None,
        sequence: Optional[int] = None,
        account_txn_id: Optional[str] = None,
        last_ledger_sequence: Optional[int] = None,
        src_tag: Optional[int] = None,
        memos: Optional[List[Dict[str, dict]]] = None,
    ):
        super().__init__()
        self.account = account
        # set even if None
        self.flags = flags
        self.fee = fee
        self.sequence = sequence
        self.account_txn_id = account_txn_id
        self.last_ledger_sequence = last_ledger_sequence
        self.src_tag = src_tag
        self.memos = memos

    def cmd_name(self) -> str:
        '''Transactions are sent through the `submit` RPC command.'''
        return 'submit'

    def set_seq_and_fee(self, seq: int, fee: Union[Asset, int]):
        '''Set the sequence number and fee (typically just before signing).'''
        self.sequence = seq
        self.fee = fee

    def to_cmd_obj(self) -> dict:
        '''Convert the common fields to transaction form (suitable for
        json.dumps or similar). Fields left as None are omitted.'''
        txn = {
            'Account': self.account.account_id,
        }
        if self.flags is not None:
            # Bug fix: this previously read the bare name `flags`, which is
            # a NameError at runtime; it must be the instance attribute.
            txn['Flags'] = self.flags
        if self.fee is not None:
            if isinstance(self.fee, int):
                txn['Fee'] = f'{self.fee}'  # must be a string
            else:
                txn['Fee'] = self.fee.to_cmd_obj()
        if self.sequence is not None:
            txn['Sequence'] = self.sequence
        if self.account_txn_id is not None:
            txn['AccountTxnID'] = self.account_txn_id
        if self.last_ledger_sequence is not None:
            txn['LastLedgerSequence'] = self.last_ledger_sequence
        if self.src_tag is not None:
            txn['SourceTag'] = self.src_tag
        if self.memos is not None:
            txn['Memos'] = self.memos
        return txn
|
||||
|
||||
|
||||
class Payment(Transaction):
    '''A payment transaction.'''
    def __init__(self,
                 *,
                 dst: Account,
                 amt: Asset,
                 send_max: Optional[Asset] = None,
                 paths: Optional[PathList] = None,
                 dst_tag: Optional[int] = None,
                 deliver_min: Optional[Asset] = None,
                 **rest):
        super().__init__(**rest)
        self.dst = dst
        self.amt = amt
        self.send_max = send_max
        if paths is not None and isinstance(paths, Path):
            # allow paths = Path([...]) special case
            self.paths = PathList([paths])
        else:
            self.paths = paths
        self.dst_tag = dst_tag
        self.deliver_min = deliver_min

    def set_partial_payment(self, value: bool = True):
        '''Set or clear the partial payment flag'''
        self._set_flag(0x0002_0000, value)

    def to_cmd_obj(self) -> dict:
        '''convert to transaction form (suitable for using json.dumps or similar)'''
        txn = super().to_cmd_obj()
        txn = {
            **txn,
            'TransactionType': 'Payment',
            'Destination': self.dst.account_id,
            'Amount': self.amt.to_cmd_obj(),
        }
        if self.paths is not None:
            txn['Paths'] = self.paths.to_cmd_obj()
        if self.send_max is not None:
            txn['SendMax'] = self.send_max.to_cmd_obj()
        if self.dst_tag is not None:
            txn['DestinationTag'] = self.dst_tag
        if self.deliver_min is not None:
            # Bug fix: DeliverMin is an Asset and must be serialized like
            # Amount/SendMax; previously the raw object was embedded.
            txn['DeliverMin'] = self.deliver_min.to_cmd_obj()
        return txn
|
||||
|
||||
|
||||
class Trust(Transaction):
    '''A trust set transaction.'''
    def __init__(self,
                 *,
                 limit_amt: Optional[Asset] = None,
                 qin: Optional[int] = None,
                 qout: Optional[int] = None,
                 **rest):
        super().__init__(**rest)
        self.limit_amt = limit_amt
        self.qin = qin
        self.qout = qout

    def set_auth(self):
        '''Set the auth flag (cannot be cleared)'''
        self._set_flag(0x00010000)
        return self

    def set_no_ripple(self, value: bool = True):
        '''Set or clear the noRipple flag'''
        self._set_flag(0x0002_0000, value)
        self._set_flag(0x0004_0000, not value)
        return self

    def set_freeze(self, value: bool = True):
        '''Set or clear the freeze flag'''
        self._set_flag(0x0020_0000, value)
        self._set_flag(0x0040_0000, not value)
        return self

    def to_cmd_obj(self) -> dict:
        '''convert to transaction form (suitable for using json.dumps or similar)'''
        result = super().to_cmd_obj()
        result = {
            **result,
            'TransactionType': 'TrustSet',
        }
        # Robustness fix: limit_amt is Optional, so only serialize it when
        # present (previously a None value raised AttributeError here).
        if self.limit_amt is not None:
            result['LimitAmount'] = self.limit_amt.to_cmd_obj()
        if self.qin is not None:
            result['QualityIn'] = self.qin
        if self.qout is not None:
            result['QualityOut'] = self.qout
        return result
|
||||
|
||||
|
||||
class SetRegularKey(Transaction):
    '''A SetRegularKey transaction.'''
    def __init__(self, *, key: str, **rest):
        super().__init__(**rest)
        self.key = key

    def to_cmd_obj(self) -> dict:
        '''convert to transaction form (suitable for using json.dumps or similar)'''
        return {
            **super().to_cmd_obj(),
            'TransactionType': 'SetRegularKey',
            'RegularKey': self.key,
        }
|
||||
|
||||
|
||||
class SignerListSet(Transaction):
    '''A SignerListSet transaction.

    When `weights` is omitted (or empty), every key gets weight 1;
    otherwise it must contain exactly one weight per key.
    '''
    def __init__(self,
                 *,
                 keys: List[str],
                 weights: Optional[List[int]] = None,
                 quorum: int,
                 **rest):
        super().__init__(**rest)
        self.keys = keys
        self.quorum = quorum
        if not weights:
            self.weights = [1] * len(keys)
        elif len(weights) == len(keys):
            self.weights = weights
        else:
            raise ValueError(
                f'SignerSetList number of weights must equal number of keys (or be empty). Weights: {weights} Keys: {keys}'
            )

    def to_cmd_obj(self) -> dict:
        '''convert to transaction form (suitable for using json.dumps or similar)'''
        result = {
            **super().to_cmd_obj(),
            'TransactionType': 'SignerListSet',
            'SignerQuorum': self.quorum,
            'SignerEntries': [{
                'SignerEntry': {
                    'Account': k,
                    'SignerWeight': w
                }
            } for k, w in zip(self.keys, self.weights)],
        }
        return result
|
||||
|
||||
|
||||
class AccountSet(Transaction):
    '''An account set transaction'''

    def __init__(self, account: Account, **rest):
        super().__init__(account=account, **rest)
        # Optional AccountSet fields; populated via the fluent setters
        # below and omitted from the command object while None.
        self.clear_flag = None
        self.set_flag = None
        self.transfer_rate = None
        self.tick_size = None

    def _set_account_flag(self, flag_id: int, value):
        # Record flag_id as the SetFlag when value is truthy, otherwise
        # as the ClearFlag. Returns self for chaining.
        target = 'set_flag' if value else 'clear_flag'
        setattr(self, target, flag_id)
        return self

    def set_account_txn_id(self, value: bool = True):
        '''Set or clear the asfAccountTxnID flag'''
        return self._set_account_flag(5, value)

    def set_default_ripple(self, value: bool = True):
        '''Set or clear the asfDefaultRipple flag'''
        return self._set_account_flag(8, value)

    def set_deposit_auth(self, value: bool = True):
        '''Set or clear the asfDepositAuth flag'''
        return self._set_account_flag(9, value)

    def set_disable_master(self, value: bool = True):
        '''Set or clear the asfDisableMaster flag'''
        return self._set_account_flag(4, value)

    def set_disallow_xrp(self, value: bool = True):
        '''Set or clear the asfDisallowXRP flag'''
        return self._set_account_flag(3, value)

    def set_global_freeze(self, value: bool = True):
        '''Set or clear the asfGlobalFreeze flag'''
        return self._set_account_flag(7, value)

    def set_no_freeze(self, value: bool = True):
        '''Set or clear the asfNoFreeze flag'''
        return self._set_account_flag(6, value)

    def set_require_auth(self, value: bool = True):
        '''Set or clear the asfRequireAuth flag'''
        return self._set_account_flag(2, value)

    def set_require_dest(self, value: bool = True):
        '''Set or clear the asfRequireDest flag'''
        return self._set_account_flag(1, value)

    def set_transfer_rate(self, value: int):
        '''Set the fee to change when users transfer this account's issued currencies'''
        self.transfer_rate = value
        return self

    def set_tick_size(self, value: int):
        '''Tick size to use for offers involving a currency issued by this address'''
        self.tick_size = value
        return self

    def to_cmd_obj(self) -> dict:
        '''convert to transaction form (suitable for using json.dumps or similar)'''
        txn = dict(super().to_cmd_obj())
        txn['TransactionType'] = 'AccountSet'
        # Only emit the optional fields that were actually set.
        optional_fields = (
            ('ClearFlag', self.clear_flag),
            ('SetFlag', self.set_flag),
            ('TransferRate', self.transfer_rate),
            ('TickSize', self.tick_size),
        )
        for field_name, field_value in optional_fields:
            if field_value is not None:
                txn[field_name] = field_value
        return txn
|
||||
|
||||
|
||||
class Offer(Transaction):
    '''An offer transaction'''

    def __init__(self,
                 *,
                 taker_pays: Asset,
                 taker_gets: Asset,
                 expiration: Optional[int] = None,
                 offer_sequence: Optional[int] = None,
                 **rest):
        super().__init__(**rest)
        self.taker_pays = taker_pays
        self.taker_gets = taker_gets
        # Optional fields; omitted from the command object while None.
        self.expiration = expiration
        self.offer_sequence = offer_sequence

    # Flag setters — each toggles one OfferCreate transaction flag bit
    # and returns self for chaining.

    def set_passive(self, value: bool = True):
        '''Set or clear the 0x0001_0000 (passive) flag.'''
        return self._set_flag(0x0001_0000, value)

    def set_immediate_or_cancel(self, value: bool = True):
        '''Set or clear the 0x0002_0000 (immediate-or-cancel) flag.'''
        return self._set_flag(0x0002_0000, value)

    def set_fill_or_kill(self, value: bool = True):
        '''Set or clear the 0x0004_0000 (fill-or-kill) flag.'''
        return self._set_flag(0x0004_0000, value)

    def set_sell(self, value: bool = True):
        '''Set or clear the 0x0008_0000 (sell) flag.'''
        return self._set_flag(0x0008_0000, value)

    def to_cmd_obj(self) -> dict:
        '''Convert to transaction form (suitable for json.dumps or similar).'''
        result = dict(super().to_cmd_obj())
        result['TransactionType'] = 'OfferCreate'
        result['TakerPays'] = self.taker_pays.to_cmd_obj()
        result['TakerGets'] = self.taker_gets.to_cmd_obj()
        if self.expiration is not None:
            result['Expiration'] = self.expiration
        if self.offer_sequence is not None:
            result['OfferSequence'] = self.offer_sequence
        return result
|
||||
|
||||
|
||||
class Ticket(Transaction):
    '''A ticket create transaction'''

    def __init__(self, *, count: int = 1, **rest):
        # `count` is the number of tickets to create (TicketCount);
        # remaining keyword arguments go to the Transaction base class.
        super().__init__(**rest)
        self.count = count

    def to_cmd_obj(self) -> dict:
        '''Convert to transaction form (suitable for json.dumps or similar).'''
        result = dict(super().to_cmd_obj())
        result['TransactionType'] = 'TicketCreate'
        result['TicketCount'] = self.count
        return result
|
||||
|
||||
|
||||
class SetHook(Transaction):
    '''A SetHook transaction for the experimental hook amendment'''

    def __init__(self,
                 *,
                 create_code: str,
                 hook_on: str = '0000000000000000',
                 **rest):
        # `create_code` is the hook's code payload; `hook_on` is the
        # hex-string trigger mask (defaults to all zeros).
        super().__init__(**rest)
        self.create_code = create_code
        self.hook_on = hook_on

    def to_cmd_obj(self) -> dict:
        '''Convert to transaction form (suitable for json.dumps or similar).'''
        result = dict(super().to_cmd_obj())
        result['TransactionType'] = 'SetHook'
        result['CreateCode'] = self.create_code
        result['HookOn'] = self.hook_on
        return result
|
||||
@@ -200,9 +200,19 @@
|
||||
#
|
||||
# admin = [ IP, IP, IP, ... ]
|
||||
#
|
||||
# A comma-separated list of IP addresses.
|
||||
# A comma-separated list of IP addresses or subnets. Subnets
|
||||
# should be represented in "slash" notation, such as:
|
||||
# 10.0.0.0/8
|
||||
# 172.16.0.0/12
|
||||
# 192.168.0.0/16
|
||||
# Those examples are ipv4, but ipv6 is also supported.
|
||||
# When configuring subnets, the address must match the
|
||||
# underlying network address. Otherwise, the desired IP range is
|
||||
# ambiguous. For example, 10.1.2.3/24 has a network address of
|
||||
# 10.1.2.0. Therefore, that subnet should be configured as
|
||||
# 10.1.2.0/24.
|
||||
#
|
||||
# When set, grants administrative command access to the specified IP
|
||||
# When set, grants administrative command access to the specified
|
||||
# addresses. These commands may be issued over http, https, ws, or wss
|
||||
# if configured on the port. If not provided, the default is to not allow
|
||||
# administrative commands.
|
||||
@@ -233,9 +243,10 @@
|
||||
#
|
||||
# secure_gateway = [ IP, IP, IP, ... ]
|
||||
#
|
||||
# A comma-separated list of IP addresses.
|
||||
# A comma-separated list of IP addresses or subnets. See the
|
||||
# details for the "admin" option above.
|
||||
#
|
||||
# When set, allows the specified IP addresses to pass HTTP headers
|
||||
# When set, allows the specified addresses to pass HTTP headers
|
||||
# containing username and remote IP address for each session. If a
|
||||
# non-empty username is passed in this way, then resource controls
|
||||
# such as often resulting in "tooBusy" errors will be lifted. However,
|
||||
@@ -250,9 +261,9 @@
|
||||
# proxies. Since rippled trusts these hosts, they must be
|
||||
# responsible for properly authenticating the remote user.
|
||||
#
|
||||
# The same IP address cannot be used in both "admin" and "secure_gateway"
|
||||
# lists for the same port. In this case, rippled will abort with an error
|
||||
# message to the console shortly after startup
|
||||
# If some IP addresses are included for both "admin" and
|
||||
# "secure_gateway" connections, then they will be treated as
|
||||
# "admin" addresses.
|
||||
#
|
||||
# ssl_key = <filename>
|
||||
# ssl_cert = <filename>
|
||||
@@ -622,18 +633,28 @@
|
||||
#
|
||||
# [relay_proposals]
|
||||
#
|
||||
# Controls the relaying behavior for proposals received by this server that
|
||||
# are issued by validators that are not on the server's UNL.
|
||||
# Controls the relay and processing behavior for proposals received by this
|
||||
# server that are issued by validators that are not on the server's UNL.
|
||||
#
|
||||
# Legal values are: "trusted" and "all". The default is "trusted".
|
||||
# Legal values are:
|
||||
# "all" - Relay and process all incoming proposals
|
||||
# "trusted" - Relay only trusted proposals, but locally process all
|
||||
# "drop_untrusted" - Relay only trusted proposals, do not process untrusted
|
||||
#
|
||||
# The default is "trusted".
|
||||
#
|
||||
#
|
||||
# [relay_validations]
|
||||
#
|
||||
# Controls the relaying behavior for validations received by this server that
|
||||
# are issued by validators that are not on the server's UNL.
|
||||
# Controls the relay and processing behavior for validations received by this
|
||||
# server that are issued by validators that are not on the server's UNL.
|
||||
#
|
||||
# Legal values are: "trusted" and "all". The default is "all".
|
||||
# Legal values are:
|
||||
# "all" - Relay and process all incoming validations
|
||||
# "trusted" - Relay only trusted validations, but locally process all
|
||||
# "drop_untrusted" - Relay only trusted validations, do not process untrusted
|
||||
#
|
||||
# The default is "all".
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -765,9 +786,17 @@
|
||||
# [workers]
|
||||
#
|
||||
# Configures the number of threads for processing work submitted by peers
|
||||
# and clients. If not specified, then the value is automatically determined
|
||||
# by factors including the number of system processors and whether this
|
||||
# node is a validator.
|
||||
# and clients. If not specified, then the value is automatically set to the
|
||||
# number of processor threads plus 2 for networked nodes. Nodes running in
|
||||
# stand alone mode default to 1 worker.
|
||||
#
|
||||
# [io_workers]
|
||||
#
|
||||
# Configures the number of threads for processing raw inbound and outbound IO.
|
||||
#
|
||||
# [prefetch_workers]
|
||||
#
|
||||
# Configures the number of threads for performing nodestore prefetching.
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -867,7 +896,7 @@
|
||||
#
|
||||
# source_ip = <IP-address>
|
||||
#
|
||||
# Required. IP address of the ETL source
|
||||
# Required. IP address of the ETL source. Can also be a DNS record.
|
||||
#
|
||||
# source_ws_port = <number>
|
||||
#
|
||||
@@ -1055,6 +1084,12 @@
|
||||
# Note: the cache will not be created if online_delete
|
||||
# is specified, or if shards are used.
|
||||
#
|
||||
# fast_load Boolean. If set, load the last persisted ledger
|
||||
# from disk upon process start before syncing to
|
||||
# the network. This is likely to improve performance
|
||||
# if sufficient IOPS capacity is available.
|
||||
# Default 0.
|
||||
#
|
||||
# Optional keys for NuDB or RocksDB:
|
||||
#
|
||||
# earliest_seq The default is 32570 to match the XRP ledger
|
||||
@@ -1132,6 +1167,9 @@
|
||||
# cluster. Setting this option can help eliminate
|
||||
# write timeouts and other write errors due to the
|
||||
# cluster being overloaded.
|
||||
# io_threads
|
||||
# Set the number of IO threads used by the
|
||||
# Cassandra driver. Defaults to 4.
|
||||
#
|
||||
# Notes:
|
||||
# The 'node_db' entry configures the primary, persistent storage.
|
||||
@@ -1426,8 +1464,20 @@
|
||||
# Tunes the servers based on the expected load and available memory. Legal
|
||||
# sizes are "tiny", "small", "medium", "large", and "huge". We recommend
|
||||
# you start at the default and raise the setting if you have extra memory.
|
||||
# If no value is specified, the code assumes the proper size is "tiny". The
|
||||
# default configuration file explicitly specifies "medium" as the size.
|
||||
#
|
||||
# The code attempts to automatically determine the appropriate size for
|
||||
# this parameter based on the amount of RAM and the number of execution
|
||||
# cores available to the server. The current decision matrix is:
|
||||
#
|
||||
# | | Cores |
|
||||
# |---------|------------------------|
|
||||
# | RAM | 1 | 2 or 3 | ≥ 4 |
|
||||
# |---------|------|--------|--------|
|
||||
# | < ~8GB | tiny | tiny | tiny |
|
||||
# | < ~12GB | tiny | small | small |
|
||||
# | < ~16GB | tiny | small | medium |
|
||||
# | < ~24GB | tiny | small | large |
|
||||
# | < ~32GB | tiny | small | huge |
|
||||
#
|
||||
# [signing_support]
|
||||
#
|
||||
@@ -1502,6 +1552,15 @@
|
||||
# Enable or disable access to /vl requests. Default is '1' which
|
||||
# enables access.
|
||||
#
|
||||
# [beta_rpc_api]
|
||||
#
|
||||
# 0 or 1.
|
||||
#
|
||||
# 0: Disable the beta API version for JSON-RPC and WebSocket [default]
|
||||
# 1: Enable the beta API version for testing. The beta API version
|
||||
# contains breaking changes that require a new API version number.
|
||||
# They are not ready for public consumption.
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
#
|
||||
# 10. Example Settings
|
||||
@@ -1589,9 +1648,6 @@ protocol = ws
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
[node_size]
|
||||
medium
|
||||
|
||||
# This is primary persistent datastore for rippled. This includes transaction
|
||||
# metadata, account states, and ledger headers. Helpful information can be
|
||||
# found at https://xrpl.org/capacity-planning.html#node-db-type
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
# Examples:
|
||||
# https://vl.ripple.com
|
||||
# https://vl.coil.com
|
||||
# https://vl.xrplf.org
|
||||
# http://127.0.0.1:8000
|
||||
# file:///etc/opt/ripple/vl.txt
|
||||
#
|
||||
@@ -54,9 +55,13 @@
|
||||
|
||||
[validator_list_sites]
|
||||
https://vl.ripple.com
|
||||
https://vl.xrplf.org
|
||||
|
||||
[validator_list_keys]
|
||||
#vl.ripple.com
|
||||
ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
|
||||
# vl.xrplf.org
|
||||
ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B
|
||||
|
||||
# To use the test network (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
|
||||
# use the following configuration instead:
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
Consensus is the task of reaching agreement within a distributed system in the
|
||||
presence of faulty or even malicious participants. This document outlines the
|
||||
[Ripple Consensus Algorithm](https://ripple.com/files/ripple/consensus/whitepaper.pdf)
|
||||
[XRP Ledger Consensus Algorithm](https://arxiv.org/abs/1802.07242)
|
||||
as implemented in [rippled](https://github.com/ripple/rippled), but
|
||||
focuses on its utility as a generic consensus algorithm independent of the
|
||||
detailed mechanics of the Ripple Consensus Ledger. Most notably, the algorithm
|
||||
@@ -469,7 +469,7 @@ struct Ledger
|
||||
// Whether the ledger's close time was a non-trivial consensus result
|
||||
bool closeAgree() const;
|
||||
|
||||
// The close time resolution used in determing the close time
|
||||
// The close time resolution used in determining the close time
|
||||
NetClock::duration closeTimeResolution() const;
|
||||
|
||||
// The (effective) close time, based on the closeTimeResolution
|
||||
|
||||
217
docs/sidechain/GettingStarted.md
Normal file
217
docs/sidechain/GettingStarted.md
Normal file
@@ -0,0 +1,217 @@
|
||||
## Introduction
|
||||
|
||||
This document walks through the steps to set up a side chain running on your local
|
||||
machine and make your first cross chain transfers.
|
||||
|
||||
## Get Ready
|
||||
|
||||
This section describes how to install the python dependencies, create the
|
||||
environment variables, and create the configuration files that scripts need to
|
||||
run correctly.
|
||||
|
||||
### Build rippled
|
||||
|
||||
Checkout the `sidechain` branch from the rippled repository, and follow the
|
||||
usual process to build rippled.
|
||||
|
||||
### Create a Python virtual environment and install dependencies
|
||||
|
||||
1. Check the current python version. The python scripts require python 3.8 or greater:
|
||||
```
|
||||
python3 --version
|
||||
```
|
||||
|
||||
2. Choose a directory to put the virtual environment. For example, `~/envs`.
|
||||
|
||||
3. Create this directory and cd to it:
|
||||
```
|
||||
$ mkdir ~/env
|
||||
$ cd ~/env
|
||||
```
|
||||
|
||||
4. Create a new python virtual environment and activate it. Here the new
|
||||
environment is called `sidechain`. Of course, you can choose whatever name
|
||||
you'd like:
|
||||
```
|
||||
$ python3 -m venv sidechain
|
||||
$ source ./sidechain/bin/activate
|
||||
```
|
||||
|
||||
5. Install the required python modules. Change directories to where the
|
||||
side chain branch is located and use pip3 to install the modules: Assuming the
|
||||
code is located in `~/projs/sidechain`, the following commands will do it:
|
||||
```
|
||||
cd ~/projs/sidechain
|
||||
$ pip3 install -r bin/sidechain/python/requirements.txt
|
||||
```
|
||||
|
||||
### Activate the python virtual environment
|
||||
|
||||
```
|
||||
$ cd ~/env
|
||||
$ source ./sidechain/bin/activate
|
||||
```
|
||||
|
||||
There's no harm if it was already active.
|
||||
|
||||
### Environment variables
|
||||
|
||||
The python scripts need to know the locations of two files and one directory.
|
||||
These can be specified either through command line arguments or by setting
|
||||
environment variables.
|
||||
|
||||
1. The location of the rippled executable used for main chain servers. Either
|
||||
set the environment variable `RIPPLED_MAINCHAIN_EXE` or use the command line
|
||||
switch `--exe_mainchain`. Until a new RPC is integrated into the main branch
|
||||
(this will happen very soon), use the code built from the sidechain branch as
|
||||
the main chain exe.
|
||||
2. The location of the rippled executable used for side chain servers. Either
|
||||
set the environment variable `RIPPLED_SIDECHAIN_EXE` or use the command line
|
||||
switch `--exe_sidechain`. This should be the rippled executable built from
|
||||
the sidechain branch.
|
||||
3. The location of the directory that has the rippled configuration files.
|
||||
Either set the environment variable `RIPPLED_SIDECHAIN_CFG_DIR` or use the
|
||||
command line switch `--cfgs_dir`. The configuration files do not exist yet.
|
||||
There is a script to create these for you. For now, just choose a location
|
||||
where the files should live and make sure that directory exists.
|
||||
|
||||
Setting environment variables can be very convenient. For example, a small script
|
||||
can be sourced to set these environment variables when working with side chains.
|
||||
|
||||
|
||||
### Creating configuration files
|
||||
|
||||
Assuming rippled is built, the three environment variables are set, and the
|
||||
python environment is activated, run the following script:
|
||||
```
|
||||
bin/sidechain/python/create_config_files.py --usd
|
||||
```
|
||||
|
||||
There should now be many configuration files in the directory specified by the
|
||||
`RIPPLED_SIDECHAIN_CFG_DIR` environment variable. The `--usd` creates a sample
|
||||
cross chain asset for USD -> USD transfers.
|
||||
|
||||
## Running the interactive shell
|
||||
|
||||
There is an interactive shell called `RiplRepl` that can be used to explore
|
||||
side chains. It will use the configuration files built above to spin up test
|
||||
rippled main chain running in standalone mode as well as 5 side chain federators
|
||||
running in regular consensus mode.
|
||||
|
||||
To start the shell, run the following script:
|
||||
```
|
||||
bin/sidechain/python/riplrepl.py
|
||||
```
|
||||
|
||||
The shell will not start until the servers have synced. It may take a minute or
|
||||
two until they do sync. The script should give feedback while it is syncing.
|
||||
|
||||
Once the shell has started, the following message should appear:
|
||||
```
|
||||
Welcome to the sidechain test shell. Type help or ? to list commands.
|
||||
|
||||
RiplRepl>
|
||||
```
|
||||
|
||||
Type the command `server_info` to make sure the servers are running. An example output would be:
|
||||
```
|
||||
RiplRepl> server_info
|
||||
pid config running server_state ledger_seq complete_ledgers
|
||||
main 0 136206 main.no_shards.mainchain_0/rippled.cfg True proposing 75 2-75
|
||||
side 0 136230 sidechain_0/rippled.cfg True proposing 92 1-92
|
||||
1 136231 sidechain_1/rippled.cfg True proposing 92 1-92
|
||||
2 136232 sidechain_2/rippled.cfg True proposing 92 1-92
|
||||
3 136233 sidechain_3/rippled.cfg True proposing 92 1-92
|
||||
4 136234 sidechain_4/rippled.cfg True proposing 92 1-92
|
||||
```
|
||||
|
||||
Of course, you'll see slightly different output on your machine. The important
|
||||
thing to notice is there are two categories, one called `main` for the main chain
|
||||
and one called `side` for the side chain. There should be a single server for the
|
||||
main chain and five servers for the side chain.
|
||||
|
||||
Next, type the `balance` command, to see the balances of the accounts in the address book:
|
||||
```
|
||||
RiplRepl> balance
|
||||
balance currency peer limit
|
||||
account
|
||||
main root 99,999,989,999.999985 XRP
|
||||
door 9,999.999940 XRP
|
||||
side door 99,999,999,999.999954 XRP
|
||||
```
|
||||
|
||||
There are two accounts on the main chain: `root` and `door`; and one account on the side chain: `door`. These are not user accounts. Let's add two user accounts, one on the main chain called `alice` and one on the side chain called `bob`. The `new_account` command does this for us.
|
||||
|
||||
```
|
||||
RiplRepl> new_account mainchain alice
|
||||
RiplRepl> new_account sidechain bob
|
||||
```
|
||||
|
||||
This just added the accounts to the address book, but they don't exist on the
|
||||
ledger yet. To do that, we need to fund the accounts with a payment. For now,
|
||||
let's just fund the `alice` account and check the balances. The `pay` command
|
||||
makes a payment on one of the chains:
|
||||
|
||||
```
|
||||
RiplRepl> pay mainchain root alice 5000
|
||||
RiplRepl> balance
|
||||
balance currency peer limit
|
||||
account
|
||||
main root 99,999,984,999.999969 XRP
|
||||
door 9,999.999940 XRP
|
||||
alice 5,000.000000 XRP
|
||||
side door 99,999,999,999.999954 XRP
|
||||
bob 0.000000 XRP
|
||||
```
|
||||
|
||||
Finally, let's do something specific to side chains: make a cross chain payment.
|
||||
The `xchain` command makes a payment between chains:
|
||||
|
||||
```
|
||||
RiplRepl> xchain mainchain alice bob 4000
|
||||
RiplRepl> balance
|
||||
balance currency peer limit
|
||||
account
|
||||
main root 99,999,984,999.999969 XRP
|
||||
door 13,999.999940 XRP
|
||||
alice 999.999990 XRP
|
||||
side door 99,999,995,999.999863 XRP
|
||||
bob 4,000.000000 XRP
|
||||
```
|
||||
|
||||
Note: the account reserve on the side chain is 100 XRP. The cross chain amount
|
||||
must be greater than 100 XRP or the payment will fail.
|
||||
|
||||
Making a cross chain transaction from the side chain to the main chain is similar:
|
||||
```
|
||||
RiplRepl> xchain sidechain bob alice 2000
|
||||
RiplRepl> balance
|
||||
balance currency peer limit
|
||||
account
|
||||
main root 99,999,984,999.999969 XRP
|
||||
door 11,999.999840 XRP
|
||||
alice 2,999.999990 XRP
|
||||
side door 99,999,997,999.999863 XRP
|
||||
bob 1,999.999990 XRP
|
||||
```
|
||||
|
||||
If you typed `balance` very quickly, you may catch a cross chain payment in
|
||||
progress and the XRP may be deducted from bob's account before it is added to
|
||||
alice's. If this happens just wait a couple seconds and retry the command. Also
|
||||
note that accounts pay a ten drop fee when submitting transactions.
|
||||
|
||||
Finally, exit the program with the `quit` command:
|
||||
```
|
||||
RiplRepl> quit
|
||||
Thank you for using RiplRepl. Goodbye.
|
||||
|
||||
|
||||
WARNING: Server 0 is being stopped. RPC commands cannot be sent until this is restarted.
|
||||
```
|
||||
|
||||
Ignore the warning about the server being stopped.
|
||||
|
||||
## Conclusion
|
||||
|
||||
Those two cross chain payments are a "hello world" for side chains. It makes sure
|
||||
your environment is set up correctly.
|
||||
130
docs/sidechain/configFile.md
Normal file
130
docs/sidechain/configFile.md
Normal file
@@ -0,0 +1,130 @@
|
||||
## Introduction
|
||||
|
||||
The config file for side chain servers that run as federators requires three
|
||||
additional configuration stanzas. One additional stanza is required if the
|
||||
federator will run in standalone mode, and one existing stanza (`ips_fixed`) can
|
||||
be useful if running a side chain network on the local machine.
|
||||
|
||||
## The `[sidechain]` stanza
|
||||
|
||||
This stanza defines the side chain top level parameters. This includes:
|
||||
* The federator's signing key. This is needed to add a signature to a
|
||||
multi-signed transaction before submitting it on the main chain or the side
|
||||
chain.
|
||||
* The main chain account. This is the account controlled by the federators and
|
||||
the account users will send their assets to initiate cross chain transactions.
|
||||
Some documentation calls this the main chain "door" account.
|
||||
* The ip address and port of the main chain. This is needed to communicate with
|
||||
the main chain server.
|
||||
|
||||
An example stanza may look like this (where the "X" are part of a secret key):
|
||||
```
|
||||
[sidechain]
|
||||
signing_key=sXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
mainchain_account=rDj4pMuPv8gAD5ZvUrpHza3bn6QMAK6Zoo
|
||||
mainchain_ip=127.0.0.1
|
||||
mainchain_port_ws=6007
|
||||
```
|
||||
|
||||
## The `[sidechain_federators]` stanza
|
||||
|
||||
This stanza defines the signing public keys of the sidechain federators. This is
|
||||
needed to know which servers to collect transaction signatures from. An example
|
||||
stanza may look like this:
|
||||
|
||||
```
|
||||
[sidechain_federators]
|
||||
aKNmFC2QWXbCUFq9XxaLgz1Av6SY5ccE457zFjSoNwaFPGEwz6ab
|
||||
aKE9m7iDjhy5QAtnrmE8RVbY4RRvFY1Fn3AZ5NN2sB4N9EzQe82Z
|
||||
aKNFZ3L7Y7z8SdGVewkVuqMKmDr6bqmaErXBdWAVqv1cjgkt1X36
|
||||
aKEhTF5hRYDenn2Rb1NMza1vF9RswX8gxyJuuYmz6kpU5W6hc7zi
|
||||
aKEydZ5rmPm7oYQZi9uagk8fnbXz4gmx82WBTJcTVdgYWfRBo1Mf
|
||||
```
|
||||
|
||||
## The `[sidechain_assets]` and associated stanzas.
|
||||
|
||||
These stanzas define what asset is used as the cross chain asset between the main
|
||||
chain and the side chain. The `mainchain_asset` is the asset that accounts on
|
||||
the main chain send to the account controlled by the federators to initiate a
|
||||
cross chain transaction. The `sidechain_asset` is the asset that will be sent to
|
||||
the destination address on the side chain. When returning an asset from the side
|
||||
chain to the main chain, the `sidechain_asset` is sent to the side chain account
|
||||
controlled by the federators and the `mainchain_asset` will be sent to the
|
||||
destination address on the main chain. There are amounts associated with these
|
||||
two assets. These amounts define an exchange rate. If the value of the main chain
|
||||
asset is 1, and the amount of the side chain asset is 2, then for every asset
|
||||
locked on the main chain, twice as many assets are sent on the side chain.
|
||||
Similarly, for every asset returned from the side chain, half as many assets are
|
||||
sent on the main chain. The format used to specify these amounts is the same as
|
||||
used in json RPC commands.
|
||||
|
||||
There are also fields for "refund_penalty" on the main chain and side chain.
|
||||
This is the amount to deduct from refunds if a transaction fails. For example,
|
||||
if a cross chain transaction sends 1 XRP to an address on the side chain that
|
||||
doesn't exist (and the reserve is greater than 1 XRP), then a refund is issued
|
||||
on the main chain. If the `mainchain_refund_penalty` is 400 drops, then the
|
||||
amount returned is 1 XRP - 400 drops.
|
||||
|
||||
An example of stanzas where the main chain asset is XRP, and the sidechain asset
|
||||
is also XRP, and the exchange rate is 1 to 1 may look like this:
|
||||
|
||||
```
|
||||
[sidechain_assets]
|
||||
xrp_xrp_sidechain_asset
|
||||
|
||||
[xrp_xrp_sidechain_asset]
|
||||
mainchain_asset="1"
|
||||
sidechain_asset="1"
|
||||
mainchain_refund_penalty="400"
|
||||
sidechain_refund_penalty="400"
|
||||
```
|
||||
|
||||
|
||||
An example of stanzas where the main chain asset is USD/rD... and the side chain
|
||||
asset is USD/rHb... and the exchange rate is 1 to 2 may look like this:
|
||||
|
||||
```
|
||||
[sidechain_assets]
|
||||
iou_iou_sidechain_asset
|
||||
|
||||
[iou_iou_sidechain_asset]
|
||||
mainchain_asset={"currency": "USD", "issuer": "rDj4pMuPv8gAD5ZvUrpHza3bn6QMAK6Zoo", "value": "1"}
|
||||
sidechain_asset={"currency": "USD", "issuer": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "value": "2"}
|
||||
mainchain_refund_penalty={"currency": "USD", "issuer": "rDj4pMuPv8gAD5ZvUrpHza3bn6QMAK6Zoo", "value": "0.02"}
|
||||
sidechain_refund_penalty={"currency": "USD", "issuer": "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "value": "0.04"}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## The `[sidechain_federators_secrets]` stanza
|
||||
|
||||
When running a side chain with a single federator in stand alone mode (useful
|
||||
for debugging), that single server needs to know the signing keys of all the
|
||||
federators in order to submit transactions. This stanza will normally only
|
||||
be part of configuration files that are used for testing and debugging.
|
||||
|
||||
An example of a stanza with federator secrets may look like this (where the "X"
|
||||
are part of a secret key).
|
||||
|
||||
```
|
||||
[sidechain_federators_secrets]
|
||||
sXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
sXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
sXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
sXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
sXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
```
|
||||
|
||||
## The `[ips_fixed]` stanza
|
||||
|
||||
When running a test net it can be useful to hard code the ip addresses of the
|
||||
side chain servers. An example of such a stanza used to run a test net locally
|
||||
may look like this:
|
||||
|
||||
```
|
||||
[ips_fixed]
|
||||
127.0.0.2 51238
|
||||
127.0.0.3 51239
|
||||
127.0.0.4 51240
|
||||
127.0.0.5 51241
|
||||
```
|
||||
823
docs/sidechain/design.md
Normal file
823
docs/sidechain/design.md
Normal file
@@ -0,0 +1,823 @@
|
||||
# Introduction
|
||||
|
||||
This document covers the design of side chains using the XRP ledger. It covers
|
||||
the implementation of federators, how federators are initially synced and kept
|
||||
in sync, how cross chain transactions work, and how errors are handled. It does
|
||||
not give a high level overview of side chains or describe their benefits.
|
||||
|
||||
# Terminology
|
||||
|
||||
_federator_: A server that listens for triggering transactions on both the main
|
||||
chain and the side chain. Each federator has a signing key associated with it
|
||||
that is used to sign transactions. A transaction must be signed by a quorum of
|
||||
federators before it can be submitted. Federators are responsible for creating
|
||||
and signing valid response transactions, collecting signatures from other
|
||||
federators, and submitting transactions to the main chain and side chain.
|
||||
|
||||
_main chain_: Ledger where assets originate and where assets will be locked
|
||||
while used on the side chain. For most applications, the main chain will be the
|
||||
XRP ledger mainnet.
|
||||
|
||||
_side chain_: Ledger where proxy assets for the locked main chain assets are
|
||||
issued. Side chains may have rules, transactors, and validators that are very
|
||||
different from the main chain. Proxy assets on the side chain can be sent back
|
||||
to the main chain where they will be unlocked from the control of the
|
||||
federators.
|
||||
|
||||
_door account_: Account controlled by the federators. There are two door
|
||||
accounts: one on the main chain and one on the side chain. Cross chain
|
||||
transactions are started by users sending assets to a door account. Main chain
|
||||
to side chain transactions cause the balance to increase on the main chain door
|
||||
account and the balance to decrease on the side chain door account. It is called
|
||||
a "door" because it is the mechanism to move assets from one chain to another -
|
||||
much like going between rooms in a house requires stepping through a door.
|
||||
|
||||
_triggering transaction_: A transaction that causes the federators to start the
|
||||
process of signing and submitting a new response transaction. For example,
|
||||
sending XRP to the main chain's door account is a triggering transaction that
|
||||
will cause the federators to submit a new transaction on the side chain.
|
||||
|
||||
_response transaction_: A transaction submitted by the federators in reaction to
|
||||
a triggering transaction. Note that _triggering transaction_ and _response
|
||||
transaction_ depends on context. Sending XRP from a _door account_ to a user
|
||||
account is a _response transaction_ when thinking about cross chain
|
||||
transactions. It is a _triggering transaction_ when thinking about how to handle
|
||||
failed transactions.
|
||||
|
||||
# New RPC Command is a key primitive
|
||||
|
||||
Side chains introduce a new subscription stream called
|
||||
"account_history_tx_stream". Given an account, this streams both new
|
||||
transactions and historical transactions from validated ledgers back to the
|
||||
client. The transactions are streamed in order and without gaps, and each
|
||||
transaction is given a numeric id. New transactions start at id 0 and continue
|
||||
in the positive direction. Historical transactions start at id -1 and continue in
|
||||
the negative direction. New transactions are sent in the same order as they were
|
||||
applied to the ledger, and historical transactions are in the reverse order they
|
||||
were applied to the ledger. The server will continue to stream historical
|
||||
transaction until it reaches the account's first transaction or the user sends a
|
||||
command signaling that historical transactions are no longer needed. This can be
|
||||
done without closing the stream, and new transactions will continue to be sent.
|
||||
Note that these transactions include all the transactions that affect the
|
||||
account, not just triggering and response transactions.
|
||||
|
||||
It's important to note that while historical and new transactions may be
|
||||
interleaved in the stream, there are never any gaps in the transactions.
|
||||
Transaction 7 MUST be sent before transaction 8, and transaction -7 MUST be sent
|
||||
before transaction -8.
|
||||
|
||||
This is the key primitive that allows federators to agree on transaction
|
||||
values - transaction types, sequence numbers, asset amounts, and destination
|
||||
addresses - without communicating among themselves (of course signing
|
||||
transaction requires communication). Since the transactions are from validated
|
||||
ledgers, all the federators will see the same transactions in the same order.
|
||||
|
||||
# Federators
|
||||
|
||||
## Federator Introduction
|
||||
|
||||
A federator acts as a bridge between a main chain and a side chain. Through a
|
||||
multi-signature scheme, the federators collectively control an account on the
|
||||
main chain and an account on the side chain. These accounts are called door
|
||||
accounts. A federator listens for transactions on these door accounts. When a
|
||||
federator hears a triggering transaction, it will eventually submit a new
|
||||
response transaction that completes the triggering transaction.
|
||||
|
||||
Initially, the federators will live in the same executable as the side chain
|
||||
validators. However, the proposed implementation purposely does not take
|
||||
advantage of this fact. The motivation for this is:
|
||||
|
||||
1. It makes it easy to eventually separate out the federator implementation from
|
||||
side chain validators.
|
||||
2. The side chain to main chain transactions will be implemented the same way as
|
||||
the main chain to side chain transactions. Building and maintaining one
|
||||
implementation is preferable to maintaining two implementations.
|
||||
|
||||
## Keeping the federators in sync
|
||||
|
||||
Federators decide to sign transactions by using the "account_history_tx_stream"
|
||||
to listen for transactions on each chain. New transactions on the main chain
|
||||
will cause a federator to sign a transaction meant for the side chain.
|
||||
Similarly, new transactions on the side chain will cause a federator to sign a
|
||||
transaction meant for the main chain. As a concrete example, consider how XRP is
|
||||
locked on the main chain and distributed on a side chain. A user sends XRP to
|
||||
the main chain door account. This causes the federators to submit a transaction
|
||||
sending XRP to a destination on the side chain. Recall that a transaction that
|
||||
causes a federator to sign a transaction is called a triggering transaction, and
|
||||
a transaction created to handle a triggering transaction is called a response
|
||||
transaction. In the example above, the user sending XRP on the main chain is a
|
||||
triggering transaction and the transaction created by the federators and
|
||||
submitted on the side chain is a response transaction.
|
||||
|
||||
When a new triggering transaction is detected, a federator needs to create a
|
||||
response transaction. The fee, destination address, and amount are all known.
|
||||
The only value that isn't fixed or derived from the triggering transaction is
|
||||
the account sequence number. It is easy to agree on a sequence number. But first
|
||||
let's show that stream of triggering transactions are the same for all the
|
||||
validators.
|
||||
|
||||
Notice that a response transaction is always on the opposite chain as the
|
||||
corresponding triggering transaction. If a response transaction could be on
|
||||
either chain, then there would be timing issues. Consider what would happen if
|
||||
two triggering transaction came in, one from the main chain and one from the
|
||||
side chain, and both of these triggering transactions required response
|
||||
transactions on the main chain. Since there are separate transaction streams
|
||||
coming from the main chain and side chain, different federators may see these
|
||||
transaction arrive in different orders. However, since triggering and response
|
||||
transactions are on different chains, the federators don't need to deal with
|
||||
this case. (Note: there is at least one response transaction that is needed in
|
||||
exceptional circumstances that violates this. Tickets are used to handle these
|
||||
transactions and will be described later).
|
||||
|
||||
Also notice that "account_history_tx_stream" delivers transactions in the order
|
||||
they were applied to the ledger without gaps.
|
||||
|
||||
This means that once a federator is in a state where it knows what sequence
|
||||
number should be used for the next response transaction (call it S), it will
|
||||
know the sequence number for all the subsequent response transactions. It will
|
||||
just be S+1, S+2, S+3, etc. Also notice that once in sync all the federators
|
||||
will see the same transactions in the same order. So all the federators will
|
||||
create the same response transactions and in the same order.
|
||||
|
||||
## Getting a federator in sync
|
||||
|
||||
When a federator joins the network, it doesn't know which triggering
|
||||
transactions it needs to sign and it doesn't know what sequence numbers should
|
||||
be used for response transactions. The "account_history_tx_stream" can be used
|
||||
to find this information and get the federator to a state where it can start
|
||||
signing and submitting transactions.
|
||||
|
||||
While getting into sync, a federator collects historical and new transactions
|
||||
from both the side chain and main chain. Each transaction has an id that is used
|
||||
to keep the transactions in the same order they were applied to the ledger, and
|
||||
the collections are kept in this order.
|
||||
|
||||
One piece of information a syncing federator needs to find is which triggering
|
||||
transactions need to be treated as new and which ones have already been handled.
|
||||
This is easily found by looking at the historical transaction from the
|
||||
"account_history_tx_stream". The first time a response transaction is found,
|
||||
the hash of the associated triggering transaction is noted (this is recorded as
|
||||
a memo in the response transaction). All the triggering transactions that
|
||||
precede the noted triggering transaction have already been handled. The
|
||||
"account_history_tx_stream" must continue to stream historical transactions at
|
||||
least until the first response transaction is found. For example, if the first
|
||||
observed response transaction on the main chain has hash `r_hash_mainchain`
|
||||
and associated triggering transaction of `t_hash_sidechain`, that means we know
|
||||
all the triggering transactions on the side chain before `t_hash_sidechain` have
|
||||
been handled (including `t_hash_sidechain`).
|
||||
|
||||
Another set of data that needs to be collected are all the historical
|
||||
triggering transactions that come after `t_hash` (see above). Of course,
|
||||
`t_hash` comes from the "account_history_tx_stream" from one chain, and the
|
||||
triggering transactions come from the other chain. This means more
|
||||
transactions than needed may be gathered.
|
||||
|
||||
Historical transaction continue to be streamed until the triggering transaction
|
||||
associated with `t_hash_this_chain` is found and value of `t_hash_other_chain`
|
||||
is found. For example, the main chain will continue to collect historical
|
||||
transaction until:
|
||||
|
||||
1) The side chain stream has found a response transaction and informed the main
|
||||
chain of the hash of the associated triggering transaction.
|
||||
|
||||
2) The side chain stream has found that triggering transaction.
|
||||
|
||||
3) This main chain stream has found a response transaction and informed the side
|
||||
chain syncing algorithm of the associated triggering transaction.
|
||||
|
||||
The above description does not handle the case where the start of historical
|
||||
transactions is reached without finding any response transactions. If this
|
||||
happens then the other chain must also collect all the historical transactions,
|
||||
since we cannot show that triggering transaction has ever been handled.
|
||||
|
||||
Once this data has been collected, a command will be sent to ask the
|
||||
"account_history_tx_stream" to stop sending historical transaction (Note:
|
||||
without closing the stream. If the stream were closed it would be possible to
|
||||
miss a transaction). Starting from the transaction after the `t_hash`
|
||||
transaction, the collected triggering transaction will be iterated in the order
|
||||
they were applied to the ledger and treated as if they were newly arrived from
|
||||
the transaction stream. Once this is done the federator is synced and can switch
|
||||
to handling new transactions normally.
|
||||
|
||||
As long as there are regular cross chain transactions being sent from both the
|
||||
main chain and the side chain, the above procedure doesn't require too many
|
||||
historical transactions. However, if one chain almost never sends cross chain
|
||||
transactions then the syncing procedure is not as efficient as it could be. As
|
||||
an extreme example, consider a main chain that sends cross chain transactions to
|
||||
a side chain, and the side chain never sends cross chain transactions back.
|
||||
Since there would be no response transactions on the main chain, the sync
|
||||
algorithm would fetch all of the main chain transactions. One way to improve
|
||||
this situation is for the federators to checkpoint the last known response
|
||||
transaction and its corresponding triggering transactions. If they did this
|
||||
individually, then on startup a federator would need to fetch at most as much
|
||||
history as the time it was down. If they did this as a group (by adding a new
|
||||
ledger object on the side chain, for example), then syncing could require much
|
||||
less history. For now, these strategies are not used. The benefits of a simpler
|
||||
implementation and not adding any new ledger objects outweighed the benefits
|
||||
of faster syncing for some types of sidechains.
|
||||
|
||||
## Federator Implementation
|
||||
|
||||
The Federator is an event loop that services events sent to it from the
|
||||
listeners. Events are handled in the `mainLoop` method. This runs on a separate
|
||||
thread. It runs on a separate thread so all the event handlers run in the order
|
||||
they were received and on the same thread.
|
||||
|
||||
### Federator Events
|
||||
|
||||
A `Federator` event is a `std::variant` of all the event types. The current
|
||||
event types are:
|
||||
|
||||
* `XChainTransferDetected`. This is added when a federator detects the start of
|
||||
a cross chain transaction.
|
||||
```c++
|
||||
struct XChainTransferDetected
|
||||
{
|
||||
// direction of the transfer
|
||||
Dir dir_;
|
||||
// Src account on the src chain
|
||||
AccountID src_;
|
||||
// Dst account on the dst chain
|
||||
AccountID dst_;
|
||||
STAmount deliveredAmt_;
|
||||
std::uint32_t txnSeq_;
|
||||
uint256 txnHash_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
};
|
||||
```
|
||||
|
||||
* `XChainTransferResult`. This is added when a federator detects the end of a
|
||||
cross chain transaction.
|
||||
```c++
|
||||
struct XChainTransferResult
|
||||
{
|
||||
// direction is the direction of the triggering transaction.
|
||||
// I.e. A "mainToSide" transfer result is a transaction that
|
||||
    // happens on the sidechain (the triggering transaction happened on the
|
||||
// mainchain)
|
||||
Dir dir_;
|
||||
AccountID dst_;
|
||||
std::optional<STAmount> deliveredAmt_;
|
||||
std::uint32_t txnSeq_;
|
||||
// Txn hash of the initiating xchain transaction
|
||||
uint256 srcChainTxnHash_;
|
||||
    // Txn hash of the federator's transaction on the dst chain
|
||||
uint256 txnHash_;
|
||||
TER ter_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
};
|
||||
```
|
||||
|
||||
* `RefundTransferResult`. This is added when a federator detects the end of a
|
||||
refund transaction. Refunds may occur if there is an error transferring funds
|
||||
at the end of a cross chain transaction.
|
||||
```c++
|
||||
struct RefundTransferResult
|
||||
{
|
||||
// direction is the direction of the triggering transaction.
|
||||
// I.e. A "mainToSide" refund transfer result is a transaction that
|
||||
    // happens on the mainchain (the triggering transaction happened on the
|
||||
// mainchain, the failed result happened on the side chain, and the refund
|
||||
// result happened on the mainchain)
|
||||
Dir dir_;
|
||||
AccountID dst_;
|
||||
std::optional<STAmount> deliveredAmt_;
|
||||
std::uint32_t txnSeq_;
|
||||
// Txn hash of the initiating xchain transaction
|
||||
uint256 srcChainTxnHash_;
|
||||
// Txn hash of the federator's transaction on the dst chain
|
||||
uint256 dstChainTxnHash_;
|
||||
// Txn hash of the refund result
|
||||
uint256 txnHash_;
|
||||
TER ter_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
};
|
||||
```
|
||||
|
||||
* `TicketCreateResult`. This is added when the federator detects a ticket create
|
||||
transaction.
|
||||
|
||||
```
|
||||
struct TicketCreateResult
|
||||
{
|
||||
Dir dir_;
|
||||
bool success_;
|
||||
std::uint32_t txnSeq_;
|
||||
std::uint32_t ledgerIndex_;
|
||||
uint256 srcChainTxnHash_;
|
||||
uint256 txnHash_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
std::uint32_t sourceTag_;
|
||||
std::string memoStr_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
|
||||
void
|
||||
removeTrigger();
|
||||
};
|
||||
```
|
||||
|
||||
* `DepositAuthResult`. This is added when the federator detects a deposit auth
|
||||
transaction. Deposit auth is used to pause cross chain transactions if the
|
||||
federators fall too far behind.
|
||||
```
|
||||
struct DepositAuthResult
|
||||
{
|
||||
Dir dir_;
|
||||
bool success_;
|
||||
std::uint32_t txnSeq_;
|
||||
std::uint32_t ledgerIndex_;
|
||||
uint256 srcChainTxnHash_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
AccountFlagOp op_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
};
|
||||
```
|
||||
|
||||
* `BootstrapTicket`. This is added when the federator detects one of the initial
|
||||
ticket transactions that is added during account setup.
|
||||
```
|
||||
struct BootstrapTicket
|
||||
{
|
||||
bool isMainchain_;
|
||||
bool success_;
|
||||
std::uint32_t txnSeq_;
|
||||
std::uint32_t ledgerIndex_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
std::uint32_t sourceTag_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
};
|
||||
```
|
||||
|
||||
* `DisableMasterKeyResult`. This is added when the federator detects an
|
||||
`AccountSet` transaction that disables the master key. Transactions that come
|
||||
before this are assumed to be part of account setup.
|
||||
```
|
||||
struct DisableMasterKeyResult
|
||||
{
|
||||
bool isMainchain_;
|
||||
std::uint32_t txnSeq_;
|
||||
std::int32_t rpcOrder_;
|
||||
|
||||
EventType
|
||||
eventType() const;
|
||||
|
||||
Json::Value
|
||||
toJson() const;
|
||||
};
|
||||
```
|
||||
|
||||
* `HeartbeatTimer`. This is added at regular intervals and is used to trigger
|
||||
events based on timeouts.
|
||||
|
||||
### Federator Event Handling
|
||||
|
||||
Handling the events is very simple. The `mainLoop` pops events off the event
|
||||
queue and dispatches it to an event handler. There is one event handler for each
|
||||
event type. There is also some logic to prevent busy waiting.
|
||||
|
||||
```c++
|
||||
void
|
||||
onEvent(event::XChainTransferDetected const& e);
|
||||
void
|
||||
onEvent(event::XChainTransferResult const& e);
|
||||
void
|
||||
onEvent(event::RefundTransferResult const& e);
|
||||
void
|
||||
onEvent(event::HeartbeatTimer const& e);
|
||||
|
||||
void
|
||||
Federator::mainLoop()
|
||||
{
|
||||
FederatorEvent event;
|
||||
while (!requestStop_)
|
||||
{
|
||||
if (!events_.pop(event))
|
||||
{
|
||||
using namespace std::chrono_literals;
|
||||
// In rare cases, an event may be pushed and the condition
|
||||
// variable signaled before the condition variable is waited on.
|
||||
// To handle this, set a timeout on the wait.
|
||||
std::unique_lock l{m_};
|
||||
cv_.wait_for(
|
||||
l, 1s, [this] { return requestStop_ || !events_.empty(); });
|
||||
continue;
|
||||
}
|
||||
std::visit([this](auto&& e) { this->onEvent(e); }, event);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Events are added to a queue in the `push` method.
|
||||
```c++
|
||||
void
|
||||
Federator::push(FederatorEvent const& e)
|
||||
{
|
||||
bool const notify = events_.empty();
|
||||
events_.push(e);
|
||||
if (notify)
|
||||
{
|
||||
std::lock_guard<std::mutex> l(m_);
|
||||
cv_.notify_one();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Due the threading and lifetime issues, `Federator` is kept as a `shared_ptr`
|
||||
inside of the app and enables `shared_from_this`. Since `shared_from_this`
|
||||
cannot be used from a constructor, it uses two-phase initialization of a
|
||||
constructor and an `init` function. These constructors are private, and objects
|
||||
are created with a `make_federator` function that implements the two-phase
|
||||
initialization. (Note: since `make_shared` cannot call a private constructor, a
|
||||
private `PrivateTag` is used instead.)
|
||||
|
||||
### Federator Listeners
|
||||
|
||||
There are two listener classes: a `MainchainListener` and a `SidechainListener`
|
||||
(both inherit from a common `ChainListener` class where much of the
|
||||
implementation lives). These classes monitor the chains for transactions on the
|
||||
door accounts and add the appropriate event to the federator. The
|
||||
`MainchainListener` uses a websocket to monitor transactions. Since the
|
||||
federator lives in the same executable as the side chain validators,
|
||||
`SidechainListener` used `InfoSub` directly rather than a websocket.
|
||||
|
||||
The federator is kept as `weak_ptr` in this class. Since these class will be used
|
||||
as part of a callback from different threads, both listener classes enable
|
||||
`shared_from_this`.
|
||||
|
||||
### Federator WebsocketClient
|
||||
|
||||
The `WebsocketClient` class takes an asio `io_service`, main chain server ip and
|
||||
port, and callback. When a command response or new stream result is received,
|
||||
the callback is executed (it will be called from the `io_service` thread). The
|
||||
`MainchainListener` uses this class to listen for transactions.
|
||||
This class is also used to send transactions to the main chain.
|
||||
```c++
|
||||
WebsocketClient(
|
||||
std::function<void(Json::Value const&)> callback,
|
||||
boost::asio::io_service& ios,
|
||||
boost::asio::ip::address const& ip,
|
||||
std::uint16_t port,
|
||||
std::unordered_map<std::string, std::string> const& headers = {});
|
||||
```
|
||||
|
||||
# Triggering Transaction Handler
|
||||
|
||||
The triggering transaction handler is part of a single-threaded event loop that
|
||||
responds to events triggered from the different chains. It is single-threaded so
|
||||
there's no danger of events being processed out-of-order and for simplicity of
|
||||
code. Note that event handlers are not computationally intensive, so there would
|
||||
be little benefit to multiple threads.
|
||||
|
||||
When a new event is handled, a response transaction may be prepared. Apart from
|
||||
the sequence number, all the values of a response transaction can be determined
|
||||
from the triggering transaction, and it does not require communicating with the
|
||||
other federators. For example, when sending assets between chains, the response
|
||||
transaction will contain an amount equal to the XRP `delivered_amt` in the
|
||||
triggering transaction, a fixed fee, and a memo with the hash of the cross chain
|
||||
transaction. The memo is important because it creates a transaction unique to
|
||||
the triggering cross chain transaction, and it is safer to sign such a transaction
|
||||
in case sequence numbers somehow get out of sync between the federators. The
|
||||
`LastLedgerSequence` field is not set.
|
||||
|
||||
Next the federator will sign these transactions and send its signature to its
|
||||
peers. This happens in another thread so the event loop isn't slowed down.
|
||||
|
||||
Next the federator adds its signatures to the txn in the `pending transactions`
|
||||
collection for the appropriate chain. See [Adding a signature to Pending
|
||||
Transactions](#adding-a-signature-to-pending-transactions)
|
||||
|
||||
If it doesn't have enough signatures to complete the multi-signature, the
|
||||
federator will add signatures by listening for signature messages from other
|
||||
peers see the [Collecting Signatures](#collecting-signatures) section.
|
||||
|
||||
## Collecting Signatures
|
||||
|
||||
A federator receives signatures by listening to peer messages. Signatures are
|
||||
automatically sent when a federator detects a new cross chain transaction.
|
||||
|
||||
When a federator receives a new signature, it forwards it to its peers that have
|
||||
not already received this signature from this federator.
|
||||
|
||||
Next it checks if this transaction has already been handled. If so, it does
|
||||
nothing further.
|
||||
|
||||
Next the federator adds the signature to the txn in the `pending transactions`
|
||||
collection. See [Adding a signature to Pending
|
||||
Transactions](#adding-a-signature-to-pending-transactions).
|
||||
|
||||
Note that a federator may receive multiple signatures for the same transaction
|
||||
but with different sequence numbers. This should only happen if a federator
|
||||
somehow has the wrong sequence number and is later corrected.
|
||||
|
||||
## Adding a signature to Pending Transactions
|
||||
|
||||
The `pending transactions` collections stores the transactions that do not yet
|
||||
have enough signatures to be submitted or have not been confirmed as sent. There
|
||||
is one collection for the main chain and one for the side chain. The key to this
|
||||
collection is a hash of the triggering transaction, but with a sequence of zero
|
||||
and a fee of zero (remember that this transaction has a memo field with the hash
|
||||
of the cross chain transaction, so it is unique). It is hashed this way to
|
||||
detect inconsistent sequence numbers and fees. The `value` of this collection is
|
||||
a struct that contains this federators signature (if available) and another map
|
||||
with a `key` of sequence number and fee, and a `value` of a collection of
|
||||
signatures. Before adding a signature to this collection, if the signature is
|
||||
not on the multi-signature list, it is discarded. The signature is also checked
|
||||
for validity. If it is invalid, it is discarded.
|
||||
|
||||
After adding a signature to this collection it checks if this signature is
|
||||
for a transaction with the same sequence number and fee as this federator, and
|
||||
if it has enough signatures for a valid multi-signed transaction. If so, the
|
||||
transactions are added to the `queued transactions` collection for the
|
||||
appropriate chain and the function to submit transactions is called (see
|
||||
[Transaction Submit](#transaction-submit)).
|
||||
|
||||
If the transaction has enough signature for a valid multi-signed transaction,
|
||||
and the sequence and fee _do not_ match the ones from this federator, then this
|
||||
federator sequence number must be out of sync with the rest of the network. If
|
||||
it is detected, the federator will correct its sequence number. Note that there
|
||||
may be other transactions that have been submitted since this transaction, so
|
||||
the sequence number needs to be appropriately adjusted to account for this.
|
||||
Inconsistent fee will not be handled in the prototype. The fee will always be a
|
||||
constant. This federator will also change its signature for this transaction and
|
||||
submit it to the network (see [Transaction Submit](#transaction-submit)). This
|
||||
new signature will be broadcast to the network.
|
||||
|
||||
A heartbeat timer event will periodically check the collection for transactions
|
||||
in the queue older than some threshold. When these are detected, an error will be
|
||||
printed to the log. If the federator knows the transaction has already been
|
||||
handled by the network, it will be removed from the queue.
|
||||
|
||||
## Transaction Submit
|
||||
|
||||
There is a limit on the number of transactions in flight at any time. While the
|
||||
number of transactions is below this limit, and the next transaction in the
|
||||
sequence is part of the `queued transactions` collection, send the response
|
||||
transaction to the appropriate chain. Once a transaction is sent, it is removed
|
||||
from the `queued transactions` collection. However, it remains part of the
|
||||
`pending transactions` collection until a response transaction result is
|
||||
observed.
|
||||
|
||||
Note that limiting the number of transactions in flight to one makes for a
|
||||
simpler design, but greatly limits the throughput of cross-chain transactions.
|
||||
|
||||
## Handling Response Transaction Results
|
||||
|
||||
Response transaction results are used to know when a response transaction has
|
||||
been handled and can be removed from the `pending transactions` collection. It
|
||||
is also used to issue refunds when a response transaction fails under some
|
||||
circumstances.
|
||||
|
||||
If a transaction fails with anything other than `tefAlready`, then a new
|
||||
response transaction is created that refunds some portion of the original
|
||||
amount to the original sending account. Gathering signatures for this refund
|
||||
transaction is the same as what's done for other triggering transactions. Much like
|
||||
sending an asset cross-chain, transactions that trigger refunds and their
|
||||
response transactions happen on different chains. This means we can use the same
|
||||
algorithm to assign sequence numbers to the response transaction.
|
||||
|
||||
If a transaction fails with a `tefAlready`, that means another federator already
|
||||
submitted the transaction. Ignore this error.
|
||||
|
||||
There is also a timer that checks for transactions that have not had results for
|
||||
too long of a time. If they are detected, an error is logged, but the prototype
|
||||
does not attempt to handle the error further.
|
||||
|
||||
## Assigning sequence numbers
|
||||
|
||||
A federator keeps a variable that gives the next sequence number to assign to a
|
||||
response transaction. When a new triggering transaction is detected, this
|
||||
sequence number is given to the response transaction and incremented. How the
|
||||
initial value is assigned is described in the [Getting a federator in
|
||||
sync](#getting-a-federator-in-sync) section. How an incorrect sequence number is
|
||||
corrected is described in the [Adding a signature to Pending
|
||||
Transactions](#adding-a-signature-to-pending-transactions) section.
|
||||
|
||||
## Assigning fees
|
||||
|
||||
Side chain fees are burned, so this balance can never be redeemed through normal
|
||||
cross chain transactions. If we wanted, these burned fees could be made
|
||||
available to the federators by withdrawing XRP from the main chain account.
|
||||
|
||||
Given these fees are burned and are (in effect) paid by the account doing a
|
||||
cross chain transaction, I propose these fees should be set on startup and kept
|
||||
constant. Initial implementations will use a 20 drop fee.
|
||||
|
||||
## Specifying a cross-chain destination address
|
||||
|
||||
When sending a cross chain payment, the destination on the originating chain is
|
||||
the special "door" account controlled by the federators. How does the user
|
||||
specify what the destination address on the side chain should be? There are two
|
||||
ways:
|
||||
|
||||
1) Specify the destination as a memo.
|
||||
2) Assign tags to destinations.
|
||||
|
||||
The memo field can always be used to specify a destination. In addition, when a
|
||||
new account is created, a new mapping is assigned between a tag and an address.
|
||||
With a main to side transaction, the new tag will map to the newly created side
|
||||
chain account.
|
||||
|
||||
The "tags" scheme is not yet designed. For now, the implementation will always
|
||||
use the memo field to specify the destination address.
|
||||
|
||||
If an account sends an asset to the door account without specifying an address, a
|
||||
refund will be issued to the sending account.
|
||||
|
||||
## Setting up the door accounts
|
||||
|
||||
The root account on the side chain is used as the door account. The door account
|
||||
on the main chain is just a regular account. The following transactions must be
|
||||
sent to these accounts before running the federators:
|
||||
|
||||
* `SignerListSet`: Since the federators will jointly control these accounts, a
|
||||
`SignerListSet` transaction must be sent to both the main chain account and
|
||||
the side chain account. The signer list should consist of the federator's
|
||||
public signing keys and should match the keys specified in the config file.
|
||||
The quorum should be set to 80% of the federators on the list (i.e. for five
|
||||
federators, set this to 4).
|
||||
The federators use tickets to handle unusual situations. For example, if the
|
||||
federators fall too far behind they will disallow new cross chain transactions
|
||||
until they catch up. Three tickets are needed, and three transactions are needed
|
||||
to create the tickets (since they use the source tag as a way to set the purpose
|
||||
for the ticket).
|
||||
|
||||
* `Ticket`: Send a `Ticket` transaction with the source tag of `1` to both the
|
||||
main chain account and side chain account.
|
||||
|
||||
* `Ticket`: Send a `Ticket` transaction with the source tag of `2` to both the
|
||||
main chain account and side chain account.
|
||||
|
||||
* `Ticket`: Send a `Ticket` transaction with the source tag of `3` to both the
|
||||
main chain account and side chain account.
|
||||
|
||||
* `TrustSet` if the cross chain transactions involve issued assets (IOUs), set
|
||||
up the trust lines by sending a `TrustSet` transaction to the appropriate
|
||||
accounts. If the cross chain transactions only involve XRP, this is not
|
||||
needed.
|
||||
|
||||
* `AccountSet`: Disable the master key with an `AccountSet` transaction. This
|
||||
ensures that nothing except the federators (as a group) control these
|
||||
accounts. Send this transaction to both the main chain account and side chain
|
||||
account.
|
||||
|
||||
*Important*: The `AccountSet` transaction that disables the master key *must* be
|
||||
the last transaction. The federator's initialization code uses this to
|
||||
distinguish transactions that are part of setup and other transactions.
|
||||
|
||||
## Handling a crashed main chain server
|
||||
|
||||
The side chain depends on a websocket to main chain server to listen for
|
||||
transactions. If the main chain server disconnects from the side chain a
|
||||
fail-over option should be added. This would allow a federator to automatically
|
||||
connect to other main chain servers if the one it is currently connected to goes
|
||||
down.
|
||||
|
||||
## Federator throughput
|
||||
|
||||
On average, federators must process cross chain transactions faster than they
|
||||
occur. If there are 10 cross chain transactions per ledger, but the federators
|
||||
can only sign and submit 5 response transactions per ledger, the federators will
|
||||
keep falling farther behind and will never catch up. Monitoring the size of the
|
||||
queues can detect this situation, but there is no good remedy for it. The rate
|
||||
of cross chain transactions is out of its control.
|
||||
|
||||
If this situation occurs, new transactions to the door account will be disabled
|
||||
with a "deposit auth" transaction, and transactions will be disabled until the
|
||||
federators catch up. Because this transaction is not in response to a triggering
|
||||
transaction on the "opposite" chain, assigning a sequence number for the
|
||||
"deposit auth" transaction is more involved. We use the protocol
|
||||
in [Handling an unordered event](#Handling-an-unordered-event) section to submit
|
||||
the transaction.
|
||||
|
||||
## Handling an unordered event
|
||||
|
||||
Cross chain payment transactions from one chain are sorted by that chain's
|
||||
consensus protocol. Each transaction results in one payment transaction in the
|
||||
destination chain, hence consuming one sequence number of the door account in
|
||||
the destination chain. Starting from the same initial sequence number assigned
|
||||
when the account was created, and processing the same stream of payment
|
||||
transactions, the federators agree on which sequence number to use for a given
|
||||
payment transaction without communication.
|
||||
|
||||
From time to time, however, federators have to create transactions to process
|
||||
unordered events, such as temporarily "disable" a door account with a "deposit
|
||||
auth" AccountSet transaction, or update the door accounts signerLists with
|
||||
SignerListSet transactions. Assigning sequence numbers to these transactions is
|
||||
more involved than payment transactions, because these events are not sorted
|
||||
with themselves nor with payment transactions. Since they are not sorted, there
|
||||
is a chance that different federators use different sequence numbers. If
|
||||
different sequence numbers were used for a transaction, this transaction and
|
||||
(depending on the design) some transactions following it won't be processed. So
|
||||
for these transactions, tickets are used to assign sequence numbers. Tickets are
|
||||
reserved when the side chain first starts (on genesis), and tickets are
|
||||
replenished as they are used.
|
||||
|
||||
Our first ticket based protocol used tickets for both the transaction that
|
||||
handles an unordered event and the "TicketCreate" transaction to create new
|
||||
tickets. It was simple but had two issues: the sequence numbers allocated to
|
||||
renewed tickets and the sequence numbers used for payment transactions occurred
|
||||
at the same time may overlap so the payment transactions must be modified,
|
||||
resigned and resubmitted; there is also a small chance that the payment
|
||||
transactions are delivered to the destination chain out of order. Our current
|
||||
design grows from the first design. We use a pre-allocated ticket pair, one main
|
||||
chain ticket and one side chain ticket, to submit "no-op" transactions to both
|
||||
chains. Once they are processed by the chains' consensus and sorted with payment
|
||||
transactions, in later rounds of the protocol, both the "TicketCreate"
|
||||
transactions to create new tickets and the transaction(s) that handles an
|
||||
unordered event will use real sequence numbers instead of tickets. Hence we
|
||||
avoid both the issues of the first design. The current design is shown in the
|
||||
diagram below.
|
||||
|
||||
The top portion of the diagram shows (simplified) payment process sequence. Note
|
||||
that the only usage of the side chain sequence numbers is for processing
|
||||
payments already sorted by main chain consensus, and vice versa. The bottom
|
||||
portion of the diagram shows the 3-round protocol for processing unordered
|
||||
events (round 2 and 3 could be merged). Note that the side chain sequence
|
||||
numbers still have a single usage, i.e. processing transactions already sorted
|
||||
by main chain consensus, and vice versa.
|
||||
|
||||
In more detail, the first round of the protocol uses a ticket pair to send no-op
|
||||
transactions to both of the chains. In the second round, the inclusion of the
|
||||
no-op transactions in the ledgers means they are sorted together with other
|
||||
transactions. Since they are sorted, the sequence numbers of their corresponding
|
||||
next round transactions (TicketCreate) are agreed by the federators. The
|
||||
federators can also predict the ticket numbers that will be allocated once the
|
||||
ticketCreate transactions are processed by the consensus. Hence they will not
|
||||
use those sequence numbers for other purposes. In the final round of the
|
||||
protocol, the new tickets are indeed created and the ticket pair are refilled,
|
||||
and the transaction(s) that handles the unordered event can take a sequence
|
||||
number and submit to its destination chain.
|
||||
|
||||
This protocol can be used for one-sided events such as "disable" main chain
|
||||
account temporarily, or two-sided events such as update the signerLists of both
|
||||
door accounts. To avoid a race resulting from multiple events competing for the same
|
||||
ticket pair, every ticket pair has a "purpose" so that the pair can only be used
|
||||
for one type of events. Currently three purposes are implemented, or planned.
|
||||
|
||||

|
||||
|
||||
## Federator as a separate program
|
||||
|
||||
For the prototype, the federators will be validators on the side chain. However,
|
||||
a federator can be independent from both chains. The federator can listen for
|
||||
side chain transactions with a websocket, just like it does for the main chain.
|
||||
The `last submitted txn` and `last confirmed txn` values can be kept by the
|
||||
federators themselves.
|
||||
|
||||
The biggest advantage to combining a federator and a validator is to re-use the
|
||||
overlay layer. This saves on implementation time in the prototype. Longer term,
|
||||
it makes sense to separate a federator and a validator.
|
||||
|
||||
# Config file changes
|
||||
|
||||
See [this](configFile.md) document for the config file stanzas used to support
|
||||
side chains.
|
||||
|
||||
# New ledger objects
|
||||
|
||||
Notice that side chains do not require any new ledger objects, and do not
|
||||
require federators to communicate in order to agree on transaction values.
|
||||
|
||||
# New RPC commands
|
||||
|
||||
* "account_history_tx_stream" is used to get historic transactions
|
||||
when syncing and get new transactions once synced.
|
||||
|
||||
* "Federator info" is used to get information about the state of a federator,
|
||||
including its sync state and the state of its transaction queues.
|
||||
50
docs/sidechain/federatorAccountSetup.md
Normal file
50
docs/sidechain/federatorAccountSetup.md
Normal file
@@ -0,0 +1,50 @@
|
||||
## Introduction
|
||||
|
||||
Side chain federators work by controlling an account on the main chain and an
|
||||
account on the side chain. The account on the side chain is the root account.
|
||||
The account on the main chain is specified in the configuration file (See
|
||||
[configFile.md](docs/sidechain/configFile.md) for the new configuration file
|
||||
stanzas).
|
||||
|
||||
The test scripts will set up these accounts for you when running a test network
|
||||
on your local machine (see the functions `setup_mainchain` and `setup_sidechain`
|
||||
in the sidechain.py module). This document describes what's needed to set up
|
||||
these accounts if not using the scripts.
|
||||
|
||||
## Transactions
|
||||
|
||||
* `SignerListSet`: Since the federators will jointly control these accounts, a
|
||||
`SignerListSet` transaction must be sent to both the main chain account and
|
||||
the side chain account. The signer list should consist of the federator's
|
||||
public signing keys and should match the keys specified in the config file.
|
||||
The quorum should be set to 80% of the federators on the list (i.e. for five
|
||||
federators, set this to 4).
|
||||
|
||||
The federators use tickets to handle unusual situations. For example, if the
|
||||
federators fall too far behind they will disallow new cross chain transactions
|
||||
until they catch up. Three tickets are needed, and three transactions are needed
|
||||
to create the tickets (since they use the source tag as a way to set the purpose
|
||||
for the ticket).
|
||||
|
||||
* `Ticket`: Send a `Ticket` transaction with the source tag of `1` to both the
|
||||
main chain account and side chain account.
|
||||
|
||||
* `Ticket`: Send a `Ticket` transaction with the source tag of `2` to both the
|
||||
main chain account and side chain account.
|
||||
|
||||
* `Ticket`: Send a `Ticket` transaction with the source tag of `3` to both the
|
||||
main chain account and side chain account.
|
||||
|
||||
* `TrustSet` if the cross chain transactions involve issued assets (IOUs), set
|
||||
up the trust lines by sending a `TrustSet` transaction to the appropriate
|
||||
accounts. If the cross chain transactions only involve XRP, this is not
|
||||
needed.
|
||||
|
||||
* `AccountSet`: Disable the master key with an `AccountSet` transaction. This
|
||||
ensures that nothing except the federators (as a group) control these
|
||||
accounts. Send this transaction to both the main chain account and side chain
|
||||
account.
|
||||
|
||||
*Important*: The `AccountSet` transaction that disables the master key *must* be
|
||||
the last transaction. The federator's initialization code uses this to
|
||||
distinguish transactions that are part of setup and other transactions.
|
||||
BIN
docs/sidechain/ticketsAndSeq.png
Normal file
BIN
docs/sidechain/ticketsAndSeq.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 182 KiB |
76
docs/sidechain/ticketsAndSeq.puml
Normal file
76
docs/sidechain/ticketsAndSeq.puml
Normal file
@@ -0,0 +1,76 @@
|
||||
@startuml
|
||||
|
||||
participant "main chain network" as mc #LightGreen
|
||||
box "Federator"
|
||||
participant "**main** chain\ndoor account listener" as mdl
|
||||
participant "**main** chain\nsignature collector" as msc
|
||||
participant "**main** chain\nsequence number" as msn #LightBlue
|
||||
participant "ticket pair" as t #LightBlue
|
||||
participant "unordered event" as ue #LightCoral
|
||||
participant "**side** chain\nsequence number" as ssn #LightBlue
|
||||
participant "**side** chain\nsignature collector" as ssc
|
||||
participant "**side** chain\ndoor account listener" as sdl
|
||||
end box
|
||||
participant "side chain network" as sc #LightGreen
|
||||
actor "federator admin" as fa #LightGreen
|
||||
|
||||
== payments ==
|
||||
group cross chain payment to side chain
|
||||
mc -> mdl: payment tx to door account\nin ledger
|
||||
mdl -> ssn: side chain door account payment tx created
|
||||
ssn -> ssc: sequence number filled
|
||||
ssc -> ssc: payment tx signed,\ncollect signatures
|
||||
ssc -> sc : with quorum signatures\nsubmit tx to network
|
||||
end
|
||||
group cross chain payment to main chain
|
||||
sc -> sdl: payment tx to door account\nin ledger
|
||||
sdl -> msn: main chain door account payment tx created
|
||||
msn -> msc: sequence number filled
|
||||
msc -> msc: payment tx signed,\ncollect signatures
|
||||
msc -> mc : with quorum signatures\nsubmit tx to network
|
||||
end
|
||||
|
||||
== unordered events ==
|
||||
group round 1
|
||||
fa -> ue : misc request from admin\nor federator internal event\nE.g. close main chain door account due to high load
|
||||
ue -> t : **two** no-op AccountSet txns created\n(for trigger ticketCreate txns round 2)
|
||||
activate t
|
||||
t -> msc: ticket number filled
|
||||
t -> ssc: ticket number filled
|
||||
deactivate t
|
||||
msc -> msc: no-op AccountSet tx signed,\ncollect signatures
|
||||
ssc -> ssc: no-op AccountSet tx signed,\ncollect signatures
|
||||
msc -> mc : with quorum signatures\nsubmit tx to network
|
||||
ssc -> sc : with quorum signatures\nsubmit tx to network
|
||||
end
|
||||
|
||||
group round 2
|
||||
'== unordered event, round 2 ==
|
||||
mc -> mdl: no-op AccountSet in ledger
|
||||
sc -> sdl: no-op AccountSet in ledger
|
||||
mdl -> ssn: create side chain door account ticketCreate tx\nto allocate side chain door account ticket\nto refill ticket pair
|
||||
sdl -> msn: create main chain door account ticketCreate tx\nto allocate main chain door account ticket\nto refill ticket pair
|
||||
ssn -> ssc: sequence number filled
|
||||
msn -> msc: sequence number filled
|
||||
ssc -> ssc: ticketCreate tx signed,\ncollect signatures
|
||||
msc -> msc: ticketCreate tx signed,\ncollect signatures
|
||||
ssc -> sc : with quorum signatures\nsubmit tx to network
|
||||
msc -> mc : with quorum signatures\nsubmit tx to network
|
||||
end
|
||||
|
||||
group round 3
|
||||
'== unordered event, round 3 ==
|
||||
mc -> mdl: ticketCreate in ledger
|
||||
mdl -> t : refill
|
||||
sc -> sdl: ticketCreate in ledger
|
||||
activate sdl
|
||||
sdl -> t : refill
|
||||
sdl -> msn: main chain deposit-auth AccountSet created
|
||||
note left: assuming the unordered event is to\nclose main chain door account\nto block new payments temporarily
|
||||
deactivate sdl
|
||||
msn -> msc: sequence number filled
|
||||
msc -> msc: deposit-auth AccountSet tx signed,\ncollect signatures
|
||||
msc -> mc : with quorum signatures\nsubmit tx to network
|
||||
end
|
||||
|
||||
@enduml
|
||||
@@ -1,3 +0,0 @@
|
||||
# Extras
|
||||
|
||||
These are not part of the official public Beast interface but they are used by the tests and some third party programs.
|
||||
@@ -1,170 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
|
||||
#ifndef BEAST_DOC_DEBUG_HPP
|
||||
#define BEAST_DOC_DEBUG_HPP
|
||||
|
||||
namespace beast {
|
||||
|
||||
#if BEAST_DOXYGEN
|
||||
|
||||
/// doc type (documentation debug helper)
|
||||
using doc_type = int;
|
||||
|
||||
/// doc enum (documentation debug helper)
|
||||
enum doc_enum
|
||||
{
|
||||
/// One (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// Two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc enum class (documentation debug helper)
|
||||
enum class doc_enum_class : unsigned
|
||||
{
|
||||
/// one (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc func (documentation debug helper)
|
||||
void doc_func();
|
||||
|
||||
/// doc class (documentation debug helper)
|
||||
struct doc_class
|
||||
{
|
||||
/// doc class member func (documentation debug helper)
|
||||
void func();
|
||||
};
|
||||
|
||||
/// (documentation debug helper)
|
||||
namespace nested {
|
||||
|
||||
/// doc type (documentation debug helper)
|
||||
using nested_doc_type = int;
|
||||
|
||||
/// doc enum (documentation debug helper)
|
||||
enum nested_doc_enum
|
||||
{
|
||||
/// One (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// Two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc enum class (documentation debug helper)
|
||||
enum class nested_doc_enum_class : unsigned
|
||||
{
|
||||
/// one (documentation debug helper)
|
||||
one,
|
||||
|
||||
/// two (documentation debug helper)
|
||||
two
|
||||
};
|
||||
|
||||
/// doc func (documentation debug helper)
|
||||
void nested_doc_func();
|
||||
|
||||
/// doc class (documentation debug helper)
|
||||
struct nested_doc_class
|
||||
{
|
||||
/// doc class member func (documentation debug helper)
|
||||
void func();
|
||||
};
|
||||
|
||||
} // nested
|
||||
|
||||
/** This is here to help troubleshoot doc/reference.xsl problems
|
||||
|
||||
Embedded references:
|
||||
|
||||
@li type @ref doc_type
|
||||
|
||||
@li enum @ref doc_enum
|
||||
|
||||
@li enum item @ref doc_enum::one
|
||||
|
||||
@li enum_class @ref doc_enum_class
|
||||
|
||||
@li enum_class item @ref doc_enum_class::one
|
||||
|
||||
@li func @ref doc_func
|
||||
|
||||
@li class @ref doc_class
|
||||
|
||||
@li class func @ref doc_class::func
|
||||
|
||||
@li nested type @ref nested::nested_doc_type
|
||||
|
||||
@li nested enum @ref nested::nested_doc_enum
|
||||
|
||||
@li nested enum item @ref nested::nested_doc_enum::one
|
||||
|
||||
@li nested enum_class @ref nested::nested_doc_enum_class
|
||||
|
||||
@li nested enum_class item @ref nested::nested_doc_enum_class::one
|
||||
|
||||
@li nested func @ref nested::nested_doc_func
|
||||
|
||||
@li nested class @ref nested::nested_doc_class
|
||||
|
||||
@li nested class func @ref nested::nested_doc_class::func
|
||||
*/
|
||||
void doc_debug();
|
||||
|
||||
namespace nested {
|
||||
|
||||
/** This is here to help troubleshoot doc/reference.xsl problems
|
||||
|
||||
Embedded references:
|
||||
|
||||
@li type @ref doc_type
|
||||
|
||||
@li enum @ref doc_enum
|
||||
|
||||
@li enum item @ref doc_enum::one
|
||||
|
||||
@li enum_class @ref doc_enum_class
|
||||
|
||||
@li enum_class item @ref doc_enum_class::one
|
||||
|
||||
@li func @ref doc_func
|
||||
|
||||
@li class @ref doc_class
|
||||
|
||||
@li class func @ref doc_class::func
|
||||
|
||||
@li nested type @ref nested_doc_type
|
||||
|
||||
@li nested enum @ref nested_doc_enum
|
||||
|
||||
@li nested enum item @ref nested_doc_enum::one
|
||||
|
||||
@li nested enum_class @ref nested_doc_enum_class
|
||||
|
||||
@li nested enum_class item @ref nested_doc_enum_class::one
|
||||
|
||||
@li nested func @ref nested_doc_func
|
||||
|
||||
@li nested class @ref nested_doc_class
|
||||
|
||||
@li nested class func @ref nested_doc_class::func
|
||||
*/
|
||||
void nested_doc_debug();
|
||||
|
||||
} // nested
|
||||
|
||||
#endif
|
||||
|
||||
} // beast
|
||||
|
||||
#endif
|
||||
@@ -68,7 +68,6 @@ RCLConsensus::RCLConsensus(
|
||||
journal)
|
||||
, consensus_(clock, adaptor_, journal)
|
||||
, j_(journal)
|
||||
|
||||
{
|
||||
}
|
||||
|
||||
@@ -86,22 +85,37 @@ RCLConsensus::Adaptor::Adaptor(
|
||||
, localTxs_(localTxs)
|
||||
, inboundTransactions_{inboundTransactions}
|
||||
, j_(journal)
|
||||
, nodeID_{validatorKeys.nodeID}
|
||||
, valPublic_{validatorKeys.publicKey}
|
||||
, valSecret_{validatorKeys.secretKey}
|
||||
, validatorKeys_(validatorKeys)
|
||||
, valCookie_{rand_int<std::uint64_t>(
|
||||
1,
|
||||
std::numeric_limits<std::uint64_t>::max())}
|
||||
, nUnlVote_(nodeID_, j_)
|
||||
, nUnlVote_(validatorKeys_.nodeID, j_)
|
||||
{
|
||||
assert(valCookie_ != 0);
|
||||
|
||||
JLOG(j_.info()) << "Consensus engine started"
|
||||
<< " (Node: " << to_string(nodeID_)
|
||||
<< ", Cookie: " << valCookie_ << ")";
|
||||
JLOG(j_.info()) << "Consensus engine started (cookie: " +
|
||||
std::to_string(valCookie_) + ")";
|
||||
|
||||
if (validatorKeys_.nodeID != beast::zero)
|
||||
{
|
||||
std::stringstream ss;
|
||||
|
||||
JLOG(j_.info()) << "Validator identity: "
|
||||
<< toBase58(
|
||||
TokenType::NodePublic,
|
||||
validatorKeys_.masterPublicKey);
|
||||
|
||||
if (validatorKeys_.masterPublicKey != validatorKeys_.publicKey)
|
||||
{
|
||||
JLOG(j_.debug())
|
||||
<< "Validator ephemeral signing key: "
|
||||
<< toBase58(TokenType::NodePublic, validatorKeys_.publicKey)
|
||||
<< " (seq: " << std::to_string(validatorKeys_.sequence) << ")";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
boost::optional<RCLCxLedger>
|
||||
std::optional<RCLCxLedger>
|
||||
RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
|
||||
{
|
||||
// we need to switch the ledger we're working from
|
||||
@@ -117,14 +131,12 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
|
||||
acquiringLedger_ = hash;
|
||||
|
||||
app_.getJobQueue().addJob(
|
||||
jtADVANCE,
|
||||
"getConsensusLedger",
|
||||
[id = hash, &app = app_](Job&) {
|
||||
jtADVANCE, "getConsensusLedger", [id = hash, &app = app_]() {
|
||||
app.getInboundLedgers().acquire(
|
||||
id, 0, InboundLedger::Reason::CONSENSUS);
|
||||
});
|
||||
}
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
assert(!built->open() && built->isImmutable());
|
||||
@@ -173,8 +185,8 @@ RCLConsensus::Adaptor::share(RCLCxTx const& tx)
|
||||
msg.set_status(protocol::tsNEW);
|
||||
msg.set_receivetimestamp(
|
||||
app_.timeKeeper().now().time_since_epoch().count());
|
||||
app_.overlay().foreach(send_always(
|
||||
std::make_shared<Message>(msg, protocol::mtTRANSACTION)));
|
||||
static std::set<Peer::id_t> skip{};
|
||||
app_.overlay().relay(tx.id(), msg, skip);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -184,10 +196,9 @@ RCLConsensus::Adaptor::share(RCLCxTx const& tx)
|
||||
void
|
||||
RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
|
||||
{
|
||||
JLOG(j_.trace()) << "We propose: "
|
||||
<< (proposal.isBowOut()
|
||||
? std::string("bowOut")
|
||||
: ripple::to_string(proposal.position()));
|
||||
JLOG(j_.trace()) << (proposal.isBowOut() ? "We bow out: " : "We propose: ")
|
||||
<< ripple::to_string(proposal.prevLedger()) << " -> "
|
||||
<< ripple::to_string(proposal.position());
|
||||
|
||||
protocol::TMProposeSet prop;
|
||||
|
||||
@@ -197,8 +208,8 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
|
||||
proposal.prevLedger().begin(), proposal.prevLedger().size());
|
||||
prop.set_proposeseq(proposal.proposeSeq());
|
||||
prop.set_closetime(proposal.closeTime().time_since_epoch().count());
|
||||
|
||||
prop.set_nodepubkey(valPublic_.data(), valPublic_.size());
|
||||
prop.set_nodepubkey(
|
||||
validatorKeys_.publicKey.data(), validatorKeys_.publicKey.size());
|
||||
|
||||
auto signingHash = sha512Half(
|
||||
HashPrefix::proposal,
|
||||
@@ -207,7 +218,8 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
|
||||
proposal.prevLedger(),
|
||||
proposal.position());
|
||||
|
||||
auto sig = signDigest(valPublic_, valSecret_, signingHash);
|
||||
auto sig = signDigest(
|
||||
validatorKeys_.publicKey, validatorKeys_.secretKey, signingHash);
|
||||
|
||||
prop.set_signature(sig.data(), sig.size());
|
||||
|
||||
@@ -216,7 +228,7 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal)
|
||||
proposal.prevLedger(),
|
||||
proposal.proposeSeq(),
|
||||
proposal.closeTime(),
|
||||
valPublic_,
|
||||
validatorKeys_.publicKey,
|
||||
sig);
|
||||
|
||||
app_.getHashRouter().addSuppression(suppression);
|
||||
@@ -230,14 +242,14 @@ RCLConsensus::Adaptor::share(RCLTxSet const& txns)
|
||||
inboundTransactions_.giveSet(txns.id(), txns.map_, false);
|
||||
}
|
||||
|
||||
boost::optional<RCLTxSet>
|
||||
std::optional<RCLTxSet>
|
||||
RCLConsensus::Adaptor::acquireTxSet(RCLTxSet::ID const& setId)
|
||||
{
|
||||
if (auto txns = inboundTransactions_.getSet(setId, true))
|
||||
{
|
||||
return RCLTxSet{std::move(txns)};
|
||||
}
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -316,7 +328,7 @@ RCLConsensus::Adaptor::onClose(
|
||||
tx.first->add(s);
|
||||
initialSet->addItem(
|
||||
SHAMapNodeType::tnTRANSACTION_NM,
|
||||
SHAMapItem(tx.first->getTransactionID(), std::move(s)));
|
||||
SHAMapItem(tx.first->getTransactionID(), s.slice()));
|
||||
}
|
||||
|
||||
// Add pseudo-transactions to the set
|
||||
@@ -378,7 +390,7 @@ RCLConsensus::Adaptor::onClose(
|
||||
setHash,
|
||||
closeTime,
|
||||
app_.timeKeeper().closeTime(),
|
||||
nodeID_}};
|
||||
validatorKeys_.nodeID}};
|
||||
}
|
||||
|
||||
void
|
||||
@@ -409,9 +421,7 @@ RCLConsensus::Adaptor::onAccept(
|
||||
Json::Value&& consensusJson)
|
||||
{
|
||||
app_.getJobQueue().addJob(
|
||||
jtACCEPT,
|
||||
"acceptLedger",
|
||||
[=, cj = std::move(consensusJson)](auto&) mutable {
|
||||
jtACCEPT, "acceptLedger", [=, cj = std::move(consensusJson)]() mutable {
|
||||
// Note that no lock is held or acquired during this job.
|
||||
// This is because generic Consensus guarantees that once a ledger
|
||||
// is accepted, the consensus results and capture by reference state
|
||||
@@ -620,7 +630,7 @@ RCLConsensus::Adaptor::doAccept(
|
||||
std::lock(lock, sl);
|
||||
|
||||
auto const lastVal = ledgerMaster_.getValidatedLedger();
|
||||
boost::optional<Rules> rules;
|
||||
std::optional<Rules> rules;
|
||||
if (lastVal)
|
||||
rules.emplace(*lastVal, app_.config().features);
|
||||
else
|
||||
@@ -789,9 +799,9 @@ RCLConsensus::Adaptor::validate(
|
||||
|
||||
auto v = std::make_shared<STValidation>(
|
||||
lastValidationTime_,
|
||||
valPublic_,
|
||||
valSecret_,
|
||||
nodeID_,
|
||||
validatorKeys_.publicKey,
|
||||
validatorKeys_.secretKey,
|
||||
validatorKeys_.nodeID,
|
||||
[&](STValidation& v) {
|
||||
v.setFieldH256(sfLedgerHash, ledger.id());
|
||||
v.setFieldH256(sfConsensusHash, txns.id());
|
||||
@@ -844,16 +854,16 @@ RCLConsensus::Adaptor::validate(
|
||||
}
|
||||
});
|
||||
|
||||
auto const serialized = v->getSerialized();
|
||||
|
||||
// suppress it if we receive it
|
||||
app_.getHashRouter().addSuppression(
|
||||
sha512Half(makeSlice(v->getSerialized())));
|
||||
app_.getHashRouter().addSuppression(sha512Half(makeSlice(serialized)));
|
||||
|
||||
handleNewValidation(app_, v, "local");
|
||||
|
||||
// Broadcast to all our peers:
|
||||
Blob validation = v->getSerialized();
|
||||
protocol::TMValidation val;
|
||||
val.set_validation(&validation[0], validation.size());
|
||||
val.set_validation(serialized.data(), serialized.size());
|
||||
app_.overlay().broadcast(val);
|
||||
|
||||
// Publish to all our subscribers:
|
||||
@@ -925,7 +935,7 @@ RCLConsensus::gotTxSet(NetClock::time_point const& now, RCLTxSet const& txSet)
|
||||
void
|
||||
RCLConsensus::simulate(
|
||||
NetClock::time_point const& now,
|
||||
boost::optional<std::chrono::milliseconds> consensusDelay)
|
||||
std::optional<std::chrono::milliseconds> consensusDelay)
|
||||
{
|
||||
std::lock_guard _{mutex_};
|
||||
consensus_.simulate(now, consensusDelay);
|
||||
@@ -947,7 +957,7 @@ RCLConsensus::Adaptor::preStartRound(
|
||||
{
|
||||
// We have a key, we do not want out of sync validations after a restart
|
||||
// and are not amendment blocked.
|
||||
validating_ = valPublic_.size() != 0 &&
|
||||
validating_ = validatorKeys_.publicKey.size() != 0 &&
|
||||
prevLgr.seq() >= app_.getMaxDisallowedLedger() &&
|
||||
!app_.getOPs().isBlocked();
|
||||
|
||||
@@ -1020,7 +1030,7 @@ RCLConsensus::Adaptor::laggards(
|
||||
bool
|
||||
RCLConsensus::Adaptor::validator() const
|
||||
{
|
||||
return !valPublic_.empty();
|
||||
return !validatorKeys_.publicKey.empty();
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -63,9 +63,8 @@ class RCLConsensus
|
||||
InboundTransactions& inboundTransactions_;
|
||||
beast::Journal const j_;
|
||||
|
||||
NodeID const nodeID_;
|
||||
PublicKey const valPublic_;
|
||||
SecretKey const valSecret_;
|
||||
// If the server is validating, the necessary keying information:
|
||||
ValidatorKeys const& validatorKeys_;
|
||||
|
||||
// A randomly selected non-zero value used to tag our validations
|
||||
std::uint64_t const valCookie_;
|
||||
@@ -200,7 +199,7 @@ class RCLConsensus
|
||||
@param hash The ID/hash of the ledger acquire
|
||||
@return Optional ledger, will be seated if we locally had the ledger
|
||||
*/
|
||||
boost::optional<RCLCxLedger>
|
||||
std::optional<RCLCxLedger>
|
||||
acquireLedger(LedgerHash const& hash);
|
||||
|
||||
/** Share the given proposal with all peers
|
||||
@@ -227,7 +226,7 @@ class RCLConsensus
|
||||
@param setId The transaction set ID associated with the proposal
|
||||
@return Optional set of transactions, seated if available.
|
||||
*/
|
||||
boost::optional<RCLTxSet>
|
||||
std::optional<RCLTxSet>
|
||||
acquireTxSet(RCLTxSet::ID const& setId);
|
||||
|
||||
/** Whether the open ledger has any transactions
|
||||
@@ -386,7 +385,7 @@ class RCLConsensus
|
||||
@param failedTxs Populate with transactions that we could not
|
||||
successfully apply.
|
||||
@return The newly built ledger
|
||||
*/
|
||||
*/
|
||||
RCLCxLedger
|
||||
buildLCL(
|
||||
RCLCxLedger const& previousLedger,
|
||||
@@ -507,7 +506,7 @@ public:
|
||||
void
|
||||
simulate(
|
||||
NetClock::time_point const& now,
|
||||
boost::optional<std::chrono::milliseconds> consensusDelay);
|
||||
std::optional<std::chrono::milliseconds> consensusDelay);
|
||||
|
||||
//! @see Consensus::proposal
|
||||
bool
|
||||
|
||||
@@ -91,8 +91,7 @@ public:
|
||||
insert(Tx const& t)
|
||||
{
|
||||
return map_->addItem(
|
||||
SHAMapNodeType::tnTRANSACTION_NM,
|
||||
SHAMapItem{t.id(), t.tx_.peekData()});
|
||||
SHAMapNodeType::tnTRANSACTION_NM, SHAMapItem{t.tx_});
|
||||
}
|
||||
|
||||
/** Remove a transaction from the set.
|
||||
|
||||
@@ -28,7 +28,6 @@
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/chrono.h>
|
||||
#include <ripple/consensus/LedgerTiming.h>
|
||||
#include <ripple/core/DatabaseCon.h>
|
||||
#include <ripple/core/JobQueue.h>
|
||||
#include <ripple/core/TimeKeeper.h>
|
||||
#include <memory>
|
||||
@@ -88,7 +87,8 @@ RCLValidatedLedger::operator[](Seq const& s) const -> ID
|
||||
|
||||
JLOG(j_.warn()) << "Unable to determine hash of ancestor seq=" << s
|
||||
<< " from ledger hash=" << ledgerID_
|
||||
<< " seq=" << ledgerSeq_;
|
||||
<< " seq=" << ledgerSeq_ << " (available: " << minSeq()
|
||||
<< "-" << seq() << ")";
|
||||
// Default ID that is less than all others
|
||||
return ID{0};
|
||||
}
|
||||
@@ -123,7 +123,7 @@ RCLValidationsAdaptor::now() const
|
||||
return app_.timeKeeper().closeTime();
|
||||
}
|
||||
|
||||
boost::optional<RCLValidatedLedger>
|
||||
std::optional<RCLValidatedLedger>
|
||||
RCLValidationsAdaptor::acquire(LedgerHash const& hash)
|
||||
{
|
||||
auto ledger = app_.getLedgerMaster().getLedgerByHash(hash);
|
||||
@@ -135,11 +135,11 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
|
||||
Application* pApp = &app_;
|
||||
|
||||
app_.getJobQueue().addJob(
|
||||
jtADVANCE, "getConsensusLedger", [pApp, hash](Job&) {
|
||||
jtADVANCE, "getConsensusLedger", [pApp, hash]() {
|
||||
pApp->getInboundLedgers().acquire(
|
||||
hash, 0, InboundLedger::Reason::CONSENSUS);
|
||||
});
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
assert(!ledger->open() && ledger->isImmutable());
|
||||
@@ -154,11 +154,13 @@ handleNewValidation(
|
||||
std::shared_ptr<STValidation> const& val,
|
||||
std::string const& source)
|
||||
{
|
||||
PublicKey const& signingKey = val->getSignerPublic();
|
||||
uint256 const& hash = val->getLedgerHash();
|
||||
auto const& signingKey = val->getSignerPublic();
|
||||
auto const& hash = val->getLedgerHash();
|
||||
auto const seq = val->getFieldU32(sfLedgerSequence);
|
||||
|
||||
// Ensure validation is marked as trusted if signer currently trusted
|
||||
auto masterKey = app.validators().getTrustedKey(signingKey);
|
||||
|
||||
if (!val->isTrusted() && masterKey)
|
||||
val->setTrusted();
|
||||
|
||||
@@ -166,56 +168,52 @@ handleNewValidation(
|
||||
if (!masterKey)
|
||||
masterKey = app.validators().getListedKey(signingKey);
|
||||
|
||||
RCLValidations& validations = app.getValidations();
|
||||
beast::Journal const j = validations.adaptor().journal();
|
||||
|
||||
auto dmp = [&](beast::Journal::Stream s, std::string const& msg) {
|
||||
std::string id = toBase58(TokenType::NodePublic, signingKey);
|
||||
|
||||
if (masterKey)
|
||||
id += ":" + toBase58(TokenType::NodePublic, *masterKey);
|
||||
|
||||
s << (val->isTrusted() ? "trusted" : "untrusted") << " "
|
||||
<< (val->isFull() ? "full" : "partial") << " validation: " << hash
|
||||
<< " from " << id << " via " << source << ": " << msg << "\n"
|
||||
<< " [" << val->getSerializer().slice() << "]";
|
||||
};
|
||||
auto& validations = app.getValidations();
|
||||
|
||||
// masterKey is seated only if validator is trusted or listed
|
||||
if (masterKey)
|
||||
auto const outcome =
|
||||
validations.add(calcNodeID(masterKey.value_or(signingKey)), val);
|
||||
|
||||
if (outcome == ValStatus::current)
|
||||
{
|
||||
ValStatus const outcome = validations.add(calcNodeID(*masterKey), val);
|
||||
auto const seq = val->getFieldU32(sfLedgerSequence);
|
||||
|
||||
if (j.debug())
|
||||
dmp(j.debug(), to_string(outcome));
|
||||
|
||||
// One might think that we would not wish to relay validations that
|
||||
// fail these checks. Somewhat counterintuitively, we actually want
|
||||
// to do it for validations that we receive but deem suspicious, so
|
||||
// that our peers will also observe them and realize they're bad.
|
||||
if (outcome == ValStatus::conflicting && j.warn())
|
||||
{
|
||||
dmp(j.warn(),
|
||||
"conflicting validations issued for " + to_string(seq) +
|
||||
" (likely from a Byzantine validator)");
|
||||
}
|
||||
|
||||
if (outcome == ValStatus::multiple && j.warn())
|
||||
{
|
||||
dmp(j.warn(),
|
||||
"multiple validations issued for " + to_string(seq) +
|
||||
" (multiple validators operating with the same key?)");
|
||||
}
|
||||
|
||||
if (val->isTrusted() && outcome == ValStatus::current)
|
||||
if (val->isTrusted())
|
||||
app.getLedgerMaster().checkAccept(hash, seq);
|
||||
return;
|
||||
}
|
||||
else
|
||||
|
||||
// Ensure that problematic validations from validators we trust are
|
||||
// logged at the highest possible level.
|
||||
//
|
||||
// One might think that we should more than just log: we ought to also
|
||||
// not relay validations that fail these checks. Alas, and somewhat
|
||||
// counterintuitively, we *especially* want to forward such validations,
|
||||
// so that our peers will also observe them and take independent notice of
|
||||
// such validators, informing their operators.
|
||||
if (auto const ls = val->isTrusted()
|
||||
? validations.adaptor().journal().error()
|
||||
: validations.adaptor().journal().info();
|
||||
ls.active())
|
||||
{
|
||||
JLOG(j.debug()) << "Val for " << hash << " from "
|
||||
<< toBase58(TokenType::NodePublic, signingKey)
|
||||
<< " not added UNlisted";
|
||||
auto const id = [&masterKey, &signingKey]() {
|
||||
auto ret = toBase58(TokenType::NodePublic, signingKey);
|
||||
|
||||
if (masterKey && masterKey != signingKey)
|
||||
ret += ":" + toBase58(TokenType::NodePublic, *masterKey);
|
||||
|
||||
return ret;
|
||||
}();
|
||||
|
||||
if (outcome == ValStatus::conflicting)
|
||||
ls << "Byzantine Behavior Detector: "
|
||||
<< (val->isTrusted() ? "trusted " : "untrusted ") << id
|
||||
<< ": Conflicting validation for " << seq << "!\n["
|
||||
<< val->getSerializer().slice() << "]";
|
||||
|
||||
if (outcome == ValStatus::multiple)
|
||||
ls << "Byzantine Behavior Detector: "
|
||||
<< (val->isTrusted() ? "trusted " : "untrusted ") << id
|
||||
<< ": Multiple validations for " << seq << "/" << hash << "!\n["
|
||||
<< val->getSerializer().slice() << "]";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -120,7 +120,7 @@ public:
|
||||
}
|
||||
|
||||
/// Get the load fee of the validation if it exists
|
||||
boost::optional<std::uint32_t>
|
||||
std::optional<std::uint32_t>
|
||||
loadFee() const
|
||||
{
|
||||
return ~(*val_)[~sfLoadFee];
|
||||
@@ -218,7 +218,7 @@ public:
|
||||
now() const;
|
||||
|
||||
/** Attempt to acquire the ledger with given id from the network */
|
||||
boost::optional<RCLValidatedLedger>
|
||||
std::optional<RCLValidatedLedger>
|
||||
acquire(LedgerHash const& id);
|
||||
|
||||
beast::Journal
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
#include <ripple/basics/Blob.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <boost/optional.hpp>
|
||||
#include <optional>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
@@ -37,10 +37,10 @@ public:
|
||||
/** Retrieves partial ledger data of the coresponding hash from peers.`
|
||||
|
||||
@param nodeHash The 256-bit hash of the data to fetch.
|
||||
@return `boost::none` if the hash isn't cached,
|
||||
@return `std::nullopt` if the hash isn't cached,
|
||||
otherwise, the hash associated data.
|
||||
*/
|
||||
virtual boost::optional<Blob>
|
||||
virtual std::optional<Blob>
|
||||
getFetchPack(uint256 const& nodeHash) = 0;
|
||||
};
|
||||
|
||||
|
||||
@@ -54,15 +54,4 @@ AcceptedLedger::insert(AcceptedLedgerTx::ref at)
|
||||
mMap.insert(std::make_pair(at->getIndex(), at));
|
||||
}
|
||||
|
||||
AcceptedLedgerTx::pointer
|
||||
AcceptedLedger::getTxn(int i) const
|
||||
{
|
||||
map_t::const_iterator it = mMap.find(i);
|
||||
|
||||
if (it == mMap.end())
|
||||
return AcceptedLedgerTx::pointer();
|
||||
|
||||
return it->second;
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -69,9 +69,6 @@ public:
|
||||
return mMap.size();
|
||||
}
|
||||
|
||||
AcceptedLedgerTx::pointer
|
||||
getTxn(int) const;
|
||||
|
||||
AcceptedLedger(
|
||||
std::shared_ptr<ReadView const> const& ledger,
|
||||
Application& app);
|
||||
|
||||
@@ -33,7 +33,7 @@ AccountStateSF::gotNode(
|
||||
hotACCOUNT_NODE, std::move(nodeData), nodeHash.as_uint256(), ledgerSeq);
|
||||
}
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
AccountStateSF::getNode(SHAMapHash const& nodeHash) const
|
||||
{
|
||||
return fp_.getFetchPack(nodeHash.as_uint256());
|
||||
|
||||
@@ -44,7 +44,7 @@ public:
|
||||
Blob&& nodeData,
|
||||
SHAMapNodeType type) const override;
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
getNode(SHAMapHash const& nodeHash) const override;
|
||||
|
||||
private:
|
||||
|
||||
@@ -62,10 +62,9 @@ ConsensusTransSetSF::gotNode(
|
||||
auto stx = std::make_shared<STTx const>(std::ref(sit));
|
||||
assert(stx->getTransactionID() == nodeHash.as_uint256());
|
||||
auto const pap = &app_;
|
||||
app_.getJobQueue().addJob(
|
||||
jtTRANSACTION, "TXS->TXN", [pap, stx](Job&) {
|
||||
pap->getOPs().submitTransaction(stx);
|
||||
});
|
||||
app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
|
||||
pap->getOPs().submitTransaction(stx);
|
||||
});
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
@@ -74,7 +73,7 @@ ConsensusTransSetSF::gotNode(
|
||||
}
|
||||
}
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
ConsensusTransSetSF::getNode(SHAMapHash const& nodeHash) const
|
||||
{
|
||||
Blob nodeData;
|
||||
@@ -96,7 +95,7 @@ ConsensusTransSetSF::getNode(SHAMapHash const& nodeHash) const
|
||||
return nodeData;
|
||||
}
|
||||
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -47,7 +47,7 @@ public:
|
||||
Blob&& nodeData,
|
||||
SHAMapNodeType type) const override;
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
getNode(SHAMapHash const& nodeHash) const override;
|
||||
|
||||
private:
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
#define RIPPLE_APP_LEDGER_INBOUNDLEDGERS_H_INCLUDED
|
||||
|
||||
#include <ripple/app/ledger/InboundLedger.h>
|
||||
#include <ripple/core/Stoppable.h>
|
||||
#include <ripple/protocol/RippleLedgerHash.h>
|
||||
#include <memory>
|
||||
|
||||
@@ -83,14 +82,13 @@ public:
|
||||
sweep() = 0;
|
||||
|
||||
virtual void
|
||||
onStop() = 0;
|
||||
stop() = 0;
|
||||
};
|
||||
|
||||
std::unique_ptr<InboundLedgers>
|
||||
make_InboundLedgers(
|
||||
Application& app,
|
||||
InboundLedgers::clock_type& clock,
|
||||
Stoppable& parent,
|
||||
beast::insight::Collector::ptr const& collector);
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
#define RIPPLE_APP_LEDGER_INBOUNDTRANSACTIONS_H_INCLUDED
|
||||
|
||||
#include <ripple/beast/clock/abstract_clock.h>
|
||||
#include <ripple/core/Stoppable.h>
|
||||
#include <ripple/overlay/Peer.h>
|
||||
#include <ripple/shamap/SHAMap.h>
|
||||
#include <memory>
|
||||
@@ -85,12 +84,14 @@ public:
|
||||
*/
|
||||
virtual void
|
||||
newRound(std::uint32_t seq) = 0;
|
||||
|
||||
virtual void
|
||||
stop() = 0;
|
||||
};
|
||||
|
||||
std::unique_ptr<InboundTransactions>
|
||||
make_InboundTransactions(
|
||||
Application& app,
|
||||
Stoppable& parent,
|
||||
beast::insight::Collector::ptr const& collector,
|
||||
std::function<void(std::shared_ptr<SHAMap> const&, bool)> gotSet);
|
||||
|
||||
|
||||
@@ -29,14 +29,14 @@
|
||||
#include <ripple/app/misc/HashRouter.h>
|
||||
#include <ripple/app/misc/LoadFeeTrack.h>
|
||||
#include <ripple/app/misc/NetworkOPs.h>
|
||||
#include <ripple/app/reporting/DBHelpers.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfacePostgres.h>
|
||||
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/contract.h>
|
||||
#include <ripple/beast/core/LexicalCast.h>
|
||||
#include <ripple/consensus/LedgerTiming.h>
|
||||
#include <ripple/core/Config.h>
|
||||
#include <ripple/core/DatabaseCon.h>
|
||||
#include <ripple/core/JobQueue.h>
|
||||
#include <ripple/core/Pg.h>
|
||||
#include <ripple/core/SociDB.h>
|
||||
@@ -141,7 +141,7 @@ public:
|
||||
txs_iter_impl(txs_iter_impl const&) = default;
|
||||
|
||||
txs_iter_impl(bool metadata, SHAMap::const_iterator iter)
|
||||
: metadata_(metadata), iter_(iter)
|
||||
: metadata_(metadata), iter_(std::move(iter))
|
||||
{
|
||||
}
|
||||
|
||||
@@ -371,7 +371,8 @@ Ledger::setAccepted(
|
||||
bool
|
||||
Ledger::addSLE(SLE const& sle)
|
||||
{
|
||||
SHAMapItem item(sle.key(), sle.getSerializer());
|
||||
auto const s = sle.getSerializer();
|
||||
SHAMapItem item(sle.key(), s.slice());
|
||||
return stateMap_->addItem(SHAMapNodeType::tnACCOUNT_STATE, std::move(item));
|
||||
}
|
||||
|
||||
@@ -416,14 +417,14 @@ Ledger::exists(uint256 const& key) const
|
||||
return stateMap_->hasItem(key);
|
||||
}
|
||||
|
||||
boost::optional<uint256>
|
||||
Ledger::succ(uint256 const& key, boost::optional<uint256> const& last) const
|
||||
std::optional<uint256>
|
||||
Ledger::succ(uint256 const& key, std::optional<uint256> const& last) const
|
||||
{
|
||||
auto item = stateMap_->upper_bound(key);
|
||||
if (item == stateMap_->end())
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
if (last && item->key() >= last)
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
return item->key();
|
||||
}
|
||||
|
||||
@@ -438,8 +439,7 @@ Ledger::read(Keylet const& k) const
|
||||
auto const& item = stateMap_->peekItem(k.key);
|
||||
if (!item)
|
||||
return nullptr;
|
||||
auto sle = std::make_shared<SLE>(
|
||||
SerialIter{item->data(), item->size()}, item->key());
|
||||
auto sle = std::make_shared<SLE>(SerialIter{item->slice()}, item->key());
|
||||
if (!k.check(*sle))
|
||||
return nullptr;
|
||||
return sle;
|
||||
@@ -500,13 +500,13 @@ Ledger::txRead(key_type const& key) const -> tx_type
|
||||
}
|
||||
|
||||
auto
|
||||
Ledger::digest(key_type const& key) const -> boost::optional<digest_type>
|
||||
Ledger::digest(key_type const& key) const -> std::optional<digest_type>
|
||||
{
|
||||
SHAMapHash digest;
|
||||
// VFALCO Unfortunately this loads the item
|
||||
// from the NodeStore needlessly.
|
||||
if (!stateMap_->peekItem(key, digest))
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
return digest.as_uint256();
|
||||
}
|
||||
|
||||
@@ -533,7 +533,7 @@ Ledger::rawInsert(std::shared_ptr<SLE> const& sle)
|
||||
sle->add(ss);
|
||||
if (!stateMap_->addGiveItem(
|
||||
SHAMapNodeType::tnACCOUNT_STATE,
|
||||
std::make_shared<SHAMapItem const>(sle->key(), std::move(ss))))
|
||||
std::make_shared<SHAMapItem const>(sle->key(), ss.slice())))
|
||||
LogicError("Ledger::rawInsert: key already exists");
|
||||
}
|
||||
|
||||
@@ -544,7 +544,7 @@ Ledger::rawReplace(std::shared_ptr<SLE> const& sle)
|
||||
sle->add(ss);
|
||||
if (!stateMap_->updateGiveItem(
|
||||
SHAMapNodeType::tnACCOUNT_STATE,
|
||||
std::make_shared<SHAMapItem const>(sle->key(), std::move(ss))))
|
||||
std::make_shared<SHAMapItem const>(sle->key(), ss.slice())))
|
||||
LogicError("Ledger::rawReplace: key not found");
|
||||
}
|
||||
|
||||
@@ -562,7 +562,7 @@ Ledger::rawTxInsert(
|
||||
s.addVL(metaData->peekData());
|
||||
if (!txMap().addGiveItem(
|
||||
SHAMapNodeType::tnTRANSACTION_MD,
|
||||
std::make_shared<SHAMapItem const>(key, std::move(s))))
|
||||
std::make_shared<SHAMapItem const>(key, s.slice())))
|
||||
LogicError("duplicate_tx: " + to_string(key));
|
||||
}
|
||||
|
||||
@@ -578,9 +578,8 @@ Ledger::rawTxInsertWithHash(
|
||||
Serializer s(txn->getDataLength() + metaData->getDataLength() + 16);
|
||||
s.addVL(txn->peekData());
|
||||
s.addVL(metaData->peekData());
|
||||
auto item = std::make_shared<SHAMapItem const>(key, std::move(s));
|
||||
auto hash = sha512Half(
|
||||
HashPrefix::txNode, makeSlice(item->peekData()), item->key());
|
||||
auto item = std::make_shared<SHAMapItem const>(key, s.slice());
|
||||
auto hash = sha512Half(HashPrefix::txNode, item->slice(), item->key());
|
||||
if (!txMap().addGiveItem(SHAMapNodeType::tnTRANSACTION_MD, std::move(item)))
|
||||
LogicError("duplicate_tx: " + to_string(key));
|
||||
|
||||
@@ -647,8 +646,7 @@ Ledger::peek(Keylet const& k) const
|
||||
auto const& value = stateMap_->peekItem(k.key);
|
||||
if (!value)
|
||||
return nullptr;
|
||||
auto sle = std::make_shared<SLE>(
|
||||
SerialIter{value->data(), value->size()}, value->key());
|
||||
auto sle = std::make_shared<SLE>(SerialIter{value->slice()}, value->key());
|
||||
if (!k.check(*sle))
|
||||
return nullptr;
|
||||
return sle;
|
||||
@@ -680,7 +678,7 @@ Ledger::negativeUNL() const
|
||||
return negUnl;
|
||||
}
|
||||
|
||||
boost::optional<PublicKey>
|
||||
std::optional<PublicKey>
|
||||
Ledger::validatorToDisable() const
|
||||
{
|
||||
if (auto sle = read(keylet::negativeUNL());
|
||||
@@ -692,10 +690,10 @@ Ledger::validatorToDisable() const
|
||||
return PublicKey(s);
|
||||
}
|
||||
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
boost::optional<PublicKey>
|
||||
std::optional<PublicKey>
|
||||
Ledger::validatorToReEnable() const
|
||||
{
|
||||
if (auto sle = read(keylet::negativeUNL());
|
||||
@@ -707,7 +705,7 @@ Ledger::validatorToReEnable() const
|
||||
return PublicKey(s);
|
||||
}
|
||||
|
||||
return boost::none;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
void
|
||||
@@ -762,7 +760,7 @@ Ledger::updateNegativeUNL()
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
bool
|
||||
Ledger::walkLedger(beast::Journal j) const
|
||||
Ledger::walkLedger(beast::Journal j, bool parallel) const
|
||||
{
|
||||
std::vector<SHAMapMissingNode> missingNodes1;
|
||||
std::vector<SHAMapMissingNode> missingNodes2;
|
||||
@@ -775,7 +773,10 @@ Ledger::walkLedger(beast::Journal j) const
|
||||
}
|
||||
else
|
||||
{
|
||||
stateMap_->walkMap(missingNodes1, 32);
|
||||
if (parallel)
|
||||
return stateMap_->walkMapParallel(missingNodes1, 32);
|
||||
else
|
||||
stateMap_->walkMap(missingNodes1, 32);
|
||||
}
|
||||
|
||||
if (!missingNodes1.empty())
|
||||
@@ -929,196 +930,14 @@ saveValidatedLedger(
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO(tom): Fix this hard-coded SQL!
|
||||
JLOG(j.trace()) << "saveValidatedLedger " << (current ? "" : "fromAcquire ")
|
||||
<< seq;
|
||||
|
||||
if (!ledger->info().accountHash.isNonZero())
|
||||
{
|
||||
JLOG(j.fatal()) << "AH is zero: " << getJson({*ledger, {}});
|
||||
assert(false);
|
||||
}
|
||||
|
||||
if (ledger->info().accountHash != ledger->stateMap().getHash().as_uint256())
|
||||
{
|
||||
JLOG(j.fatal()) << "sAL: " << ledger->info().accountHash
|
||||
<< " != " << ledger->stateMap().getHash();
|
||||
JLOG(j.fatal()) << "saveAcceptedLedger: seq=" << seq
|
||||
<< ", current=" << current;
|
||||
assert(false);
|
||||
}
|
||||
|
||||
assert(ledger->info().txHash == ledger->txMap().getHash().as_uint256());
|
||||
|
||||
// Save the ledger header in the hashed object store
|
||||
{
|
||||
Serializer s(128);
|
||||
s.add32(HashPrefix::ledgerMaster);
|
||||
addRaw(ledger->info(), s);
|
||||
app.getNodeStore().store(
|
||||
hotLEDGER, std::move(s.modData()), ledger->info().hash, seq);
|
||||
}
|
||||
|
||||
AcceptedLedger::pointer aLedger;
|
||||
try
|
||||
{
|
||||
aLedger = app.getAcceptedLedgerCache().fetch(ledger->info().hash);
|
||||
if (!aLedger)
|
||||
{
|
||||
aLedger = std::make_shared<AcceptedLedger>(ledger, app);
|
||||
app.getAcceptedLedgerCache().canonicalize_replace_client(
|
||||
ledger->info().hash, aLedger);
|
||||
}
|
||||
}
|
||||
catch (std::exception const&)
|
||||
{
|
||||
JLOG(j.warn()) << "An accepted ledger was missing nodes";
|
||||
app.getLedgerMaster().failedSave(seq, ledger->info().hash);
|
||||
// Clients can now trust the database for information about this
|
||||
// ledger sequence.
|
||||
app.pendingSaves().finishWork(seq);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!app.config().reporting())
|
||||
{
|
||||
static boost::format deleteLedger(
|
||||
"DELETE FROM Ledgers WHERE LedgerSeq = %u;");
|
||||
static boost::format deleteTrans1(
|
||||
"DELETE FROM Transactions WHERE LedgerSeq = %u;");
|
||||
static boost::format deleteTrans2(
|
||||
"DELETE FROM AccountTransactions WHERE LedgerSeq = %u;");
|
||||
static boost::format deleteAcctTrans(
|
||||
"DELETE FROM AccountTransactions WHERE TransID = '%s';");
|
||||
|
||||
{
|
||||
auto db = app.getLedgerDB().checkoutDb();
|
||||
*db << boost::str(deleteLedger % seq);
|
||||
}
|
||||
|
||||
if (app.config().useTxTables())
|
||||
{
|
||||
auto db = app.getTxnDB().checkoutDb();
|
||||
|
||||
soci::transaction tr(*db);
|
||||
|
||||
*db << boost::str(deleteTrans1 % seq);
|
||||
*db << boost::str(deleteTrans2 % seq);
|
||||
|
||||
std::string const ledgerSeq(std::to_string(seq));
|
||||
|
||||
for (auto const& [_, acceptedLedgerTx] : aLedger->getMap())
|
||||
{
|
||||
(void)_;
|
||||
uint256 transactionID = acceptedLedgerTx->getTransactionID();
|
||||
|
||||
std::string const txnId(to_string(transactionID));
|
||||
std::string const txnSeq(
|
||||
std::to_string(acceptedLedgerTx->getTxnSeq()));
|
||||
|
||||
*db << boost::str(deleteAcctTrans % transactionID);
|
||||
|
||||
auto const& accts = acceptedLedgerTx->getAffected();
|
||||
|
||||
if (!accts.empty())
|
||||
{
|
||||
std::string sql(
|
||||
"INSERT INTO AccountTransactions "
|
||||
"(TransID, Account, LedgerSeq, TxnSeq) VALUES ");
|
||||
|
||||
// Try to make an educated guess on how much space we'll
|
||||
// need for our arguments. In argument order we have: 64
|
||||
// + 34 + 10 + 10 = 118 + 10 extra = 128 bytes
|
||||
sql.reserve(sql.length() + (accts.size() * 128));
|
||||
|
||||
bool first = true;
|
||||
for (auto const& account : accts)
|
||||
{
|
||||
if (!first)
|
||||
sql += ", ('";
|
||||
else
|
||||
{
|
||||
sql += "('";
|
||||
first = false;
|
||||
}
|
||||
|
||||
sql += txnId;
|
||||
sql += "','";
|
||||
sql += app.accountIDCache().toBase58(account);
|
||||
sql += "',";
|
||||
sql += ledgerSeq;
|
||||
sql += ",";
|
||||
sql += txnSeq;
|
||||
sql += ")";
|
||||
}
|
||||
sql += ";";
|
||||
JLOG(j.trace()) << "ActTx: " << sql;
|
||||
*db << sql;
|
||||
}
|
||||
else
|
||||
{
|
||||
JLOG(j.warn()) << "Transaction in ledger " << seq
|
||||
<< " affects no accounts";
|
||||
JLOG(j.warn()) << acceptedLedgerTx->getTxn()->getJson(
|
||||
JsonOptions::none);
|
||||
}
|
||||
|
||||
*db
|
||||
<< (STTx::getMetaSQLInsertReplaceHeader() +
|
||||
acceptedLedgerTx->getTxn()->getMetaSQL(
|
||||
seq, acceptedLedgerTx->getEscMeta()) +
|
||||
";");
|
||||
|
||||
app.getMasterTransaction().inLedger(transactionID, seq);
|
||||
}
|
||||
|
||||
tr.commit();
|
||||
}
|
||||
|
||||
{
|
||||
static std::string addLedger(
|
||||
R"sql(INSERT OR REPLACE INTO Ledgers
|
||||
(LedgerHash,LedgerSeq,PrevHash,TotalCoins,ClosingTime,PrevClosingTime,
|
||||
CloseTimeRes,CloseFlags,AccountSetHash,TransSetHash)
|
||||
VALUES
|
||||
(:ledgerHash,:ledgerSeq,:prevHash,:totalCoins,:closingTime,:prevClosingTime,
|
||||
:closeTimeRes,:closeFlags,:accountSetHash,:transSetHash);)sql");
|
||||
|
||||
auto db(app.getLedgerDB().checkoutDb());
|
||||
|
||||
soci::transaction tr(*db);
|
||||
|
||||
auto const hash = to_string(ledger->info().hash);
|
||||
auto const parentHash = to_string(ledger->info().parentHash);
|
||||
auto const drops = to_string(ledger->info().drops);
|
||||
auto const closeTime =
|
||||
ledger->info().closeTime.time_since_epoch().count();
|
||||
auto const parentCloseTime =
|
||||
ledger->info().parentCloseTime.time_since_epoch().count();
|
||||
auto const closeTimeResolution =
|
||||
ledger->info().closeTimeResolution.count();
|
||||
auto const closeFlags = ledger->info().closeFlags;
|
||||
auto const accountHash = to_string(ledger->info().accountHash);
|
||||
auto const txHash = to_string(ledger->info().txHash);
|
||||
|
||||
*db << addLedger, soci::use(hash), soci::use(seq),
|
||||
soci::use(parentHash), soci::use(drops), soci::use(closeTime),
|
||||
soci::use(parentCloseTime), soci::use(closeTimeResolution),
|
||||
soci::use(closeFlags), soci::use(accountHash),
|
||||
soci::use(txHash);
|
||||
|
||||
tr.commit();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(false);
|
||||
}
|
||||
auto res = dynamic_cast<RelationalDBInterfaceSqlite*>(
|
||||
&app.getRelationalDBInterface())
|
||||
->saveValidatedLedger(ledger, current);
|
||||
|
||||
// Clients can now trust the database for
|
||||
// information about this ledger sequence.
|
||||
app.pendingSaves().finishWork(seq);
|
||||
return true;
|
||||
return res;
|
||||
}
|
||||
|
||||
/** Save, or arrange to save, a fully-validated ledger
|
||||
@@ -1162,10 +981,9 @@ pendSaveValidated(
|
||||
|
||||
// See if we can use the JobQueue.
|
||||
if (!isSynchronous &&
|
||||
app.getJobQueue().addJob(
|
||||
jobType, jobName, [&app, ledger, isCurrent](Job&) {
|
||||
saveValidatedLedger(app, ledger, isCurrent);
|
||||
}))
|
||||
app.getJobQueue().addJob(jobType, jobName, [&app, ledger, isCurrent]() {
|
||||
saveValidatedLedger(app, ledger, isCurrent);
|
||||
}))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
@@ -1190,74 +1008,16 @@ Ledger::invariants() const
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/*
|
||||
* Load a ledger from the database.
|
||||
* Make ledger using info loaded from database.
|
||||
*
|
||||
* @param sqlSuffix: Additional string to append to the sql query.
|
||||
* (typically a where clause).
|
||||
* @param LedgerInfo: Ledger information.
|
||||
* @param app: Link to the Application.
|
||||
* @param acquire: Acquire the ledger if not found locally.
|
||||
* @return The ledger, ledger sequence, and ledger hash.
|
||||
* @return Shared pointer to the ledger.
|
||||
*/
|
||||
std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
loadLedgerHelper(std::string const& sqlSuffix, Application& app, bool acquire)
|
||||
std::shared_ptr<Ledger>
|
||||
loadLedgerHelper(LedgerInfo const& info, Application& app, bool acquire)
|
||||
{
|
||||
uint256 ledgerHash{};
|
||||
std::uint32_t ledgerSeq{0};
|
||||
|
||||
auto db = app.getLedgerDB().checkoutDb();
|
||||
|
||||
boost::optional<std::string> sLedgerHash, sPrevHash, sAccountHash,
|
||||
sTransHash;
|
||||
boost::optional<std::uint64_t> totDrops, closingTime, prevClosingTime,
|
||||
closeResolution, closeFlags, ledgerSeq64;
|
||||
|
||||
std::string const sql =
|
||||
"SELECT "
|
||||
"LedgerHash, PrevHash, AccountSetHash, TransSetHash, "
|
||||
"TotalCoins,"
|
||||
"ClosingTime, PrevClosingTime, CloseTimeRes, CloseFlags,"
|
||||
"LedgerSeq from Ledgers " +
|
||||
sqlSuffix + ";";
|
||||
|
||||
*db << sql, soci::into(sLedgerHash), soci::into(sPrevHash),
|
||||
soci::into(sAccountHash), soci::into(sTransHash), soci::into(totDrops),
|
||||
soci::into(closingTime), soci::into(prevClosingTime),
|
||||
soci::into(closeResolution), soci::into(closeFlags),
|
||||
soci::into(ledgerSeq64);
|
||||
|
||||
if (!db->got_data())
|
||||
{
|
||||
auto stream = app.journal("Ledger").debug();
|
||||
JLOG(stream) << "Ledger not found: " << sqlSuffix;
|
||||
return std::make_tuple(
|
||||
std::shared_ptr<Ledger>(), ledgerSeq, ledgerHash);
|
||||
}
|
||||
|
||||
ledgerSeq = rangeCheckedCast<std::uint32_t>(ledgerSeq64.value_or(0));
|
||||
|
||||
uint256 prevHash{}, accountHash{}, transHash{};
|
||||
if (sLedgerHash)
|
||||
(void)ledgerHash.parseHex(*sLedgerHash);
|
||||
if (sPrevHash)
|
||||
(void)prevHash.parseHex(*sPrevHash);
|
||||
if (sAccountHash)
|
||||
(void)accountHash.parseHex(*sAccountHash);
|
||||
if (sTransHash)
|
||||
(void)transHash.parseHex(*sTransHash);
|
||||
|
||||
using time_point = NetClock::time_point;
|
||||
using duration = NetClock::duration;
|
||||
|
||||
LedgerInfo info;
|
||||
info.parentHash = prevHash;
|
||||
info.txHash = transHash;
|
||||
info.accountHash = accountHash;
|
||||
info.drops = totDrops.value_or(0);
|
||||
info.closeTime = time_point{duration{closingTime.value_or(0)}};
|
||||
info.parentCloseTime = time_point{duration{prevClosingTime.value_or(0)}};
|
||||
info.closeFlags = closeFlags.value_or(0);
|
||||
info.closeTimeResolution = duration{closeResolution.value_or(0)};
|
||||
info.seq = ledgerSeq;
|
||||
|
||||
bool loaded;
|
||||
auto ledger = std::make_shared<Ledger>(
|
||||
info,
|
||||
@@ -1270,7 +1030,7 @@ loadLedgerHelper(std::string const& sqlSuffix, Application& app, bool acquire)
|
||||
if (!loaded)
|
||||
ledger.reset();
|
||||
|
||||
return std::make_tuple(ledger, ledgerSeq, ledgerHash);
|
||||
return ledger;
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -1289,419 +1049,41 @@ finishLoadByIndexOrHash(
|
||||
ledger->setFull();
|
||||
}
|
||||
|
||||
// Load the ledger info for the specified ledger/s from the database
|
||||
// @param whichLedger specifies the ledger to load via ledger sequence, ledger
|
||||
// hash, a range of ledgers, or std::monostate (which loads the most recent)
|
||||
// @param app Application
|
||||
// @return vector of LedgerInfos
|
||||
static std::vector<LedgerInfo>
|
||||
loadLedgerInfosPostgres(
|
||||
std::variant<
|
||||
std::monostate,
|
||||
uint256,
|
||||
uint32_t,
|
||||
std::pair<uint32_t, uint32_t>> const& whichLedger,
|
||||
Application& app)
|
||||
{
|
||||
std::vector<LedgerInfo> infos;
|
||||
#ifdef RIPPLED_REPORTING
|
||||
auto log = app.journal("Ledger");
|
||||
assert(app.config().reporting());
|
||||
std::stringstream sql;
|
||||
sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, "
|
||||
"total_coins, closing_time, prev_closing_time, close_time_res, "
|
||||
"close_flags, ledger_seq FROM ledgers ";
|
||||
|
||||
uint32_t expNumResults = 1;
|
||||
|
||||
if (auto ledgerSeq = std::get_if<uint32_t>(&whichLedger))
|
||||
{
|
||||
sql << "WHERE ledger_seq = " + std::to_string(*ledgerSeq);
|
||||
}
|
||||
else if (auto ledgerHash = std::get_if<uint256>(&whichLedger))
|
||||
{
|
||||
sql << ("WHERE ledger_hash = \'\\x" + strHex(*ledgerHash) + "\'");
|
||||
}
|
||||
else if (
|
||||
auto minAndMax =
|
||||
std::get_if<std::pair<uint32_t, uint32_t>>(&whichLedger))
|
||||
{
|
||||
expNumResults = minAndMax->second - minAndMax->first;
|
||||
|
||||
sql
|
||||
<< ("WHERE ledger_seq >= " + std::to_string(minAndMax->first) +
|
||||
" AND ledger_seq <= " + std::to_string(minAndMax->second));
|
||||
}
|
||||
else
|
||||
{
|
||||
sql << ("ORDER BY ledger_seq desc LIMIT 1");
|
||||
}
|
||||
sql << ";";
|
||||
|
||||
JLOG(log.trace()) << __func__ << " : sql = " << sql.str();
|
||||
|
||||
auto res = PgQuery(app.getPgPool())(sql.str().data());
|
||||
if (!res)
|
||||
{
|
||||
JLOG(log.error()) << __func__ << " : Postgres response is null - sql = "
|
||||
<< sql.str();
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
else if (res.status() != PGRES_TUPLES_OK)
|
||||
{
|
||||
JLOG(log.error()) << __func__
|
||||
<< " : Postgres response should have been "
|
||||
"PGRES_TUPLES_OK but instead was "
|
||||
<< res.status() << " - msg = " << res.msg()
|
||||
<< " - sql = " << sql.str();
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
|
||||
JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg();
|
||||
|
||||
if (res.isNull() || res.ntuples() == 0)
|
||||
{
|
||||
JLOG(log.debug()) << __func__
|
||||
<< " : Ledger not found. sql = " << sql.str();
|
||||
return {};
|
||||
}
|
||||
else if (res.ntuples() > 0)
|
||||
{
|
||||
if (res.nfields() != 10)
|
||||
{
|
||||
JLOG(log.error()) << __func__
|
||||
<< " : Wrong number of fields in Postgres "
|
||||
"response. Expected 10, but got "
|
||||
<< res.nfields() << " . sql = " << sql.str();
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < res.ntuples(); ++i)
|
||||
{
|
||||
char const* hash = res.c_str(i, 0);
|
||||
char const* prevHash = res.c_str(i, 1);
|
||||
char const* accountHash = res.c_str(i, 2);
|
||||
char const* txHash = res.c_str(i, 3);
|
||||
std::int64_t totalCoins = res.asBigInt(i, 4);
|
||||
std::int64_t closeTime = res.asBigInt(i, 5);
|
||||
std::int64_t parentCloseTime = res.asBigInt(i, 6);
|
||||
std::int64_t closeTimeRes = res.asBigInt(i, 7);
|
||||
std::int64_t closeFlags = res.asBigInt(i, 8);
|
||||
std::int64_t ledgerSeq = res.asBigInt(i, 9);
|
||||
|
||||
JLOG(log.trace()) << __func__ << " - Postgres response = " << hash
|
||||
<< " , " << prevHash << " , " << accountHash << " , "
|
||||
<< txHash << " , " << totalCoins << ", " << closeTime
|
||||
<< ", " << parentCloseTime << ", " << closeTimeRes
|
||||
<< ", " << closeFlags << ", " << ledgerSeq
|
||||
<< " - sql = " << sql.str();
|
||||
JLOG(log.debug()) << __func__
|
||||
<< " - Successfully fetched ledger with sequence = "
|
||||
<< ledgerSeq << " from Postgres";
|
||||
|
||||
using time_point = NetClock::time_point;
|
||||
using duration = NetClock::duration;
|
||||
|
||||
LedgerInfo info;
|
||||
if (!info.parentHash.parseHex(prevHash + 2))
|
||||
assert(false);
|
||||
if (!info.txHash.parseHex(txHash + 2))
|
||||
assert(false);
|
||||
if (!info.accountHash.parseHex(accountHash + 2))
|
||||
assert(false);
|
||||
info.drops = totalCoins;
|
||||
info.closeTime = time_point{duration{closeTime}};
|
||||
info.parentCloseTime = time_point{duration{parentCloseTime}};
|
||||
info.closeFlags = closeFlags;
|
||||
info.closeTimeResolution = duration{closeTimeRes};
|
||||
info.seq = ledgerSeq;
|
||||
if (!info.hash.parseHex(hash + 2))
|
||||
assert(false);
|
||||
info.validated = true;
|
||||
infos.push_back(info);
|
||||
}
|
||||
|
||||
#endif
|
||||
return infos;
|
||||
}
|
||||
|
||||
// Load a ledger from Postgres
|
||||
// @param whichLedger specifies sequence or hash of ledger. Passing
|
||||
// std::monostate loads the most recent ledger
|
||||
// @param app the Application
|
||||
// @return tuple of (ledger, sequence, hash)
|
||||
static std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
loadLedgerHelperPostgres(
|
||||
std::variant<std::monostate, uint256, uint32_t> const& whichLedger,
|
||||
Application& app)
|
||||
{
|
||||
std::vector<LedgerInfo> infos;
|
||||
std::visit(
|
||||
[&infos, &app](auto&& arg) {
|
||||
infos = loadLedgerInfosPostgres(arg, app);
|
||||
},
|
||||
whichLedger);
|
||||
assert(infos.size() <= 1);
|
||||
if (!infos.size())
|
||||
return std::make_tuple(std::shared_ptr<Ledger>(), 0, uint256{});
|
||||
LedgerInfo info = infos[0];
|
||||
bool loaded;
|
||||
auto ledger = std::make_shared<Ledger>(
|
||||
info,
|
||||
loaded,
|
||||
false,
|
||||
app.config(),
|
||||
app.getNodeFamily(),
|
||||
app.journal("Ledger"));
|
||||
|
||||
if (!loaded)
|
||||
ledger.reset();
|
||||
|
||||
return std::make_tuple(ledger, info.seq, info.hash);
|
||||
}
|
||||
|
||||
std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
getLatestLedger(Application& app)
|
||||
{
|
||||
if (app.config().reporting())
|
||||
return loadLedgerHelperPostgres({}, app);
|
||||
else
|
||||
return loadLedgerHelper("order by LedgerSeq desc limit 1", app);
|
||||
}
|
||||
|
||||
// Load a ledger by index (AKA sequence) from Postgres
|
||||
// @param ledgerIndex the ledger index (or sequence) to load
|
||||
// @param app reference to Application
|
||||
// @return the loaded ledger
|
||||
static std::shared_ptr<Ledger>
|
||||
loadByIndexPostgres(std::uint32_t ledgerIndex, Application& app)
|
||||
{
|
||||
std::shared_ptr<Ledger> ledger;
|
||||
std::tie(ledger, std::ignore, std::ignore) =
|
||||
loadLedgerHelperPostgres(uint32_t{ledgerIndex}, app);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
return ledger;
|
||||
}
|
||||
|
||||
// Load a ledger by hash from Postgres
|
||||
// @param hash hash of the ledger to load
|
||||
// @param app reference to Application
|
||||
// @return the loaded ledger
|
||||
static std::shared_ptr<Ledger>
|
||||
loadByHashPostgres(uint256 const& ledgerHash, Application& app)
|
||||
{
|
||||
std::shared_ptr<Ledger> ledger;
|
||||
std::tie(ledger, std::ignore, std::ignore) =
|
||||
loadLedgerHelperPostgres(uint256{ledgerHash}, app);
|
||||
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
|
||||
assert(!ledger || ledger->info().hash == ledgerHash);
|
||||
|
||||
return ledger;
|
||||
}
|
||||
|
||||
// Given a ledger sequence, return the ledger hash
|
||||
// @param ledgerIndex ledger sequence
|
||||
// @param app Application
|
||||
// @return hash of ledger
|
||||
static uint256
|
||||
getHashByIndexPostgres(std::uint32_t ledgerIndex, Application& app)
|
||||
{
|
||||
uint256 ret;
|
||||
|
||||
auto infos = loadLedgerInfosPostgres(ledgerIndex, app);
|
||||
assert(infos.size() <= 1);
|
||||
if (infos.size())
|
||||
return infos[0].hash;
|
||||
return {};
|
||||
}
|
||||
|
||||
// Given a ledger sequence, return the ledger hash and the parent hash
|
||||
// @param ledgerIndex ledger sequence
|
||||
// @param[out] ledgerHash hash of ledger
|
||||
// @param[out] parentHash hash of parent ledger
|
||||
// @param app Application
|
||||
// @return true if the data was found
|
||||
static bool
|
||||
getHashesByIndexPostgres(
|
||||
std::uint32_t ledgerIndex,
|
||||
uint256& ledgerHash,
|
||||
uint256& parentHash,
|
||||
Application& app)
|
||||
{
|
||||
auto infos = loadLedgerInfosPostgres(ledgerIndex, app);
|
||||
assert(infos.size() <= 1);
|
||||
if (infos.size())
|
||||
{
|
||||
ledgerHash = infos[0].hash;
|
||||
parentHash = infos[0].parentHash;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Given a contiguous range of sequences, return a map of
|
||||
// sequence -> (hash, parent hash)
|
||||
// @param minSeq lower bound of range
|
||||
// @param maxSeq upper bound of range
|
||||
// @param app Application
|
||||
// @return mapping of all found ledger sequences to their hash and parent hash
|
||||
static std::map<std::uint32_t, std::pair<uint256, uint256>>
|
||||
getHashesByIndexPostgres(
|
||||
std::uint32_t minSeq,
|
||||
std::uint32_t maxSeq,
|
||||
Application& app)
|
||||
{
|
||||
std::map<uint32_t, std::pair<uint256, uint256>> ret;
|
||||
auto infos = loadLedgerInfosPostgres(std::make_pair(minSeq, maxSeq), app);
|
||||
for (auto& info : infos)
|
||||
{
|
||||
ret[info.seq] = std::make_pair(info.hash, info.parentHash);
|
||||
}
|
||||
return ret;
|
||||
const std::optional<LedgerInfo> info =
|
||||
app.getRelationalDBInterface().getNewestLedgerInfo();
|
||||
if (!info)
|
||||
return {std::shared_ptr<Ledger>(), {}, {}};
|
||||
return {loadLedgerHelper(*info, app, true), info->seq, info->hash};
|
||||
}
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire)
|
||||
{
|
||||
if (app.config().reporting())
|
||||
return loadByIndexPostgres(ledgerIndex, app);
|
||||
std::shared_ptr<Ledger> ledger;
|
||||
if (std::optional<LedgerInfo> info =
|
||||
app.getRelationalDBInterface().getLedgerInfoByIndex(ledgerIndex))
|
||||
{
|
||||
std::ostringstream s;
|
||||
s << "WHERE LedgerSeq = " << ledgerIndex;
|
||||
std::tie(ledger, std::ignore, std::ignore) =
|
||||
loadLedgerHelper(s.str(), app, acquire);
|
||||
std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
return ledger;
|
||||
}
|
||||
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
return ledger;
|
||||
return {};
|
||||
}
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByHash(uint256 const& ledgerHash, Application& app, bool acquire)
|
||||
{
|
||||
if (app.config().reporting())
|
||||
return loadByHashPostgres(ledgerHash, app);
|
||||
std::shared_ptr<Ledger> ledger;
|
||||
if (std::optional<LedgerInfo> info =
|
||||
app.getRelationalDBInterface().getLedgerInfoByHash(ledgerHash))
|
||||
{
|
||||
std::ostringstream s;
|
||||
s << "WHERE LedgerHash = '" << ledgerHash << "'";
|
||||
std::tie(ledger, std::ignore, std::ignore) =
|
||||
loadLedgerHelper(s.str(), app, acquire);
|
||||
std::shared_ptr<Ledger> ledger = loadLedgerHelper(*info, app, acquire);
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
assert(!ledger || ledger->info().hash == ledgerHash);
|
||||
return ledger;
|
||||
}
|
||||
|
||||
finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger"));
|
||||
|
||||
assert(!ledger || ledger->info().hash == ledgerHash);
|
||||
|
||||
return ledger;
|
||||
}
|
||||
|
||||
uint256
|
||||
getHashByIndex(std::uint32_t ledgerIndex, Application& app)
|
||||
{
|
||||
if (app.config().reporting())
|
||||
return getHashByIndexPostgres(ledgerIndex, app);
|
||||
uint256 ret;
|
||||
|
||||
std::string sql =
|
||||
"SELECT LedgerHash FROM Ledgers INDEXED BY SeqLedger WHERE LedgerSeq='";
|
||||
sql.append(std::to_string(ledgerIndex));
|
||||
sql.append("';");
|
||||
|
||||
std::string hash;
|
||||
{
|
||||
auto db = app.getLedgerDB().checkoutDb();
|
||||
|
||||
boost::optional<std::string> lh;
|
||||
*db << sql, soci::into(lh);
|
||||
|
||||
if (!db->got_data() || !lh)
|
||||
return ret;
|
||||
|
||||
hash = *lh;
|
||||
if (hash.empty())
|
||||
return ret;
|
||||
}
|
||||
|
||||
(void)ret.parseHex(hash);
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool
|
||||
getHashesByIndex(
|
||||
std::uint32_t ledgerIndex,
|
||||
uint256& ledgerHash,
|
||||
uint256& parentHash,
|
||||
Application& app)
|
||||
{
|
||||
if (app.config().reporting())
|
||||
return getHashesByIndexPostgres(
|
||||
ledgerIndex, ledgerHash, parentHash, app);
|
||||
auto db = app.getLedgerDB().checkoutDb();
|
||||
|
||||
boost::optional<std::string> lhO, phO;
|
||||
|
||||
*db << "SELECT LedgerHash,PrevHash FROM Ledgers "
|
||||
"INDEXED BY SeqLedger Where LedgerSeq = :ls;",
|
||||
soci::into(lhO), soci::into(phO), soci::use(ledgerIndex);
|
||||
|
||||
if (!lhO || !phO)
|
||||
{
|
||||
auto stream = app.journal("Ledger").trace();
|
||||
JLOG(stream) << "Don't have ledger " << ledgerIndex;
|
||||
return false;
|
||||
}
|
||||
|
||||
return ledgerHash.parseHex(*lhO) && parentHash.parseHex(*phO);
|
||||
}
|
||||
|
||||
std::map<std::uint32_t, std::pair<uint256, uint256>>
|
||||
getHashesByIndex(std::uint32_t minSeq, std::uint32_t maxSeq, Application& app)
|
||||
{
|
||||
if (app.config().reporting())
|
||||
return getHashesByIndexPostgres(minSeq, maxSeq, app);
|
||||
std::map<std::uint32_t, std::pair<uint256, uint256>> ret;
|
||||
|
||||
std::string sql =
|
||||
"SELECT LedgerSeq,LedgerHash,PrevHash FROM Ledgers WHERE LedgerSeq >= ";
|
||||
sql.append(std::to_string(minSeq));
|
||||
sql.append(" AND LedgerSeq <= ");
|
||||
sql.append(std::to_string(maxSeq));
|
||||
sql.append(";");
|
||||
|
||||
auto db = app.getLedgerDB().checkoutDb();
|
||||
|
||||
std::uint64_t ls;
|
||||
std::string lh;
|
||||
boost::optional<std::string> ph;
|
||||
soci::statement st =
|
||||
(db->prepare << sql, soci::into(ls), soci::into(lh), soci::into(ph));
|
||||
|
||||
st.execute();
|
||||
while (st.fetch())
|
||||
{
|
||||
std::pair<uint256, uint256>& hashes =
|
||||
ret[rangeCheckedCast<std::uint32_t>(ls)];
|
||||
(void)hashes.first.parseHex(lh);
|
||||
if (ph)
|
||||
(void)hashes.second.parseHex(*ph);
|
||||
else
|
||||
hashes.second.zero();
|
||||
if (!ph)
|
||||
{
|
||||
auto stream = app.journal("Ledger").warn();
|
||||
JLOG(stream) << "Null prev hash for ledger seq: " << ls;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
return {};
|
||||
}
|
||||
|
||||
std::vector<
|
||||
@@ -1782,69 +1164,10 @@ flatFetchTransactions(ReadView const& ledger, Application& app)
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
std::vector<uint256> nodestoreHashes;
|
||||
#ifdef RIPPLED_REPORTING
|
||||
|
||||
auto log = app.journal("Ledger");
|
||||
|
||||
std::string query =
|
||||
"SELECT nodestore_hash"
|
||||
" FROM transactions "
|
||||
" WHERE ledger_seq = " +
|
||||
std::to_string(ledger.info().seq);
|
||||
auto res = PgQuery(app.getPgPool())(query.c_str());
|
||||
|
||||
if (!res)
|
||||
{
|
||||
JLOG(log.error()) << __func__
|
||||
<< " : Postgres response is null - query = " << query;
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
else if (res.status() != PGRES_TUPLES_OK)
|
||||
{
|
||||
JLOG(log.error()) << __func__
|
||||
<< " : Postgres response should have been "
|
||||
"PGRES_TUPLES_OK but instead was "
|
||||
<< res.status() << " - msg = " << res.msg()
|
||||
<< " - query = " << query;
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
|
||||
JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg();
|
||||
|
||||
if (res.isNull() || res.ntuples() == 0)
|
||||
{
|
||||
JLOG(log.debug()) << __func__
|
||||
<< " : Ledger not found. query = " << query;
|
||||
return {};
|
||||
}
|
||||
else if (res.ntuples() > 0)
|
||||
{
|
||||
if (res.nfields() != 1)
|
||||
{
|
||||
JLOG(log.error()) << __func__
|
||||
<< " : Wrong number of fields in Postgres "
|
||||
"response. Expected 1, but got "
|
||||
<< res.nfields() << " . query = " << query;
|
||||
assert(false);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
JLOG(log.trace()) << __func__ << " : result = " << res.c_str()
|
||||
<< " : query = " << query;
|
||||
for (size_t i = 0; i < res.ntuples(); ++i)
|
||||
{
|
||||
char const* nodestoreHash = res.c_str(i, 0);
|
||||
uint256 hash;
|
||||
if (!hash.parseHex(nodestoreHash + 2))
|
||||
assert(false);
|
||||
|
||||
nodestoreHashes.push_back(hash);
|
||||
}
|
||||
#endif
|
||||
auto nodestoreHashes = dynamic_cast<RelationalDBInterfacePostgres*>(
|
||||
&app.getRelationalDBInterface())
|
||||
->getTxHashes(ledger.info().seq);
|
||||
|
||||
return flatFetchTransactions(app, nodestoreHashes);
|
||||
}
|
||||
|
||||
@@ -24,14 +24,13 @@
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/core/TimeKeeper.h>
|
||||
#include <ripple/ledger/CachedView.h>
|
||||
#include <ripple/ledger/TxMeta.h>
|
||||
#include <ripple/ledger/View.h>
|
||||
#include <ripple/protocol/Book.h>
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <ripple/protocol/Serializer.h>
|
||||
#include <ripple/protocol/TxMeta.h>
|
||||
#include <ripple/shamap/SHAMap.h>
|
||||
#include <boost/optional.hpp>
|
||||
#include <mutex>
|
||||
|
||||
namespace ripple {
|
||||
@@ -175,8 +174,8 @@ public:
|
||||
bool
|
||||
exists(uint256 const& key) const;
|
||||
|
||||
boost::optional<uint256>
|
||||
succ(uint256 const& key, boost::optional<uint256> const& last = boost::none)
|
||||
std::optional<uint256>
|
||||
succ(uint256 const& key, std::optional<uint256> const& last = std::nullopt)
|
||||
const override;
|
||||
|
||||
std::shared_ptr<SLE const>
|
||||
@@ -207,7 +206,7 @@ public:
|
||||
// DigestAwareReadView
|
||||
//
|
||||
|
||||
boost::optional<digest_type>
|
||||
std::optional<digest_type>
|
||||
digest(key_type const& key) const override;
|
||||
|
||||
//
|
||||
@@ -338,7 +337,7 @@ public:
|
||||
updateSkipList();
|
||||
|
||||
bool
|
||||
walkLedger(beast::Journal j) const;
|
||||
walkLedger(beast::Journal j, bool parallel = false) const;
|
||||
|
||||
bool
|
||||
assertSensible(beast::Journal ledgerJ) const;
|
||||
@@ -361,7 +360,7 @@ public:
|
||||
*
|
||||
* @return the public key if any
|
||||
*/
|
||||
boost::optional<PublicKey>
|
||||
std::optional<PublicKey>
|
||||
validatorToDisable() const;
|
||||
|
||||
/**
|
||||
@@ -369,7 +368,7 @@ public:
|
||||
*
|
||||
* @return the public key if any
|
||||
*/
|
||||
boost::optional<PublicKey>
|
||||
std::optional<PublicKey>
|
||||
validatorToReEnable() const;
|
||||
|
||||
/**
|
||||
@@ -432,31 +431,15 @@ pendSaveValidated(
|
||||
bool isSynchronous,
|
||||
bool isCurrent);
|
||||
|
||||
extern std::shared_ptr<Ledger>
|
||||
std::shared_ptr<Ledger>
|
||||
loadLedgerHelper(LedgerInfo const& sinfo, Application& app, bool acquire);
|
||||
|
||||
std::shared_ptr<Ledger>
|
||||
loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire = true);
|
||||
|
||||
extern std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
loadLedgerHelper(
|
||||
std::string const& sqlSuffix,
|
||||
Application& app,
|
||||
bool acquire = true);
|
||||
|
||||
extern std::shared_ptr<Ledger>
|
||||
std::shared_ptr<Ledger>
|
||||
loadByHash(uint256 const& ledgerHash, Application& app, bool acquire = true);
|
||||
|
||||
extern uint256
|
||||
getHashByIndex(std::uint32_t index, Application& app);
|
||||
|
||||
extern bool
|
||||
getHashesByIndex(
|
||||
std::uint32_t index,
|
||||
uint256& ledgerHash,
|
||||
uint256& parentHash,
|
||||
Application& app);
|
||||
|
||||
extern std::map<std::uint32_t, std::pair<uint256, uint256>>
|
||||
getHashesByIndex(std::uint32_t minSeq, std::uint32_t maxSeq, Application& app);
|
||||
|
||||
// Fetch the ledger with the highest sequence contained in the database
|
||||
extern std::tuple<std::shared_ptr<Ledger>, std::uint32_t, uint256>
|
||||
getLatestLedger(Application& app);
|
||||
|
||||
@@ -23,27 +23,32 @@
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/beast/utility/PropertyStream.h>
|
||||
#include <ripple/core/Stoppable.h>
|
||||
#include <ripple/json/json_value.h>
|
||||
#include <memory>
|
||||
|
||||
namespace ripple {
|
||||
namespace detail {
|
||||
|
||||
/** Check the ledger/transaction databases to make sure they have continuity */
|
||||
class LedgerCleaner : public Stoppable, public beast::PropertyStream::Source
|
||||
class LedgerCleaner : public beast::PropertyStream::Source
|
||||
{
|
||||
protected:
|
||||
explicit LedgerCleaner(Stoppable& parent);
|
||||
LedgerCleaner() : beast::PropertyStream::Source("ledgercleaner")
|
||||
{
|
||||
}
|
||||
|
||||
public:
|
||||
/** Destroy the object. */
|
||||
virtual ~LedgerCleaner() = 0;
|
||||
virtual ~LedgerCleaner() = default;
|
||||
|
||||
virtual void
|
||||
start() = 0;
|
||||
|
||||
virtual void
|
||||
stop() = 0;
|
||||
|
||||
/** Start a long running task to clean the ledger.
|
||||
The ledger is cleaned asynchronously, on an implementation defined
|
||||
thread. This function call does not block. The long running task
|
||||
will be stopped if the Stoppable stops.
|
||||
will be stopped by a call to stop().
|
||||
|
||||
Thread safety:
|
||||
Safe to call from any thread at any time.
|
||||
@@ -51,13 +56,12 @@ public:
|
||||
@param parameters A Json object with configurable parameters.
|
||||
*/
|
||||
virtual void
|
||||
doClean(Json::Value const& parameters) = 0;
|
||||
clean(Json::Value const& parameters) = 0;
|
||||
};
|
||||
|
||||
std::unique_ptr<LedgerCleaner>
|
||||
make_LedgerCleaner(Application& app, Stoppable& parent, beast::Journal journal);
|
||||
make_LedgerCleaner(Application& app, beast::Journal journal);
|
||||
|
||||
} // namespace detail
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
|
||||
@@ -323,8 +323,8 @@ void
|
||||
LedgerHistory::handleMismatch(
|
||||
LedgerHash const& built,
|
||||
LedgerHash const& valid,
|
||||
boost::optional<uint256> const& builtConsensusHash,
|
||||
boost::optional<uint256> const& validatedConsensusHash,
|
||||
std::optional<uint256> const& builtConsensusHash,
|
||||
std::optional<uint256> const& validatedConsensusHash,
|
||||
Json::Value const& consensus)
|
||||
{
|
||||
assert(built != valid);
|
||||
@@ -411,7 +411,7 @@ LedgerHistory::handleMismatch(
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((*b)->peekData() != (*v)->peekData())
|
||||
if ((*b)->slice() != (*v)->slice())
|
||||
{
|
||||
// Same transaction with different metadata
|
||||
log_metadata_difference(
|
||||
@@ -444,14 +444,14 @@ LedgerHistory::builtLedger(
|
||||
|
||||
if (entry->validated && !entry->built)
|
||||
{
|
||||
if (entry->validated.get() != hash)
|
||||
if (entry->validated.value() != hash)
|
||||
{
|
||||
JLOG(j_.error())
|
||||
<< "MISMATCH: seq=" << index
|
||||
<< " validated:" << entry->validated.get() << " then:" << hash;
|
||||
JLOG(j_.error()) << "MISMATCH: seq=" << index
|
||||
<< " validated:" << entry->validated.value()
|
||||
<< " then:" << hash;
|
||||
handleMismatch(
|
||||
hash,
|
||||
entry->validated.get(),
|
||||
entry->validated.value(),
|
||||
consensusHash,
|
||||
entry->validatedConsensusHash,
|
||||
consensus);
|
||||
@@ -471,7 +471,7 @@ LedgerHistory::builtLedger(
|
||||
void
|
||||
LedgerHistory::validatedLedger(
|
||||
std::shared_ptr<Ledger const> const& ledger,
|
||||
boost::optional<uint256> const& consensusHash)
|
||||
std::optional<uint256> const& consensusHash)
|
||||
{
|
||||
LedgerIndex index = ledger->info().seq;
|
||||
LedgerHash hash = ledger->info().hash;
|
||||
@@ -484,17 +484,17 @@ LedgerHistory::validatedLedger(
|
||||
|
||||
if (entry->built && !entry->validated)
|
||||
{
|
||||
if (entry->built.get() != hash)
|
||||
if (entry->built.value() != hash)
|
||||
{
|
||||
JLOG(j_.error())
|
||||
<< "MISMATCH: seq=" << index << " built:" << entry->built.get()
|
||||
<< " then:" << hash;
|
||||
<< "MISMATCH: seq=" << index
|
||||
<< " built:" << entry->built.value() << " then:" << hash;
|
||||
handleMismatch(
|
||||
entry->built.get(),
|
||||
entry->built.value(),
|
||||
hash,
|
||||
entry->builtConsensusHash,
|
||||
consensusHash,
|
||||
entry->consensus.get());
|
||||
entry->consensus.value());
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
@@ -26,6 +26,8 @@
|
||||
#include <ripple/beast/insight/Event.h>
|
||||
#include <ripple/protocol/RippleLedgerHash.h>
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
// VFALCO TODO Rename to OldLedgers ?
|
||||
@@ -95,7 +97,7 @@ public:
|
||||
void
|
||||
validatedLedger(
|
||||
std::shared_ptr<Ledger const> const&,
|
||||
boost::optional<uint256> const& consensusHash);
|
||||
std::optional<uint256> const& consensusHash);
|
||||
|
||||
/** Repair a hash to index mapping
|
||||
@param ledgerIndex The index whose mapping is to be repaired
|
||||
@@ -123,8 +125,8 @@ private:
|
||||
handleMismatch(
|
||||
LedgerHash const& built,
|
||||
LedgerHash const& valid,
|
||||
boost::optional<uint256> const& builtConsensusHash,
|
||||
boost::optional<uint256> const& validatedConsensusHash,
|
||||
std::optional<uint256> const& builtConsensusHash,
|
||||
std::optional<uint256> const& validatedConsensusHash,
|
||||
Json::Value const& consensus);
|
||||
|
||||
Application& app_;
|
||||
@@ -140,15 +142,15 @@ private:
|
||||
struct cv_entry
|
||||
{
|
||||
// Hash of locally built ledger
|
||||
boost::optional<LedgerHash> built;
|
||||
std::optional<LedgerHash> built;
|
||||
// Hash of the validated ledger
|
||||
boost::optional<LedgerHash> validated;
|
||||
std::optional<LedgerHash> validated;
|
||||
// Hash of locally accepted consensus transaction set
|
||||
boost::optional<uint256> builtConsensusHash;
|
||||
std::optional<uint256> builtConsensusHash;
|
||||
// Hash of validated consensus transaction set
|
||||
boost::optional<uint256> validatedConsensusHash;
|
||||
std::optional<uint256> validatedConsensusHash;
|
||||
// Consensus metadata of built ledger
|
||||
boost::optional<Json::Value> consensus;
|
||||
std::optional<Json::Value> consensus;
|
||||
};
|
||||
using ConsensusValidated = TaggedCache<LedgerIndex, cv_entry>;
|
||||
ConsensusValidated m_consensus_validated;
|
||||
|
||||
@@ -23,7 +23,6 @@
|
||||
#include <ripple/app/ledger/AbstractFetchPackContainer.h>
|
||||
#include <ripple/app/ledger/InboundLedgers.h>
|
||||
#include <ripple/app/ledger/Ledger.h>
|
||||
#include <ripple/app/ledger/LedgerCleaner.h>
|
||||
#include <ripple/app/ledger/LedgerHistory.h>
|
||||
#include <ripple/app/ledger/LedgerHolder.h>
|
||||
#include <ripple/app/ledger/LedgerReplay.h>
|
||||
@@ -31,15 +30,14 @@
|
||||
#include <ripple/app/misc/CanonicalTXSet.h>
|
||||
#include <ripple/basics/RangeSet.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/UptimeClock.h>
|
||||
#include <ripple/basics/chrono.h>
|
||||
#include <ripple/beast/insight/Collector.h>
|
||||
#include <ripple/beast/utility/PropertyStream.h>
|
||||
#include <ripple/core/Stoppable.h>
|
||||
#include <ripple/protocol/Protocol.h>
|
||||
#include <ripple/protocol/RippleLedgerHash.h>
|
||||
#include <ripple/protocol/STValidation.h>
|
||||
#include <ripple/protocol/messages.h>
|
||||
#include <boost/optional.hpp>
|
||||
#include <optional>
|
||||
|
||||
#include <mutex>
|
||||
|
||||
@@ -69,17 +67,12 @@ public:
|
||||
// Tracks the current ledger and any ledgers in the process of closing
|
||||
// Tracks ledger history
|
||||
// Tracks held transactions
|
||||
class LedgerMaster : public Stoppable, public AbstractFetchPackContainer
|
||||
class LedgerMaster : public AbstractFetchPackContainer
|
||||
{
|
||||
public:
|
||||
// Age for last validated ledger if the process has yet to validate.
|
||||
static constexpr std::chrono::seconds NO_VALIDATED_LEDGER_AGE =
|
||||
std::chrono::hours{24 * 14};
|
||||
|
||||
explicit LedgerMaster(
|
||||
Application& app,
|
||||
Stopwatch& stopwatch,
|
||||
Stoppable& parent,
|
||||
beast::insight::Collector::ptr const& collector,
|
||||
beast::Journal journal);
|
||||
|
||||
@@ -181,7 +174,7 @@ public:
|
||||
getHashBySeq(std::uint32_t index);
|
||||
|
||||
/** Walk to a ledger's hash using the skip list */
|
||||
boost::optional<LedgerHash>
|
||||
std::optional<LedgerHash>
|
||||
walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason);
|
||||
|
||||
/** Walk the chain of ledger hashes to determine the hash of the
|
||||
@@ -191,7 +184,7 @@ public:
|
||||
from the reference ledger or any prior ledger are not present
|
||||
in the node store.
|
||||
*/
|
||||
boost::optional<LedgerHash>
|
||||
std::optional<LedgerHash>
|
||||
walkHashBySeq(
|
||||
std::uint32_t index,
|
||||
std::shared_ptr<ReadView const> const& referenceLedger,
|
||||
@@ -206,10 +199,10 @@ public:
|
||||
void
|
||||
setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV);
|
||||
|
||||
boost::optional<NetClock::time_point>
|
||||
std::optional<NetClock::time_point>
|
||||
getCloseTimeBySeq(LedgerIndex ledgerIndex);
|
||||
|
||||
boost::optional<NetClock::time_point>
|
||||
std::optional<NetClock::time_point>
|
||||
getCloseTimeByHash(LedgerHash const& ledgerHash, LedgerIndex ledgerIndex);
|
||||
|
||||
void
|
||||
@@ -243,8 +236,6 @@ public:
|
||||
uint256 const& consensusHash,
|
||||
Json::Value consensus);
|
||||
|
||||
LedgerIndex
|
||||
getBuildingLedger();
|
||||
void
|
||||
setBuildingLedger(LedgerIndex index);
|
||||
|
||||
@@ -259,11 +250,6 @@ public:
|
||||
|
||||
bool
|
||||
fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash);
|
||||
void
|
||||
doLedgerCleaner(Json::Value const& parameters);
|
||||
|
||||
beast::PropertyStream::Source&
|
||||
getPropertySource();
|
||||
|
||||
void
|
||||
clearPriorLedgers(LedgerIndex seq);
|
||||
@@ -284,7 +270,7 @@ public:
|
||||
void
|
||||
addFetchPack(uint256 const& hash, std::shared_ptr<Blob> data);
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
getFetchPack(uint256 const& hash) override;
|
||||
|
||||
void
|
||||
@@ -305,7 +291,7 @@ public:
|
||||
}
|
||||
|
||||
// Returns the minimum ledger sequence in SQL database, if any.
|
||||
boost::optional<LedgerIndex>
|
||||
std::optional<LedgerIndex>
|
||||
minSqlSeq();
|
||||
|
||||
private:
|
||||
@@ -315,19 +301,17 @@ private:
|
||||
setPubLedger(std::shared_ptr<Ledger const> const& l);
|
||||
|
||||
void
|
||||
tryFill(Job& job, std::shared_ptr<Ledger const> ledger);
|
||||
tryFill(std::shared_ptr<Ledger const> ledger);
|
||||
|
||||
void
|
||||
getFetchPack(LedgerIndex missing, InboundLedger::Reason reason);
|
||||
|
||||
boost::optional<LedgerHash>
|
||||
std::optional<LedgerHash>
|
||||
getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason);
|
||||
|
||||
std::size_t
|
||||
getNeededValidations();
|
||||
void
|
||||
advanceThread();
|
||||
void
|
||||
fetchForHistory(
|
||||
std::uint32_t missing,
|
||||
bool& progress,
|
||||
@@ -342,7 +326,7 @@ private:
|
||||
findNewLedgersToPublish(std::unique_lock<std::recursive_mutex>&);
|
||||
|
||||
void
|
||||
updatePaths(Job& job);
|
||||
updatePaths();
|
||||
|
||||
// Returns true if work started. Always called with m_mutex locked.
|
||||
// The passed lock is a reminder to callers.
|
||||
@@ -385,8 +369,6 @@ private:
|
||||
std::recursive_mutex mCompleteLock;
|
||||
RangeSet<std::uint32_t> mCompleteLedgers;
|
||||
|
||||
std::unique_ptr<detail::LedgerCleaner> mLedgerCleaner;
|
||||
|
||||
// Publish thread is running.
|
||||
bool mAdvanceThread{false};
|
||||
|
||||
|
||||
@@ -129,12 +129,6 @@ public:
|
||||
bool
|
||||
finished() const;
|
||||
|
||||
static char const*
|
||||
getCountedObjectName()
|
||||
{
|
||||
return "LedgerReplayTask";
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
onTimer(bool progress, ScopedLockType& sl) override;
|
||||
|
||||
@@ -24,7 +24,6 @@
|
||||
#include <ripple/app/ledger/LedgerReplayTask.h>
|
||||
#include <ripple/app/main/Application.h>
|
||||
#include <ripple/beast/utility/Journal.h>
|
||||
#include <ripple/core/Stoppable.h>
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
@@ -70,14 +69,13 @@ std::uint32_t constexpr MAX_QUEUED_TASKS = 100;
|
||||
/**
|
||||
* Manages the lifetime of ledger replay tasks.
|
||||
*/
|
||||
class LedgerReplayer final : public Stoppable
|
||||
class LedgerReplayer final
|
||||
{
|
||||
public:
|
||||
LedgerReplayer(
|
||||
Application& app,
|
||||
InboundLedgers& inboundLedgers,
|
||||
std::unique_ptr<PeerSetBuilder> peerSetBuilder,
|
||||
Stoppable& parent);
|
||||
std::unique_ptr<PeerSetBuilder> peerSetBuilder);
|
||||
|
||||
~LedgerReplayer();
|
||||
|
||||
@@ -125,7 +123,7 @@ public:
|
||||
sweep();
|
||||
|
||||
void
|
||||
onStop() override;
|
||||
stop();
|
||||
|
||||
private:
|
||||
mutable std::mutex mtx_;
|
||||
|
||||
@@ -37,7 +37,7 @@ struct LedgerFill
|
||||
RPC::Context* ctx,
|
||||
int o = 0,
|
||||
std::vector<TxQ::TxDetails> q = {},
|
||||
LedgerEntryType t = ltINVALID)
|
||||
LedgerEntryType t = ltANY)
|
||||
: ledger(l), options(o), txQueue(std::move(q)), type(t), context(ctx)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -185,7 +185,6 @@ private:
|
||||
FwdRange const& txs,
|
||||
OrderedTxs& retries,
|
||||
ApplyFlags flags,
|
||||
std::map<uint256, bool>& shouldRecover,
|
||||
beast::Journal j);
|
||||
|
||||
enum Result { success, failure, retry };
|
||||
@@ -200,7 +199,6 @@ private:
|
||||
std::shared_ptr<STTx const> const& tx,
|
||||
bool retry,
|
||||
ApplyFlags flags,
|
||||
bool shouldRecover,
|
||||
beast::Journal j);
|
||||
};
|
||||
|
||||
@@ -215,27 +213,25 @@ OpenLedger::apply(
|
||||
FwdRange const& txs,
|
||||
OrderedTxs& retries,
|
||||
ApplyFlags flags,
|
||||
std::map<uint256, bool>& shouldRecover,
|
||||
beast::Journal j)
|
||||
{
|
||||
for (auto iter = txs.begin(); iter != txs.end(); ++iter)
|
||||
{
|
||||
try
|
||||
{
|
||||
// Dereferencing the iterator can
|
||||
// throw since it may be transformed.
|
||||
// Dereferencing the iterator can throw since it may be transformed.
|
||||
auto const tx = *iter;
|
||||
auto const txId = tx->getTransactionID();
|
||||
if (check.txExists(txId))
|
||||
continue;
|
||||
auto const result =
|
||||
apply_one(app, view, tx, true, flags, shouldRecover[txId], j);
|
||||
auto const result = apply_one(app, view, tx, true, flags, j);
|
||||
if (result == Result::retry)
|
||||
retries.insert(tx);
|
||||
}
|
||||
catch (std::exception const&)
|
||||
catch (std::exception const& e)
|
||||
{
|
||||
JLOG(j.error()) << "Caught exception";
|
||||
JLOG(j.error())
|
||||
<< "OpenLedger::apply: Caught exception: " << e.what();
|
||||
}
|
||||
}
|
||||
bool retry = true;
|
||||
@@ -245,14 +241,7 @@ OpenLedger::apply(
|
||||
auto iter = retries.begin();
|
||||
while (iter != retries.end())
|
||||
{
|
||||
switch (apply_one(
|
||||
app,
|
||||
view,
|
||||
iter->second,
|
||||
retry,
|
||||
flags,
|
||||
shouldRecover[iter->second->getTransactionID()],
|
||||
j))
|
||||
switch (apply_one(app, view, iter->second, retry, flags, j))
|
||||
{
|
||||
case Result::success:
|
||||
++changes;
|
||||
|
||||
@@ -27,11 +27,8 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
OrderBookDB::OrderBookDB(Application& app, Stoppable& parent)
|
||||
: Stoppable("OrderBookDB", parent)
|
||||
, app_(app)
|
||||
, mSeq(0)
|
||||
, j_(app.journal("OrderBookDB"))
|
||||
OrderBookDB::OrderBookDB(Application& app)
|
||||
: app_(app), mSeq(0), j_(app.journal("OrderBookDB"))
|
||||
{
|
||||
}
|
||||
|
||||
@@ -65,17 +62,16 @@ OrderBookDB::setup(std::shared_ptr<ReadView const> const& ledger)
|
||||
mSeq = seq;
|
||||
}
|
||||
|
||||
if (app_.config().PATH_SEARCH_MAX == 0)
|
||||
if (app_.config().PATH_SEARCH_MAX != 0)
|
||||
{
|
||||
// nothing to do
|
||||
if (app_.config().standalone())
|
||||
update(ledger);
|
||||
else
|
||||
app_.getJobQueue().addJob(
|
||||
jtUPDATE_PF, "OrderBookDB::update", [this, ledger]() {
|
||||
update(ledger);
|
||||
});
|
||||
}
|
||||
else if (app_.config().standalone())
|
||||
update(ledger);
|
||||
else
|
||||
app_.getJobQueue().addJob(
|
||||
jtUPDATE_PF, "OrderBookDB::update", [this, ledger](Job&) {
|
||||
update(ledger);
|
||||
});
|
||||
}
|
||||
|
||||
void
|
||||
@@ -101,7 +97,7 @@ OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
|
||||
{
|
||||
for (auto& sle : ledger->sles)
|
||||
{
|
||||
if (isStopping())
|
||||
if (app_.isStopping())
|
||||
{
|
||||
JLOG(j_.info())
|
||||
<< "OrderBookDB::update exiting due to isStopping";
|
||||
|
||||
@@ -28,10 +28,10 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
class OrderBookDB : public Stoppable
|
||||
class OrderBookDB
|
||||
{
|
||||
public:
|
||||
OrderBookDB(Application& app, Stoppable& parent);
|
||||
explicit OrderBookDB(Application& app);
|
||||
|
||||
void
|
||||
setup(std::shared_ptr<ReadView const> const& ledger);
|
||||
|
||||
@@ -38,7 +38,7 @@ TransactionStateSF::gotNode(
|
||||
ledgerSeq);
|
||||
}
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
TransactionStateSF::getNode(SHAMapHash const& nodeHash) const
|
||||
{
|
||||
return fp_.getFetchPack(nodeHash.as_uint256());
|
||||
|
||||
@@ -44,7 +44,7 @@ public:
|
||||
Blob&& nodeData,
|
||||
SHAMapNodeType type) const override;
|
||||
|
||||
boost::optional<Blob>
|
||||
std::optional<Blob>
|
||||
getNode(SHAMapHash const& nodeHash) const override;
|
||||
|
||||
private:
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user