mirror of
https://github.com/XRPLF/rippled.git
synced 2025-11-05 11:45:51 +00:00
Compare commits
120 Commits
vlntb/job-
...
a1q123456/
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
52becffa48 | ||
|
|
b5c4fd4c51 | ||
|
|
ffa323808d | ||
|
|
e7e800197e | ||
|
|
6e35bb91ec | ||
|
|
276c02197f | ||
|
|
fb228860c8 | ||
|
|
b6c2b5cec5 | ||
|
|
516271e8fc | ||
|
|
0d87dfbdb4 | ||
|
|
f7b00a929b | ||
|
|
9b3dd2c3b2 | ||
|
|
1a159e040e | ||
|
|
56964984a5 | ||
|
|
0b31d52896 | ||
|
|
5cf589af16 | ||
|
|
2754c6343b | ||
|
|
98bc036d1f | ||
|
|
429617e1ca | ||
|
|
a513f95fb5 | ||
|
|
3740308b61 | ||
|
|
f1625c9802 | ||
|
|
73bc28bf4f | ||
|
|
1240bae12b | ||
|
|
ceb0ce5634 | ||
|
|
fb89213d4d | ||
|
|
d8628d481d | ||
|
|
a14551b151 | ||
|
|
de33a6a241 | ||
|
|
28eec6ce1b | ||
|
|
c9a723128a | ||
|
|
da82e52613 | ||
|
|
c9d73b6135 | ||
|
|
b7ed99426b | ||
|
|
97f0747e10 | ||
|
|
abf12db788 | ||
|
|
bdfc376951 | ||
|
|
b40a3684ae | ||
|
|
86ef16dbeb | ||
|
|
39b5031ab5 | ||
|
|
94decc753b | ||
|
|
991891625a | ||
|
|
69314e6832 | ||
|
|
dbeb841b5a | ||
|
|
4eae037fee | ||
|
|
b5a63b39d3 | ||
|
|
6419f9a253 | ||
|
|
31c99caa65 | ||
|
|
d835e97490 | ||
|
|
baf4b8381f | ||
|
|
9b45b6888b | ||
|
|
7179ce9c58 | ||
|
|
921aef9934 | ||
|
|
e7a7bb83c1 | ||
|
|
5c2a3a2779 | ||
|
|
b2960b9e7f | ||
|
|
5713f9782a | ||
|
|
60e340d356 | ||
|
|
80d82c5b2b | ||
|
|
433eeabfa5 | ||
|
|
faa781b71f | ||
|
|
c233df720a | ||
|
|
7ff4f79d30 | ||
|
|
60909655d3 | ||
|
|
03e46cd026 | ||
|
|
e95683a0fb | ||
|
|
13353ae36d | ||
|
|
1a40f18bdd | ||
|
|
90e6380383 | ||
|
|
8bfaa7fe0a | ||
|
|
c9135a63cd | ||
|
|
452263eaa5 | ||
|
|
8aa94ea09a | ||
|
|
258ba71363 | ||
|
|
b8626ea3c6 | ||
|
|
6534757d85 | ||
|
|
8e94ea3154 | ||
|
|
b113190563 | ||
|
|
358b7f50a7 | ||
|
|
f47e2f4e82 | ||
|
|
a7eea9546f | ||
|
|
9874d47d7f | ||
|
|
c2f3e2e263 | ||
|
|
e18f27f5f7 | ||
|
|
df6daf0d8f | ||
|
|
e9d46f0bfc | ||
|
|
42fd74b77b | ||
|
|
c55ea56c5e | ||
|
|
1e01cd34f7 | ||
|
|
e2fa5c1b7c | ||
|
|
fc0984d286 | ||
|
|
8b3dcd41f7 | ||
|
|
8f2f5310e2 | ||
|
|
edb4f0342c | ||
|
|
ea17abb92a | ||
|
|
35a40a8e62 | ||
|
|
d494bf45b2 | ||
|
|
8bf4a5cbff | ||
|
|
58c2c82a30 | ||
|
|
11edaa441d | ||
|
|
a5e953b191 | ||
|
|
506ae12a8c | ||
|
|
0310c5cbe0 | ||
|
|
053e1af7ff | ||
|
|
7e24adbdd0 | ||
|
|
621df422a7 | ||
|
|
0a34b5c691 | ||
|
|
e0bc3dd51f | ||
|
|
dacecd24ba | ||
|
|
05105743e9 | ||
|
|
9e1fe9a85e | ||
|
|
d71ce51901 | ||
|
|
be668ee26d | ||
|
|
cae5294b4e | ||
|
|
cd777f79ef | ||
|
|
8b9e21e3f5 | ||
|
|
2a61aee562 | ||
|
|
40ce8a8833 | ||
|
|
7713ff8c5c | ||
|
|
70371a4344 |
@@ -1,5 +1,5 @@
|
|||||||
---
|
---
|
||||||
Language: Cpp
|
Language: Cpp
|
||||||
AccessModifierOffset: -4
|
AccessModifierOffset: -4
|
||||||
AlignAfterOpenBracket: AlwaysBreak
|
AlignAfterOpenBracket: AlwaysBreak
|
||||||
AlignConsecutiveAssignments: false
|
AlignConsecutiveAssignments: false
|
||||||
@@ -19,52 +19,52 @@ AlwaysBreakTemplateDeclarations: true
|
|||||||
BinPackArguments: false
|
BinPackArguments: false
|
||||||
BinPackParameters: false
|
BinPackParameters: false
|
||||||
BraceWrapping:
|
BraceWrapping:
|
||||||
AfterClass: true
|
AfterClass: true
|
||||||
AfterControlStatement: true
|
AfterControlStatement: true
|
||||||
AfterEnum: false
|
AfterEnum: false
|
||||||
AfterFunction: true
|
AfterFunction: true
|
||||||
AfterNamespace: false
|
AfterNamespace: false
|
||||||
AfterObjCDeclaration: true
|
AfterObjCDeclaration: true
|
||||||
AfterStruct: true
|
AfterStruct: true
|
||||||
AfterUnion: true
|
AfterUnion: true
|
||||||
BeforeCatch: true
|
BeforeCatch: true
|
||||||
BeforeElse: true
|
BeforeElse: true
|
||||||
IndentBraces: false
|
IndentBraces: false
|
||||||
BreakBeforeBinaryOperators: false
|
BreakBeforeBinaryOperators: false
|
||||||
BreakBeforeBraces: Custom
|
BreakBeforeBraces: Custom
|
||||||
BreakBeforeTernaryOperators: true
|
BreakBeforeTernaryOperators: true
|
||||||
BreakConstructorInitializersBeforeComma: true
|
BreakConstructorInitializersBeforeComma: true
|
||||||
ColumnLimit: 80
|
ColumnLimit: 80
|
||||||
CommentPragmas: '^ IWYU pragma:'
|
CommentPragmas: "^ IWYU pragma:"
|
||||||
ConstructorInitializerAllOnOneLineOrOnePerLine: true
|
ConstructorInitializerAllOnOneLineOrOnePerLine: true
|
||||||
ConstructorInitializerIndentWidth: 4
|
ConstructorInitializerIndentWidth: 4
|
||||||
ContinuationIndentWidth: 4
|
ContinuationIndentWidth: 4
|
||||||
Cpp11BracedListStyle: true
|
Cpp11BracedListStyle: true
|
||||||
DerivePointerAlignment: false
|
DerivePointerAlignment: false
|
||||||
DisableFormat: false
|
DisableFormat: false
|
||||||
ExperimentalAutoDetectBinPacking: false
|
ExperimentalAutoDetectBinPacking: false
|
||||||
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
|
ForEachMacros: [Q_FOREACH, BOOST_FOREACH]
|
||||||
IncludeBlocks: Regroup
|
IncludeBlocks: Regroup
|
||||||
IncludeCategories:
|
IncludeCategories:
|
||||||
- Regex: '^<(test)/'
|
- Regex: "^<(test)/"
|
||||||
Priority: 0
|
Priority: 0
|
||||||
- Regex: '^<(xrpld)/'
|
- Regex: "^<(xrpld)/"
|
||||||
Priority: 1
|
Priority: 1
|
||||||
- Regex: '^<(xrpl)/'
|
- Regex: "^<(xrpl)/"
|
||||||
Priority: 2
|
Priority: 2
|
||||||
- Regex: '^<(boost)/'
|
- Regex: "^<(boost)/"
|
||||||
Priority: 3
|
Priority: 3
|
||||||
- Regex: '^.*/'
|
- Regex: "^.*/"
|
||||||
Priority: 4
|
Priority: 4
|
||||||
- Regex: '^.*\.h'
|
- Regex: '^.*\.h'
|
||||||
Priority: 5
|
Priority: 5
|
||||||
- Regex: '.*'
|
- Regex: ".*"
|
||||||
Priority: 6
|
Priority: 6
|
||||||
IncludeIsMainRegex: '$'
|
IncludeIsMainRegex: "$"
|
||||||
IndentCaseLabels: true
|
IndentCaseLabels: true
|
||||||
IndentFunctionDeclarationAfterType: false
|
IndentFunctionDeclarationAfterType: false
|
||||||
IndentRequiresClause: true
|
IndentRequiresClause: true
|
||||||
IndentWidth: 4
|
IndentWidth: 4
|
||||||
IndentWrappedFunctionNames: false
|
IndentWrappedFunctionNames: false
|
||||||
KeepEmptyLinesAtTheStartOfBlocks: false
|
KeepEmptyLinesAtTheStartOfBlocks: false
|
||||||
MaxEmptyLinesToKeep: 1
|
MaxEmptyLinesToKeep: 1
|
||||||
@@ -78,20 +78,25 @@ PenaltyBreakString: 1000
|
|||||||
PenaltyExcessCharacter: 1000000
|
PenaltyExcessCharacter: 1000000
|
||||||
PenaltyReturnTypeOnItsOwnLine: 200
|
PenaltyReturnTypeOnItsOwnLine: 200
|
||||||
PointerAlignment: Left
|
PointerAlignment: Left
|
||||||
ReflowComments: true
|
ReflowComments: true
|
||||||
RequiresClausePosition: OwnLine
|
RequiresClausePosition: OwnLine
|
||||||
SortIncludes: true
|
SortIncludes: true
|
||||||
SpaceAfterCStyleCast: false
|
SpaceAfterCStyleCast: false
|
||||||
SpaceBeforeAssignmentOperators: true
|
SpaceBeforeAssignmentOperators: true
|
||||||
SpaceBeforeParens: ControlStatements
|
SpaceBeforeParens: ControlStatements
|
||||||
SpaceInEmptyParentheses: false
|
SpaceInEmptyParentheses: false
|
||||||
SpacesBeforeTrailingComments: 2
|
SpacesBeforeTrailingComments: 2
|
||||||
SpacesInAngles: false
|
SpacesInAngles: false
|
||||||
SpacesInContainerLiterals: true
|
SpacesInContainerLiterals: true
|
||||||
SpacesInCStyleCastParentheses: false
|
SpacesInCStyleCastParentheses: false
|
||||||
SpacesInParentheses: false
|
SpacesInParentheses: false
|
||||||
SpacesInSquareBrackets: false
|
SpacesInSquareBrackets: false
|
||||||
Standard: Cpp11
|
Standard: Cpp11
|
||||||
TabWidth: 8
|
TabWidth: 8
|
||||||
UseTab: Never
|
UseTab: Never
|
||||||
QualifierAlignment: Right
|
QualifierAlignment: Right
|
||||||
|
---
|
||||||
|
Language: JavaScript
|
||||||
|
---
|
||||||
|
Language: Json
|
||||||
|
IndentWidth: 2
|
||||||
|
|||||||
@@ -7,13 +7,13 @@ comment:
|
|||||||
show_carryforward_flags: false
|
show_carryforward_flags: false
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
range: "60..80"
|
range: "70..85"
|
||||||
precision: 1
|
precision: 1
|
||||||
round: nearest
|
round: nearest
|
||||||
status:
|
status:
|
||||||
project:
|
project:
|
||||||
default:
|
default:
|
||||||
target: 60%
|
target: 75%
|
||||||
threshold: 2%
|
threshold: 2%
|
||||||
patch:
|
patch:
|
||||||
default:
|
default:
|
||||||
@@ -27,7 +27,7 @@ github_checks:
|
|||||||
parsers:
|
parsers:
|
||||||
cobertura:
|
cobertura:
|
||||||
partials_as_hits: true
|
partials_as_hits: true
|
||||||
handle_missing_conditions : true
|
handle_missing_conditions: true
|
||||||
|
|
||||||
slack_app: false
|
slack_app: false
|
||||||
|
|
||||||
|
|||||||
@@ -11,3 +11,4 @@ b9d007813378ad0ff45660dc07285b823c7e9855
|
|||||||
fe9a5365b8a52d4acc42eb27369247e6f238a4f9
|
fe9a5365b8a52d4acc42eb27369247e6f238a4f9
|
||||||
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
|
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
|
||||||
552377c76f55b403a1c876df873a23d780fcc81c
|
552377c76f55b403a1c876df873a23d780fcc81c
|
||||||
|
97f0747e103f13e26e45b731731059b32f7679ac
|
||||||
|
|||||||
13
.github/ISSUE_TEMPLATE/bug_report.md
vendored
13
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -2,30 +2,35 @@
|
|||||||
name: Bug Report
|
name: Bug Report
|
||||||
about: Create a report to help us improve rippled
|
about: Create a report to help us improve rippled
|
||||||
title: "[Title with short description] (Version: [rippled version])"
|
title: "[Title with short description] (Version: [rippled version])"
|
||||||
labels: ''
|
labels: ""
|
||||||
assignees: ''
|
assignees: ""
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
<!-- Please search existing issues to avoid creating duplicates.-->
|
<!-- Please search existing issues to avoid creating duplicates.-->
|
||||||
|
|
||||||
## Issue Description
|
## Issue Description
|
||||||
|
|
||||||
<!--Provide a summary for your issue/bug.-->
|
<!--Provide a summary for your issue/bug.-->
|
||||||
|
|
||||||
## Steps to Reproduce
|
## Steps to Reproduce
|
||||||
|
|
||||||
<!--List in detail the exact steps to reproduce the unexpected behavior of the software.-->
|
<!--List in detail the exact steps to reproduce the unexpected behavior of the software.-->
|
||||||
|
|
||||||
## Expected Result
|
## Expected Result
|
||||||
|
|
||||||
<!--Explain in detail what behavior you expected to happen.-->
|
<!--Explain in detail what behavior you expected to happen.-->
|
||||||
|
|
||||||
## Actual Result
|
## Actual Result
|
||||||
|
|
||||||
<!--Explain in detail what behavior actually happened.-->
|
<!--Explain in detail what behavior actually happened.-->
|
||||||
|
|
||||||
## Environment
|
## Environment
|
||||||
|
|
||||||
<!--Please describe your environment setup (such as Ubuntu 18.04 with Boost 1.70).-->
|
<!--Please describe your environment setup (such as Ubuntu 18.04 with Boost 1.70).-->
|
||||||
<!-- If you are using a formal release, please use the version returned by './rippled --version' as the version number-->
|
<!-- If you are using a formal release, please use the version returned by './rippled --version' as the version number-->
|
||||||
<!-- If you are working off of develop, please add the git hash via 'git rev-parse HEAD'-->
|
<!-- If you are working off of develop, please add the git hash via 'git rev-parse HEAD'-->
|
||||||
|
|
||||||
## Supporting Files
|
## Supporting Files
|
||||||
|
|
||||||
<!--If you have supporting files such as a log, feel free to post a link here using Github Gist.-->
|
<!--If you have supporting files such as a log, feel free to post a link here using Github Gist.-->
|
||||||
<!--Consider adding configuration files with private information removed via Github Gist. -->
|
<!--Consider adding configuration files with private information removed via Github Gist. -->
|
||||||
|
|
||||||
|
|||||||
8
.github/ISSUE_TEMPLATE/feature_request.md
vendored
8
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -3,19 +3,23 @@ name: Feature Request
|
|||||||
about: Suggest a new feature for the rippled project
|
about: Suggest a new feature for the rippled project
|
||||||
title: "[Title with short description] (Version: [rippled version])"
|
title: "[Title with short description] (Version: [rippled version])"
|
||||||
labels: Feature Request
|
labels: Feature Request
|
||||||
assignees: ''
|
assignees: ""
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
<!-- Please search existing issues to avoid creating duplicates.-->
|
<!-- Please search existing issues to avoid creating duplicates.-->
|
||||||
|
|
||||||
## Summary
|
## Summary
|
||||||
|
|
||||||
<!-- Provide a summary to the feature request-->
|
<!-- Provide a summary to the feature request-->
|
||||||
|
|
||||||
## Motivation
|
## Motivation
|
||||||
|
|
||||||
<!-- Why do we need this feature?-->
|
<!-- Why do we need this feature?-->
|
||||||
|
|
||||||
## Solution
|
## Solution
|
||||||
|
|
||||||
<!-- What is the solution?-->
|
<!-- What is the solution?-->
|
||||||
|
|
||||||
## Paths Not Taken
|
## Paths Not Taken
|
||||||
|
|
||||||
<!-- What other alternatives have been considered?-->
|
<!-- What other alternatives have been considered?-->
|
||||||
|
|||||||
61
.github/actions/dependencies/action.yml
vendored
61
.github/actions/dependencies/action.yml
vendored
@@ -2,56 +2,37 @@ name: dependencies
|
|||||||
inputs:
|
inputs:
|
||||||
configuration:
|
configuration:
|
||||||
required: true
|
required: true
|
||||||
# An implicit input is the environment variable `build_dir`.
|
# Implicit inputs are the environment variables `build_dir`, CONAN_REMOTE_URL,
|
||||||
|
# CONAN_REMOTE_USERNAME, and CONAN_REMOTE_PASSWORD. The latter two are only
|
||||||
|
# used to upload newly built dependencies to the Conan remote.
|
||||||
runs:
|
runs:
|
||||||
using: composite
|
using: composite
|
||||||
steps:
|
steps:
|
||||||
- name: unlock Conan
|
- name: add Conan remote
|
||||||
shell: bash
|
if: ${{ env.CONAN_REMOTE_URL != '' }}
|
||||||
run: conan remove --locks
|
|
||||||
- name: export custom recipes
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
conan config set general.revisions_enabled=1
|
|
||||||
conan export external/snappy snappy/1.1.10@
|
|
||||||
conan export external/rocksdb rocksdb/9.7.3@
|
|
||||||
conan export external/soci soci/4.0.3@
|
|
||||||
conan export external/nudb nudb/2.0.8@
|
|
||||||
- name: add Ripple Conan remote
|
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
|
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
|
||||||
|
conan remote add --index 0 --force xrplf ${{ env.CONAN_REMOTE_URL }}
|
||||||
|
echo "Listing Conan remotes."
|
||||||
conan remote list
|
conan remote list
|
||||||
conan remote remove ripple || true
|
|
||||||
# Do not quote the URL. An empty string will be accepted (with
|
|
||||||
# a non-fatal warning), but a missing argument will not.
|
|
||||||
conan remote add ripple ${{ env.CONAN_URL }} --insert 0
|
|
||||||
- name: try to authenticate to Ripple Conan remote
|
|
||||||
id: remote
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
# `conan user` implicitly uses the environment variables
|
|
||||||
# CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
|
|
||||||
# https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
|
|
||||||
# https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
|
|
||||||
# https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
|
|
||||||
echo outcome=$(conan user --remote ripple --password >&2 \
|
|
||||||
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
|
|
||||||
- name: list missing binaries
|
|
||||||
id: binaries
|
|
||||||
shell: bash
|
|
||||||
# Print the list of dependencies that would need to be built locally.
|
|
||||||
# A non-empty list means we have "failed" to cache binaries remotely.
|
|
||||||
run: |
|
|
||||||
echo missing=$(conan info . --build missing --settings build_type=${{ inputs.configuration }} --json 2>/dev/null | grep '^\[') | tee ${GITHUB_OUTPUT}
|
|
||||||
- name: install dependencies
|
- name: install dependencies
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
mkdir ${build_dir}
|
mkdir -p ${{ env.build_dir }}
|
||||||
cd ${build_dir}
|
cd ${{ env.build_dir }}
|
||||||
conan install \
|
conan install \
|
||||||
--output-folder . \
|
--output-folder . \
|
||||||
--build missing \
|
--build missing \
|
||||||
--options tests=True \
|
--options:host "&:tests=True" \
|
||||||
--options xrpld=True \
|
--options:host "&:xrpld=True" \
|
||||||
--settings build_type=${{ inputs.configuration }} \
|
--settings:all build_type=${{ inputs.configuration }} \
|
||||||
..
|
..
|
||||||
|
- name: upload dependencies
|
||||||
|
if: ${{ env.CONAN_REMOTE_URL != '' && env.CONAN_REMOTE_USERNAME != '' && env.CONAN_REMOTE_PASSWORD != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "Logging into Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
|
||||||
|
conan remote login xrplf "${{ env.CONAN_REMOTE_USERNAME }}" --password "${{ env.CONAN_REMOTE_PASSWORD }}"
|
||||||
|
echo "Uploading dependencies."
|
||||||
|
conan upload '*' --confirm --check --remote xrplf
|
||||||
|
|||||||
29
.github/workflows/clang-format.yml
vendored
29
.github/workflows/clang-format.yml
vendored
@@ -9,24 +9,25 @@ jobs:
|
|||||||
check:
|
check:
|
||||||
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
env:
|
container: ghcr.io/xrplf/ci/tools-rippled-clang-format
|
||||||
CLANG_VERSION: 18
|
|
||||||
steps:
|
steps:
|
||||||
|
# For jobs running in containers, $GITHUB_WORKSPACE and ${{ github.workspace }} might not be the
|
||||||
|
# same directory. The actions/checkout step is *supposed* to checkout into $GITHUB_WORKSPACE and
|
||||||
|
# then add it to safe.directory (see instructions at https://github.com/actions/checkout)
|
||||||
|
# but that's apparently not happening for some container images. We can't be sure what is actually
|
||||||
|
# happening, so let's pre-emptively add both directories to safe.directory. There's a
|
||||||
|
# Github issue opened in 2022 and not resolved in 2025 https://github.com/actions/runner/issues/2058 ¯\_(ツ)_/¯
|
||||||
|
- run: |
|
||||||
|
git config --global --add safe.directory $GITHUB_WORKSPACE
|
||||||
|
git config --global --add safe.directory ${{ github.workspace }}
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Install clang-format
|
|
||||||
run: |
|
|
||||||
codename=$( lsb_release --codename --short )
|
|
||||||
sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null <<EOF
|
|
||||||
deb http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
|
|
||||||
deb-src http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
|
|
||||||
EOF
|
|
||||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install clang-format-${CLANG_VERSION}
|
|
||||||
- name: Format first-party sources
|
- name: Format first-party sources
|
||||||
run: find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format-${CLANG_VERSION} -i {} +
|
run: |
|
||||||
|
clang-format --version
|
||||||
|
find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format -i {} +
|
||||||
- name: Check for differences
|
- name: Check for differences
|
||||||
id: assert
|
id: assert
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
git diff --exit-code | tee "clang-format.patch"
|
git diff --exit-code | tee "clang-format.patch"
|
||||||
@@ -58,6 +59,6 @@ jobs:
|
|||||||
in your repo, commit, and push.
|
in your repo, commit, and push.
|
||||||
run: |
|
run: |
|
||||||
echo "${PREAMBLE}"
|
echo "${PREAMBLE}"
|
||||||
clang-format-${CLANG_VERSION} --version
|
clang-format --version
|
||||||
echo "${SUGGESTION}"
|
echo "${SUGGESTION}"
|
||||||
exit 1
|
exit 1
|
||||||
|
|||||||
2
.github/workflows/doxygen.yml
vendored
2
.github/workflows/doxygen.yml
vendored
@@ -10,7 +10,7 @@ concurrency:
|
|||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
job:
|
documentation:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
|
|||||||
24
.github/workflows/libxrpl.yml
vendored
24
.github/workflows/libxrpl.yml
vendored
@@ -1,13 +1,13 @@
|
|||||||
name: Check libXRPL compatibility with Clio
|
name: Check libXRPL compatibility with Clio
|
||||||
env:
|
env:
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
|
CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
|
CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- 'src/libxrpl/protocol/BuildInfo.cpp'
|
- "src/libxrpl/protocol/BuildInfo.cpp"
|
||||||
- '.github/workflows/libxrpl.yml'
|
- ".github/workflows/libxrpl.yml"
|
||||||
types: [opened, reopened, synchronize, ready_for_review]
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
@@ -29,7 +29,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
ref: ${{ github.event.pull_request.head.sha || github.sha }}
|
ref: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
running-workflow-name: wait-for-check-regexp
|
running-workflow-name: wait-for-check-regexp
|
||||||
check-regexp: '(dependencies|test).*linux.*' # Ignore windows and mac tests but make sure linux passes
|
check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
wait-interval: 10
|
wait-interval: 10
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -43,20 +43,20 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
conan export . ${{ steps.channel.outputs.channel }}
|
conan export . ${{ steps.channel.outputs.channel }}
|
||||||
- name: Add Ripple Conan remote
|
- name: Add Conan remote
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
|
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
|
||||||
|
conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force
|
||||||
|
echo "Listing Conan remotes."
|
||||||
conan remote list
|
conan remote list
|
||||||
conan remote remove ripple || true
|
|
||||||
# Do not quote the URL. An empty string will be accepted (with a non-fatal warning), but a missing argument will not.
|
|
||||||
conan remote add ripple ${{ env.CONAN_URL }} --insert 0
|
|
||||||
- name: Parse new version
|
- name: Parse new version
|
||||||
id: version
|
id: version
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
|
echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
|
||||||
| awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
|
| awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
|
||||||
- name: Try to authenticate to Ripple Conan remote
|
- name: Try to authenticate to Conan remote
|
||||||
id: remote
|
id: remote
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
@@ -64,7 +64,7 @@ jobs:
|
|||||||
# https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
|
# https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
|
||||||
# https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
|
# https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
|
||||||
# https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
|
# https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
|
||||||
echo outcome=$(conan user --remote ripple --password >&2 \
|
echo outcome=$(conan user --remote xrplf --password >&2 \
|
||||||
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
|
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
|
||||||
- name: Upload new package
|
- name: Upload new package
|
||||||
id: upload
|
id: upload
|
||||||
|
|||||||
43
.github/workflows/macos.yml
vendored
43
.github/workflows/macos.yml
vendored
@@ -11,13 +11,27 @@ on:
|
|||||||
- release
|
- release
|
||||||
- master
|
- master
|
||||||
# Branches that opt-in to running
|
# Branches that opt-in to running
|
||||||
- 'ci/**'
|
- "ci/**"
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
# This part of Conan configuration is specific to this workflow only; we do not want
|
||||||
|
# to pollute conan/profiles directory with settings which might not work for others
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
# This part of the Conan configuration is specific to this workflow only; we
|
||||||
|
# do not want to pollute the 'conan/profiles' directory with settings that
|
||||||
|
# might not work for other workflows.
|
||||||
|
CONAN_GLOBAL_CONF: |
|
||||||
|
core.download:parallel={{os.cpu_count()}}
|
||||||
|
core.upload:parallel={{os.cpu_count()}}
|
||||||
|
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
|
||||||
|
tools.build:verbosity=verbose
|
||||||
|
tools.compilation:verbosity=verbose
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
test:
|
test:
|
||||||
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
strategy:
|
strategy:
|
||||||
@@ -28,23 +42,22 @@ jobs:
|
|||||||
- Ninja
|
- Ninja
|
||||||
configuration:
|
configuration:
|
||||||
- Release
|
- Release
|
||||||
runs-on: [self-hosted, macOS]
|
runs-on: [self-hosted, macOS, mac-runner-m1]
|
||||||
env:
|
env:
|
||||||
# The `build` action requires these variables.
|
# The `build` action requires these variables.
|
||||||
build_dir: .build
|
build_dir: .build
|
||||||
NUM_PROCESSORS: 12
|
NUM_PROCESSORS: 12
|
||||||
steps:
|
steps:
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: install Conan
|
- name: install Conan
|
||||||
run: |
|
run: |
|
||||||
brew install conan@1
|
brew install conan
|
||||||
echo '/opt/homebrew/opt/conan@1/bin' >> $GITHUB_PATH
|
|
||||||
- name: install Ninja
|
- name: install Ninja
|
||||||
if: matrix.generator == 'Ninja'
|
if: matrix.generator == 'Ninja'
|
||||||
run: brew install ninja
|
run: brew install ninja
|
||||||
- name: install python
|
- name: install python
|
||||||
run: |
|
run: |
|
||||||
if which python > /dev/null 2>&1; then
|
if which python > /dev/null 2>&1; then
|
||||||
echo "Python executable exists"
|
echo "Python executable exists"
|
||||||
else
|
else
|
||||||
@@ -75,15 +88,12 @@ jobs:
|
|||||||
sysctl -n hw.logicalcpu
|
sysctl -n hw.logicalcpu
|
||||||
clang --version
|
clang --version
|
||||||
- name: configure Conan
|
- name: configure Conan
|
||||||
run : |
|
run: |
|
||||||
conan profile new default --detect || true
|
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
|
||||||
conan profile update settings.compiler.cppstd=20 default
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
|
conan profile show
|
||||||
- name: build dependencies
|
- name: build dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
|
|
||||||
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ matrix.configuration }}
|
configuration: ${{ matrix.configuration }}
|
||||||
- name: build
|
- name: build
|
||||||
@@ -96,4 +106,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
n=$(nproc)
|
n=$(nproc)
|
||||||
echo "Using $n test jobs"
|
echo "Using $n test jobs"
|
||||||
${build_dir}/rippled --unittest --unittest-jobs $n
|
|
||||||
|
cd ${build_dir}
|
||||||
|
./rippled --unittest --unittest-jobs $n
|
||||||
|
ctest -j $n --output-on-failure
|
||||||
|
|||||||
84
.github/workflows/missing-commits.yml
vendored
84
.github/workflows/missing-commits.yml
vendored
@@ -12,49 +12,49 @@ jobs:
|
|||||||
up_to_date:
|
up_to_date:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Check for missing commits
|
- name: Check for missing commits
|
||||||
id: commits
|
id: commits
|
||||||
env:
|
env:
|
||||||
SUGGESTION: |
|
SUGGESTION: |
|
||||||
|
|
||||||
If you are reading this, then the commits indicated above are
|
If you are reading this, then the commits indicated above are
|
||||||
missing from "develop" and/or "release". Do a reverse-merge
|
missing from "develop" and/or "release". Do a reverse-merge
|
||||||
as soon as possible. See CONTRIBUTING.md for instructions.
|
as soon as possible. See CONTRIBUTING.md for instructions.
|
||||||
run: |
|
run: |
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
# Branches ordered by how "canonical" they are. Every commit in
|
# Branches ordered by how "canonical" they are. Every commit in
|
||||||
# one branch should be in all the branches behind it
|
# one branch should be in all the branches behind it
|
||||||
order=( master release develop )
|
order=( master release develop )
|
||||||
branches=()
|
branches=()
|
||||||
for branch in "${order[@]}"
|
for branch in "${order[@]}"
|
||||||
do
|
do
|
||||||
# Check that the branches exist so that this job will work on
|
# Check that the branches exist so that this job will work on
|
||||||
# forked repos, which don't necessarily have master and
|
# forked repos, which don't necessarily have master and
|
||||||
# release branches.
|
# release branches.
|
||||||
if git ls-remote --exit-code --heads origin \
|
if git ls-remote --exit-code --heads origin \
|
||||||
refs/heads/${branch} > /dev/null
|
refs/heads/${branch} > /dev/null
|
||||||
then
|
then
|
||||||
branches+=( origin/${branch} )
|
branches+=( origin/${branch} )
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
prior=()
|
prior=()
|
||||||
for branch in "${branches[@]}"
|
for branch in "${branches[@]}"
|
||||||
do
|
do
|
||||||
if [[ ${#prior[@]} -ne 0 ]]
|
if [[ ${#prior[@]} -ne 0 ]]
|
||||||
|
then
|
||||||
|
echo "Checking ${prior[@]} for commits missing from ${branch}"
|
||||||
|
git log --oneline --no-merges "${prior[@]}" \
|
||||||
|
^$branch | tee -a "missing-commits.txt"
|
||||||
|
echo
|
||||||
|
fi
|
||||||
|
prior+=( "${branch}" )
|
||||||
|
done
|
||||||
|
if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]]
|
||||||
then
|
then
|
||||||
echo "Checking ${prior[@]} for commits missing from ${branch}"
|
echo "${SUGGESTION}"
|
||||||
git log --oneline --no-merges "${prior[@]}" \
|
exit 1
|
||||||
^$branch | tee -a "missing-commits.txt"
|
|
||||||
echo
|
|
||||||
fi
|
fi
|
||||||
prior+=( "${branch}" )
|
|
||||||
done
|
|
||||||
if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]]
|
|
||||||
then
|
|
||||||
echo "${SUGGESTION}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|||||||
301
.github/workflows/nix.yml
vendored
301
.github/workflows/nix.yml
vendored
@@ -16,6 +16,20 @@ concurrency:
|
|||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
# This part of the Conan configuration is specific to this workflow only; we
|
||||||
|
# do not want to pollute the 'conan/profiles' directory with settings that
|
||||||
|
# might not work for other workflows.
|
||||||
|
CONAN_GLOBAL_CONF: |
|
||||||
|
core.download:parallel={{ os.cpu_count() }}
|
||||||
|
core.upload:parallel={{ os.cpu_count() }}
|
||||||
|
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
|
||||||
|
tools.build:verbosity=verbose
|
||||||
|
tools.compilation:verbosity=verbose
|
||||||
|
|
||||||
# This workflow has multiple job matrixes.
|
# This workflow has multiple job matrixes.
|
||||||
# They can be considered phases because most of the matrices ("test",
|
# They can be considered phases because most of the matrices ("test",
|
||||||
# "coverage", "conan", ) depend on the first ("dependencies").
|
# "coverage", "conan", ) depend on the first ("dependencies").
|
||||||
@@ -54,59 +68,45 @@ jobs:
|
|||||||
- Release
|
- Release
|
||||||
include:
|
include:
|
||||||
- compiler: gcc
|
- compiler: gcc
|
||||||
profile:
|
compiler_version: 12
|
||||||
version: 11
|
distro: ubuntu
|
||||||
cc: /usr/bin/gcc
|
codename: jammy
|
||||||
cxx: /usr/bin/g++
|
|
||||||
- compiler: clang
|
- compiler: clang
|
||||||
profile:
|
compiler_version: 16
|
||||||
version: 14
|
distro: debian
|
||||||
cc: /usr/bin/clang-14
|
codename: bookworm
|
||||||
cxx: /usr/bin/clang++-14
|
|
||||||
runs-on: [self-hosted, heavy]
|
runs-on: [self-hosted, heavy]
|
||||||
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }}
|
||||||
env:
|
env:
|
||||||
build_dir: .build
|
build_dir: .build
|
||||||
steps:
|
steps:
|
||||||
- name: upgrade conan
|
|
||||||
run: |
|
|
||||||
pip install --upgrade "conan<2"
|
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: check environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
echo ${PATH} | tr ':' '\n'
|
echo ${PATH} | tr ':' '\n'
|
||||||
lsb_release -a || true
|
lsb_release -a || true
|
||||||
${{ matrix.profile.cc }} --version
|
${{ matrix.compiler }}-${{ matrix.compiler_version }} --version
|
||||||
conan --version
|
conan --version
|
||||||
cmake --version
|
cmake --version
|
||||||
env | sort
|
env | sort
|
||||||
- name: configure Conan
|
- name: configure Conan
|
||||||
run: |
|
run: |
|
||||||
conan profile new default --detect
|
echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf
|
||||||
conan profile update settings.compiler.cppstd=20 default
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
conan profile update settings.compiler=${{ matrix.compiler }} default
|
conan profile show
|
||||||
conan profile update settings.compiler.version=${{ matrix.profile.version }} default
|
|
||||||
conan profile update settings.compiler.libcxx=libstdc++11 default
|
|
||||||
conan profile update env.CC=${{ matrix.profile.cc }} default
|
|
||||||
conan profile update env.CXX=${{ matrix.profile.cxx }} default
|
|
||||||
conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default
|
|
||||||
- name: archive profile
|
- name: archive profile
|
||||||
# Create this archive before dependencies are added to the local cache.
|
# Create this archive before dependencies are added to the local cache.
|
||||||
run: tar -czf conan.tar -C ~/.conan .
|
run: tar -czf conan.tar.gz -C ${CONAN_HOME} .
|
||||||
- name: build dependencies
|
- name: build dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
|
|
||||||
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ matrix.configuration }}
|
configuration: ${{ matrix.configuration }}
|
||||||
- name: upload archive
|
- name: upload archive
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
path: conan.tar
|
path: conan.tar.gz
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
|
|
||||||
test:
|
test:
|
||||||
@@ -121,26 +121,32 @@ jobs:
|
|||||||
configuration:
|
configuration:
|
||||||
- Debug
|
- Debug
|
||||||
- Release
|
- Release
|
||||||
|
include:
|
||||||
|
- compiler: gcc
|
||||||
|
compiler_version: 12
|
||||||
|
distro: ubuntu
|
||||||
|
codename: jammy
|
||||||
|
- compiler: clang
|
||||||
|
compiler_version: 16
|
||||||
|
distro: debian
|
||||||
|
codename: bookworm
|
||||||
cmake-args:
|
cmake-args:
|
||||||
-
|
-
|
||||||
- "-Dunity=ON"
|
- "-Dunity=ON"
|
||||||
needs: dependencies
|
needs: dependencies
|
||||||
runs-on: [self-hosted, heavy]
|
runs-on: [self-hosted, heavy]
|
||||||
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }}
|
||||||
env:
|
env:
|
||||||
build_dir: .build
|
build_dir: .build
|
||||||
steps:
|
steps:
|
||||||
- name: upgrade conan
|
|
||||||
run: |
|
|
||||||
pip install --upgrade "conan<2"
|
|
||||||
- name: download cache
|
- name: download cache
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
- name: extract cache
|
- name: extract cache
|
||||||
run: |
|
run: |
|
||||||
mkdir -p ~/.conan
|
mkdir -p ${CONAN_HOME}
|
||||||
tar -xzf conan.tar -C ~/.conan
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
- name: check environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
env | sort
|
env | sort
|
||||||
@@ -148,11 +154,9 @@ jobs:
|
|||||||
conan --version
|
conan --version
|
||||||
cmake --version
|
cmake --version
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: dependencies
|
- name: dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ matrix.configuration }}
|
configuration: ${{ matrix.configuration }}
|
||||||
- name: build
|
- name: build
|
||||||
@@ -161,9 +165,21 @@ jobs:
|
|||||||
generator: Ninja
|
generator: Ninja
|
||||||
configuration: ${{ matrix.configuration }}
|
configuration: ${{ matrix.configuration }}
|
||||||
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
||||||
|
- name: check linking
|
||||||
|
run: |
|
||||||
|
cd ${build_dir}
|
||||||
|
ldd ./rippled
|
||||||
|
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
|
||||||
|
echo 'The binary is statically linked.'
|
||||||
|
else
|
||||||
|
echo 'The binary is dynamically linked.'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
- name: test
|
- name: test
|
||||||
run: |
|
run: |
|
||||||
${build_dir}/rippled --unittest --unittest-jobs $(nproc)
|
cd ${build_dir}
|
||||||
|
./rippled --unittest --unittest-jobs $(nproc)
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
|
|
||||||
reference-fee-test:
|
reference-fee-test:
|
||||||
strategy:
|
strategy:
|
||||||
@@ -180,21 +196,18 @@ jobs:
|
|||||||
- "-DUNIT_TEST_REFERENCE_FEE=1000"
|
- "-DUNIT_TEST_REFERENCE_FEE=1000"
|
||||||
needs: dependencies
|
needs: dependencies
|
||||||
runs-on: [self-hosted, heavy]
|
runs-on: [self-hosted, heavy]
|
||||||
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
|
||||||
env:
|
env:
|
||||||
build_dir: .build
|
build_dir: .build
|
||||||
steps:
|
steps:
|
||||||
- name: upgrade conan
|
|
||||||
run: |
|
|
||||||
pip install --upgrade "conan<2"
|
|
||||||
- name: download cache
|
- name: download cache
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
- name: extract cache
|
- name: extract cache
|
||||||
run: |
|
run: |
|
||||||
mkdir -p ~/.conan
|
mkdir -p ${CONAN_HOME}
|
||||||
tar -xzf conan.tar -C ~/.conan
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
- name: check environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
env | sort
|
env | sort
|
||||||
@@ -202,11 +215,9 @@ jobs:
|
|||||||
conan --version
|
conan --version
|
||||||
cmake --version
|
cmake --version
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: dependencies
|
- name: dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ matrix.configuration }}
|
configuration: ${{ matrix.configuration }}
|
||||||
- name: build
|
- name: build
|
||||||
@@ -217,7 +228,9 @@ jobs:
|
|||||||
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
||||||
- name: test
|
- name: test
|
||||||
run: |
|
run: |
|
||||||
${build_dir}/rippled --unittest --unittest-jobs $(nproc)
|
cd ${build_dir}
|
||||||
|
./rippled --unittest --unittest-jobs $(nproc)
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
strategy:
|
strategy:
|
||||||
@@ -231,23 +244,18 @@ jobs:
|
|||||||
- Debug
|
- Debug
|
||||||
needs: dependencies
|
needs: dependencies
|
||||||
runs-on: [self-hosted, heavy]
|
runs-on: [self-hosted, heavy]
|
||||||
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
|
||||||
env:
|
env:
|
||||||
build_dir: .build
|
build_dir: .build
|
||||||
steps:
|
steps:
|
||||||
- name: upgrade conan
|
|
||||||
run: |
|
|
||||||
pip install --upgrade "conan<2"
|
|
||||||
- name: download cache
|
- name: download cache
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
- name: extract cache
|
- name: extract cache
|
||||||
run: |
|
run: |
|
||||||
mkdir -p ~/.conan
|
mkdir -p ${CONAN_HOME}
|
||||||
tar -xzf conan.tar -C ~/.conan
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
- name: install gcovr
|
|
||||||
run: pip install "gcovr>=7,<9"
|
|
||||||
- name: check environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
echo ${PATH} | tr ':' '\n'
|
echo ${PATH} | tr ':' '\n'
|
||||||
@@ -255,13 +263,11 @@ jobs:
|
|||||||
cmake --version
|
cmake --version
|
||||||
gcovr --version
|
gcovr --version
|
||||||
env | sort
|
env | sort
|
||||||
ls ~/.conan
|
ls ${CONAN_HOME}
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: dependencies
|
- name: dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ matrix.configuration }}
|
configuration: ${{ matrix.configuration }}
|
||||||
- name: build
|
- name: build
|
||||||
@@ -283,7 +289,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
mv "${build_dir}/coverage.xml" ./
|
mv "${build_dir}/coverage.xml" ./
|
||||||
- name: archive coverage report
|
- name: archive coverage report
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||||
with:
|
with:
|
||||||
name: coverage.xml
|
name: coverage.xml
|
||||||
path: coverage.xml
|
path: coverage.xml
|
||||||
@@ -305,22 +311,23 @@ jobs:
|
|||||||
conan:
|
conan:
|
||||||
needs: dependencies
|
needs: dependencies
|
||||||
runs-on: [self-hosted, heavy]
|
runs-on: [self-hosted, heavy]
|
||||||
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
container:
|
||||||
|
image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
|
||||||
env:
|
env:
|
||||||
build_dir: .build
|
build_dir: .build
|
||||||
|
platform: linux
|
||||||
|
compiler: gcc
|
||||||
|
compiler_version: 12
|
||||||
configuration: Release
|
configuration: Release
|
||||||
steps:
|
steps:
|
||||||
- name: upgrade conan
|
|
||||||
run: |
|
|
||||||
pip install --upgrade "conan<2"
|
|
||||||
- name: download cache
|
- name: download cache
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
with:
|
with:
|
||||||
name: linux-gcc-${{ env.configuration }}
|
name: ${{ env.platform }}-${{ env.compiler }}-${{ env.configuration }}
|
||||||
- name: extract cache
|
- name: extract cache
|
||||||
run: |
|
run: |
|
||||||
mkdir -p ~/.conan
|
mkdir -p ${CONAN_HOME}
|
||||||
tar -xzf conan.tar -C ~/.conan
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
- name: check environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
env | sort
|
env | sort
|
||||||
@@ -328,116 +335,88 @@ jobs:
|
|||||||
conan --version
|
conan --version
|
||||||
cmake --version
|
cmake --version
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: dependencies
|
- name: dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ env.configuration }}
|
configuration: ${{ env.configuration }}
|
||||||
- name: export
|
- name: export
|
||||||
run: |
|
run: |
|
||||||
version=$(conan inspect --raw version .)
|
conan export . --version head
|
||||||
reference="xrpl/${version}@local/test"
|
|
||||||
conan remove -f ${reference} || true
|
|
||||||
conan export . local/test
|
|
||||||
echo "reference=${reference}" >> "${GITHUB_ENV}"
|
|
||||||
- name: build
|
- name: build
|
||||||
run: |
|
run: |
|
||||||
cd tests/conan
|
cd tests/conan
|
||||||
mkdir ${build_dir}
|
mkdir ${build_dir} && cd ${build_dir}
|
||||||
cd ${build_dir}
|
conan install .. \
|
||||||
conan install .. --output-folder . \
|
--settings:all build_type=${configuration} \
|
||||||
--require-override ${reference} --build missing
|
--output-folder . \
|
||||||
|
--build missing
|
||||||
cmake .. \
|
cmake .. \
|
||||||
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \
|
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \
|
||||||
-DCMAKE_BUILD_TYPE=${configuration}
|
-DCMAKE_BUILD_TYPE=${configuration}
|
||||||
cmake --build .
|
cmake --build .
|
||||||
./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
|
./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
|
||||||
|
|
||||||
# NOTE we are not using dependencies built above because it lags with
|
|
||||||
# compiler versions. Instrumentation requires clang version 16 or
|
|
||||||
# later
|
|
||||||
|
|
||||||
instrumentation-build:
|
instrumentation-build:
|
||||||
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
needs: dependencies
|
||||||
env:
|
|
||||||
CLANG_RELEASE: 16
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
runs-on: [self-hosted, heavy]
|
runs-on: [self-hosted, heavy]
|
||||||
container: debian:bookworm
|
container: ghcr.io/xrplf/ci/debian-bookworm:clang-16
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
steps:
|
steps:
|
||||||
- name: install prerequisites
|
- name: download cache
|
||||||
env:
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
DEBIAN_FRONTEND: noninteractive
|
with:
|
||||||
run: |
|
name: linux-clang-Debug
|
||||||
apt-get update
|
|
||||||
apt-get install --yes --no-install-recommends \
|
|
||||||
clang-${CLANG_RELEASE} clang++-${CLANG_RELEASE} \
|
|
||||||
python3-pip python-is-python3 make cmake git wget
|
|
||||||
apt-get clean
|
|
||||||
update-alternatives --install \
|
|
||||||
/usr/bin/clang clang /usr/bin/clang-${CLANG_RELEASE} 100 \
|
|
||||||
--slave /usr/bin/clang++ clang++ /usr/bin/clang++-${CLANG_RELEASE}
|
|
||||||
update-alternatives --auto clang
|
|
||||||
pip install --no-cache --break-system-packages "conan<2"
|
|
||||||
|
|
||||||
- name: checkout
|
- name: extract cache
|
||||||
uses: actions/checkout@v4
|
run: |
|
||||||
|
mkdir -p ${CONAN_HOME}
|
||||||
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
|
|
||||||
- name: prepare environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
mkdir ${GITHUB_WORKSPACE}/.build
|
echo ${PATH} | tr ':' '\n'
|
||||||
echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV
|
conan --version
|
||||||
echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV
|
cmake --version
|
||||||
echo "CC=/usr/bin/clang" >> $GITHUB_ENV
|
env | sort
|
||||||
echo "CXX=/usr/bin/clang++" >> $GITHUB_ENV
|
ls ${CONAN_HOME}
|
||||||
|
|
||||||
- name: configure Conan
|
- name: checkout
|
||||||
run: |
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
conan profile new --detect default
|
|
||||||
conan profile update settings.compiler=clang default
|
|
||||||
conan profile update settings.compiler.version=${CLANG_RELEASE} default
|
|
||||||
conan profile update settings.compiler.libcxx=libstdc++11 default
|
|
||||||
conan profile update settings.compiler.cppstd=20 default
|
|
||||||
conan profile update options.rocksdb=False default
|
|
||||||
conan profile update \
|
|
||||||
'conf.tools.build:compiler_executables={"c": "/usr/bin/clang", "cpp": "/usr/bin/clang++"}' default
|
|
||||||
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default
|
|
||||||
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
|
|
||||||
conan export external/snappy snappy/1.1.10@
|
|
||||||
conan export external/soci soci/4.0.3@
|
|
||||||
|
|
||||||
- name: build dependencies
|
- name: dependencies
|
||||||
run: |
|
uses: ./.github/actions/dependencies
|
||||||
cd ${BUILD_DIR}
|
with:
|
||||||
conan install ${SOURCE_DIR} \
|
configuration: Debug
|
||||||
--output-folder ${BUILD_DIR} \
|
|
||||||
--install-folder ${BUILD_DIR} \
|
|
||||||
--build missing \
|
|
||||||
--settings build_type=Debug
|
|
||||||
|
|
||||||
- name: build with instrumentation
|
- name: prepare environment
|
||||||
run: |
|
run: |
|
||||||
cd ${BUILD_DIR}
|
mkdir -p ${build_dir}
|
||||||
cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \
|
echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV
|
||||||
-Dvoidstar=ON \
|
echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV
|
||||||
-Dtests=ON \
|
|
||||||
-Dxrpld=ON \
|
|
||||||
-DCMAKE_BUILD_TYPE=Debug \
|
|
||||||
-DSECP256K1_BUILD_BENCHMARK=OFF \
|
|
||||||
-DSECP256K1_BUILD_TESTS=OFF \
|
|
||||||
-DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \
|
|
||||||
-DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake
|
|
||||||
cmake --build . --parallel $(nproc)
|
|
||||||
|
|
||||||
- name: verify instrumentation enabled
|
- name: build with instrumentation
|
||||||
run: |
|
run: |
|
||||||
cd ${BUILD_DIR}
|
cd ${BUILD_DIR}
|
||||||
./rippled --version | grep libvoidstar
|
cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \
|
||||||
|
-Dvoidstar=ON \
|
||||||
|
-Dtests=ON \
|
||||||
|
-Dxrpld=ON \
|
||||||
|
-DCMAKE_BUILD_TYPE=Debug \
|
||||||
|
-DSECP256K1_BUILD_BENCHMARK=OFF \
|
||||||
|
-DSECP256K1_BUILD_TESTS=OFF \
|
||||||
|
-DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \
|
||||||
|
-DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake
|
||||||
|
cmake --build . --parallel $(nproc)
|
||||||
|
|
||||||
- name: run unit tests
|
- name: verify instrumentation enabled
|
||||||
run: |
|
run: |
|
||||||
cd ${BUILD_DIR}
|
cd ${BUILD_DIR}
|
||||||
./rippled -u --unittest-jobs $(( $(nproc)/4 ))
|
./rippled --version | grep libvoidstar
|
||||||
|
|
||||||
|
- name: run unit tests
|
||||||
|
run: |
|
||||||
|
cd ${BUILD_DIR}
|
||||||
|
./rippled -u --unittest-jobs $(( $(nproc)/4 ))
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
|
|||||||
51
.github/workflows/windows.yml
vendored
51
.github/workflows/windows.yml
vendored
@@ -12,15 +12,27 @@ on:
|
|||||||
- release
|
- release
|
||||||
- master
|
- master
|
||||||
# Branches that opt-in to running
|
# Branches that opt-in to running
|
||||||
- 'ci/**'
|
- "ci/**"
|
||||||
|
|
||||||
# https://docs.github.com/en/actions/using-jobs/using-concurrency
|
# https://docs.github.com/en/actions/using-jobs/using-concurrency
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
# This part of the Conan configuration is specific to this workflow only; we
|
||||||
|
# do not want to pollute the 'conan/profiles' directory with settings that
|
||||||
|
# might not work for other workflows.
|
||||||
|
CONAN_GLOBAL_CONF: |
|
||||||
|
core.download:parallel={{os.cpu_count()}}
|
||||||
|
core.upload:parallel={{os.cpu_count()}}
|
||||||
|
tools.build:jobs=24
|
||||||
|
tools.build:verbosity=verbose
|
||||||
|
tools.compilation:verbosity=verbose
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
test:
|
test:
|
||||||
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
strategy:
|
strategy:
|
||||||
@@ -42,11 +54,11 @@ jobs:
|
|||||||
build_dir: .build
|
build_dir: .build
|
||||||
steps:
|
steps:
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
- name: choose Python
|
- name: choose Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065
|
||||||
with:
|
with:
|
||||||
python-version: 3.9
|
python-version: 3.13
|
||||||
- name: learn Python cache directory
|
- name: learn Python cache directory
|
||||||
id: pip-cache
|
id: pip-cache
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -54,12 +66,12 @@ jobs:
|
|||||||
python -m pip install --upgrade pip
|
python -m pip install --upgrade pip
|
||||||
echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT}
|
echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT}
|
||||||
- name: restore Python cache directory
|
- name: restore Python cache directory
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684
|
||||||
with:
|
with:
|
||||||
path: ${{ steps.pip-cache.outputs.dir }}
|
path: ${{ steps.pip-cache.outputs.dir }}
|
||||||
key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }}
|
key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }}
|
||||||
- name: install Conan
|
- name: install Conan
|
||||||
run: pip install wheel 'conan<2'
|
run: pip install wheel conan
|
||||||
- name: check environment
|
- name: check environment
|
||||||
run: |
|
run: |
|
||||||
dir env:
|
dir env:
|
||||||
@@ -70,30 +82,25 @@ jobs:
|
|||||||
- name: configure Conan
|
- name: configure Conan
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
conan profile new default --detect
|
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
|
||||||
conan profile update settings.compiler.cppstd=20 default
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
conan profile update \
|
conan profile show
|
||||||
settings.compiler.runtime=MT${{ matrix.configuration.runtime }} \
|
|
||||||
default
|
|
||||||
- name: build dependencies
|
- name: build dependencies
|
||||||
uses: ./.github/actions/dependencies
|
uses: ./.github/actions/dependencies
|
||||||
env:
|
|
||||||
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
|
|
||||||
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
|
|
||||||
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
configuration: ${{ matrix.configuration.type }}
|
configuration: ${{ matrix.configuration.type }}
|
||||||
- name: build
|
- name: build
|
||||||
uses: ./.github/actions/build
|
uses: ./.github/actions/build
|
||||||
with:
|
with:
|
||||||
generator: '${{ matrix.version.generator }}'
|
generator: "${{ matrix.version.generator }}"
|
||||||
configuration: ${{ matrix.configuration.type }}
|
configuration: ${{ matrix.configuration.type }}
|
||||||
# Hard code for now. Move to the matrix if varied options are needed
|
# Hard code for now. Move to the matrix if varied options are needed
|
||||||
cmake-args: '-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON'
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON"
|
||||||
cmake-target: install
|
cmake-target: install
|
||||||
- name: test
|
- name: test
|
||||||
shell: bash
|
shell: bash
|
||||||
if: ${{ matrix.configuration.tests }}
|
if: ${{ matrix.configuration.tests }}
|
||||||
run: |
|
run: |
|
||||||
${build_dir}/${{ matrix.configuration.type }}/rippled --unittest \
|
cd ${build_dir}/${{ matrix.configuration.type }}
|
||||||
--unittest-jobs $(nproc)
|
./rippled --unittest --unittest-jobs $(nproc)
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# .pre-commit-config.yaml
|
# .pre-commit-config.yaml
|
||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pre-commit/mirrors-clang-format
|
- repo: https://github.com/pre-commit/mirrors-clang-format
|
||||||
rev: v18.1.3
|
rev: v18.1.8
|
||||||
hooks:
|
hooks:
|
||||||
- id: clang-format
|
- id: clang-format
|
||||||
|
|||||||
607
BUILD.md
607
BUILD.md
@@ -3,29 +3,29 @@
|
|||||||
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |
|
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |
|
||||||
|
|
||||||
> These instructions also assume a basic familiarity with Conan and CMake.
|
> These instructions also assume a basic familiarity with Conan and CMake.
|
||||||
> If you are unfamiliar with Conan,
|
> If you are unfamiliar with Conan, you can read our
|
||||||
> you can read our [crash course](./docs/build/conan.md)
|
> [crash course](./docs/build/conan.md) or the official [Getting Started][3]
|
||||||
> or the official [Getting Started][3] walkthrough.
|
> walkthrough.
|
||||||
|
|
||||||
## Branches
|
## Branches
|
||||||
|
|
||||||
For a stable release, choose the `master` branch or one of the [tagged
|
For a stable release, choose the `master` branch or one of the [tagged
|
||||||
releases](https://github.com/ripple/rippled/releases).
|
releases](https://github.com/ripple/rippled/releases).
|
||||||
|
|
||||||
```
|
```bash
|
||||||
git checkout master
|
git checkout master
|
||||||
```
|
```
|
||||||
|
|
||||||
For the latest release candidate, choose the `release` branch.
|
For the latest release candidate, choose the `release` branch.
|
||||||
|
|
||||||
```
|
```bash
|
||||||
git checkout release
|
git checkout release
|
||||||
```
|
```
|
||||||
|
|
||||||
For the latest set of untested features, or to contribute, choose the `develop`
|
For the latest set of untested features, or to contribute, choose the `develop`
|
||||||
branch.
|
branch.
|
||||||
|
|
||||||
```
|
```bash
|
||||||
git checkout develop
|
git checkout develop
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -33,176 +33,323 @@ git checkout develop
|
|||||||
|
|
||||||
See [System Requirements](https://xrpl.org/system-requirements.html).
|
See [System Requirements](https://xrpl.org/system-requirements.html).
|
||||||
|
|
||||||
Building rippled generally requires git, Python, Conan, CMake, and a C++ compiler. Some guidance on setting up such a [C++ development environment can be found here](./docs/build/environment.md).
|
Building rippled generally requires git, Python, Conan, CMake, and a C++
|
||||||
|
compiler. Some guidance on setting up such a [C++ development environment can be
|
||||||
|
found here](./docs/build/environment.md).
|
||||||
|
|
||||||
- [Python 3.7](https://www.python.org/downloads/)
|
- [Python 3.11](https://www.python.org/downloads/), or higher
|
||||||
- [Conan 1.60](https://conan.io/downloads.html)[^1]
|
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
|
||||||
- [CMake 3.16](https://cmake.org/download/)
|
- [CMake 3.22](https://cmake.org/download/)[^2], or higher
|
||||||
|
|
||||||
[^1]: It is possible to build with Conan 2.x,
|
[^1]:
|
||||||
but the instructions are significantly different,
|
It is possible to build with Conan 1.60+, but the instructions are
|
||||||
which is why we are not recommending it yet.
|
significantly different, which is why we are not recommending it.
|
||||||
Notably, the `conan profile update` command is removed in 2.x.
|
|
||||||
Profiles must be edited by hand.
|
[^2]:
|
||||||
|
CMake 4 is not yet supported by all dependencies required by this project.
|
||||||
|
If you are affected by this issue, follow [conan workaround for cmake
|
||||||
|
4](#workaround-for-cmake-4)
|
||||||
|
|
||||||
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
|
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
|
||||||
The [minimum compiler versions][2] required are:
|
The [minimum compiler versions][2] required are:
|
||||||
|
|
||||||
| Compiler | Version |
|
| Compiler | Version |
|
||||||
|-------------|---------|
|
| ----------- | --------- |
|
||||||
| GCC | 11 |
|
| GCC | 12 |
|
||||||
| Clang | 13 |
|
| Clang | 16 |
|
||||||
| Apple Clang | 13.1.6 |
|
| Apple Clang | 16 |
|
||||||
| MSVC | 19.23 |
|
| MSVC | 19.44[^3] |
|
||||||
|
|
||||||
### Linux
|
### Linux
|
||||||
|
|
||||||
The Ubuntu operating system has received the highest level of
|
The Ubuntu Linux distribution has received the highest level of quality
|
||||||
quality assurance, testing, and support.
|
assurance, testing, and support. We also support Red Hat and use Debian
|
||||||
|
internally.
|
||||||
|
|
||||||
Here are [sample instructions for setting up a C++ development environment on Linux](./docs/build/environment.md#linux).
|
Here are [sample instructions for setting up a C++ development environment on
|
||||||
|
Linux](./docs/build/environment.md#linux).
|
||||||
|
|
||||||
### Mac
|
### Mac
|
||||||
|
|
||||||
Many rippled engineers use macOS for development.
|
Many rippled engineers use macOS for development.
|
||||||
|
|
||||||
Here are [sample instructions for setting up a C++ development environment on macOS](./docs/build/environment.md#macos).
|
Here are [sample instructions for setting up a C++ development environment on
|
||||||
|
macOS](./docs/build/environment.md#macos).
|
||||||
|
|
||||||
### Windows
|
### Windows
|
||||||
|
|
||||||
Windows is not recommended for production use at this time.
|
Windows is used by some engineers for development only.
|
||||||
|
|
||||||
- Additionally, 32-bit Windows development is not supported.
|
[^3]: Windows is not recommended for production use.
|
||||||
|
|
||||||
[Boost]: https://www.boost.org/
|
|
||||||
|
|
||||||
## Steps
|
## Steps
|
||||||
|
|
||||||
### Set Up Conan
|
### Set Up Conan
|
||||||
|
|
||||||
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
|
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python,
|
||||||
|
Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
|
||||||
|
|
||||||
These instructions assume a basic familiarity with Conan and CMake.
|
These instructions assume a basic familiarity with Conan and CMake. If you are
|
||||||
|
unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official
|
||||||
|
[Getting Started][3] walkthrough.
|
||||||
|
|
||||||
If you are unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official [Getting Started][3] walkthrough.
|
#### Default profile
|
||||||
|
|
||||||
You'll need at least one Conan profile:
|
We recommend that you import the provided `conan/profiles/default` profile:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
conan profile new default --detect
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
```
|
|
||||||
|
|
||||||
Update the compiler settings:
|
|
||||||
|
|
||||||
```
|
|
||||||
conan profile update settings.compiler.cppstd=20 default
|
|
||||||
```
|
|
||||||
|
|
||||||
Configure Conan (1.x only) to use recipe revisions:
|
|
||||||
|
|
||||||
```
|
|
||||||
conan config set general.revisions_enabled=1
|
|
||||||
```
|
|
||||||
|
|
||||||
**Linux** developers will commonly have a default Conan [profile][] that compiles
|
|
||||||
with GCC and links with libstdc++.
|
|
||||||
If you are linking with libstdc++ (see profile setting `compiler.libcxx`),
|
|
||||||
then you will need to choose the `libstdc++11` ABI:
|
|
||||||
|
|
||||||
```
|
|
||||||
conan profile update settings.compiler.libcxx=libstdc++11 default
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
Ensure inter-operability between `boost::string_view` and `std::string_view` types:
|
|
||||||
|
|
||||||
```
|
|
||||||
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_BEAST_USE_STD_STRING_VIEW"]' default
|
|
||||||
conan profile update 'env.CXXFLAGS="-DBOOST_BEAST_USE_STD_STRING_VIEW"' default
|
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have other flags in the `conf.tools.build` or `env.CXXFLAGS` sections, make sure to retain the existing flags and append the new ones. You can check them with:
|
You can check your Conan profile by running:
|
||||||
```
|
|
||||||
conan profile show default
|
```bash
|
||||||
|
conan profile show
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Custom profile
|
||||||
|
|
||||||
**Windows** developers may need to use the x64 native build tools.
|
If the default profile does not work for you and you do not yet have a Conan
|
||||||
An easy way to do that is to run the shortcut "x64 Native Tools Command
|
profile, you can create one by running:
|
||||||
Prompt" for the version of Visual Studio that you have installed.
|
|
||||||
|
|
||||||
Windows developers must also build `rippled` and its dependencies for the x64
|
```bash
|
||||||
architecture:
|
conan profile detect
|
||||||
|
|
||||||
```
|
|
||||||
conan profile update settings.arch=x86_64 default
|
|
||||||
```
|
|
||||||
|
|
||||||
### Multiple compilers
|
|
||||||
|
|
||||||
When `/usr/bin/g++` exists on a platform, it is the default cpp compiler. This
|
|
||||||
default works for some users.
|
|
||||||
|
|
||||||
However, if this compiler cannot build rippled or its dependencies, then you can
|
|
||||||
install another compiler and set Conan and CMake to use it.
|
|
||||||
Update the `conf.tools.build:compiler_executables` setting in order to set the correct variables (`CMAKE_<LANG>_COMPILER`) in the
|
|
||||||
generated CMake toolchain file.
|
|
||||||
For example, on Ubuntu 20, you may have gcc at `/usr/bin/gcc` and g++ at `/usr/bin/g++`; if that is the case, you can select those compilers with:
|
|
||||||
```
|
|
||||||
conan profile update 'conf.tools.build:compiler_executables={"c": "/usr/bin/gcc", "cpp": "/usr/bin/g++"}' default
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Replace `/usr/bin/gcc` and `/usr/bin/g++` with paths to the desired compilers.
|
You may need to make changes to the profile to suit your environment. You can
|
||||||
|
refer to the provided `conan/profiles/default` profile for inspiration, and you
|
||||||
|
may also need to apply the required [tweaks](#conan-profile-tweaks) to this
|
||||||
|
default profile.
|
||||||
|
|
||||||
It should choose the compiler for dependencies as well,
|
### Patched recipes
|
||||||
but not all of them have a Conan recipe that respects this setting (yet).
|
|
||||||
For the rest, you can set these environment variables.
|
|
||||||
Replace `<path>` with paths to the desired compilers:
|
|
||||||
|
|
||||||
- `conan profile update env.CC=<path> default`
|
The recipes in Conan Center occasionally need to be patched for compatibility
|
||||||
- `conan profile update env.CXX=<path> default`
|
with the latest version of `rippled`. We maintain a fork of the Conan Center
|
||||||
|
[here](https://github.com/XRPLF/conan-center-index/) containing the patches.
|
||||||
|
|
||||||
Export our [Conan recipe for Snappy](./external/snappy).
|
To ensure our patched recipes are used, you must add our Conan remote at a
|
||||||
It does not explicitly link the C++ standard library,
|
higher index than the default Conan Center remote, so it is consulted first. You
|
||||||
which allows you to statically link it with GCC, if you want.
|
can do this by running:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
# Conan 1.x
|
conan remote add --index 0 xrplf "https://conan.ripplex.io"
|
||||||
conan export external/snappy snappy/1.1.10@
|
```
|
||||||
# Conan 2.x
|
|
||||||
conan export --version 1.1.10 external/snappy
|
|
||||||
```
|
|
||||||
|
|
||||||
Export our [Conan recipe for RocksDB](./external/rocksdb).
|
Alternatively, you can pull the patched recipes into the repository and use them
|
||||||
It does not override paths to dependencies when building with Visual Studio.
|
locally:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
# Conan 1.x
|
cd external
|
||||||
conan export external/rocksdb rocksdb/9.7.3@
|
git init
|
||||||
# Conan 2.x
|
git remote add origin git@github.com:XRPLF/conan-center-index.git
|
||||||
conan export --version 9.7.3 external/rocksdb
|
git sparse-checkout init
|
||||||
```
|
git sparse-checkout set recipes/snappy
|
||||||
|
git sparse-checkout add recipes/soci
|
||||||
|
git fetch origin master
|
||||||
|
git checkout master
|
||||||
|
conan export --version 1.1.10 recipes/snappy/all
|
||||||
|
conan export --version 4.0.3 recipes/soci/all
|
||||||
|
rm -rf .git
|
||||||
|
```
|
||||||
|
|
||||||
Export our [Conan recipe for SOCI](./external/soci).
|
In the case we switch to a newer version of a dependency that still requires a
|
||||||
It patches their CMake to correctly import its dependencies.
|
patch, it will be necessary for you to pull in the changes and re-export the
|
||||||
|
updated dependencies with the newer version. However, if we switch to a newer
|
||||||
|
version that no longer requires a patch, no action is required on your part, as
|
||||||
|
the new recipe will be automatically pulled from the official Conan Center.
|
||||||
|
|
||||||
```
|
### Conan profile tweaks
|
||||||
# Conan 1.x
|
|
||||||
conan export external/soci soci/4.0.3@
|
|
||||||
# Conan 2.x
|
|
||||||
conan export --version 4.0.3 external/soci
|
|
||||||
```
|
|
||||||
|
|
||||||
Export our [Conan recipe for NuDB](./external/nudb).
|
#### Missing compiler version
|
||||||
It fixes some source files to add missing `#include`s.
|
|
||||||
|
|
||||||
|
If you see an error similar to the following after running `conan profile show`:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
# Conan 1.x
|
ERROR: Invalid setting '17' is not a valid 'settings.compiler.version' value.
|
||||||
conan export external/nudb nudb/2.0.8@
|
Possible values are ['5.0', '5.1', '6.0', '6.1', '7.0', '7.3', '8.0', '8.1',
|
||||||
# Conan 2.x
|
'9.0', '9.1', '10.0', '11.0', '12.0', '13', '13.0', '13.1', '14', '14.0', '15',
|
||||||
conan export --version 2.0.8 external/nudb
|
'15.0', '16', '16.0']
|
||||||
```
|
Read "http://docs.conan.io/2/knowledge/faq.html#error-invalid-setting"
|
||||||
|
```
|
||||||
|
|
||||||
|
you need to amend the list of compiler versions in
|
||||||
|
`$(conan config home)/settings.yml`, by appending the required version number(s)
|
||||||
|
to the `version` array specific for your compiler. For example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apple-clang:
|
||||||
|
version:
|
||||||
|
[
|
||||||
|
"5.0",
|
||||||
|
"5.1",
|
||||||
|
"6.0",
|
||||||
|
"6.1",
|
||||||
|
"7.0",
|
||||||
|
"7.3",
|
||||||
|
"8.0",
|
||||||
|
"8.1",
|
||||||
|
"9.0",
|
||||||
|
"9.1",
|
||||||
|
"10.0",
|
||||||
|
"11.0",
|
||||||
|
"12.0",
|
||||||
|
"13",
|
||||||
|
"13.0",
|
||||||
|
"13.1",
|
||||||
|
"14",
|
||||||
|
"14.0",
|
||||||
|
"15",
|
||||||
|
"15.0",
|
||||||
|
"16",
|
||||||
|
"16.0",
|
||||||
|
"17",
|
||||||
|
"17.0",
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multiple compilers
|
||||||
|
|
||||||
|
If you have multiple compilers installed, make sure to select the one to use in
|
||||||
|
your default Conan configuration **before** running `conan profile detect`, by
|
||||||
|
setting the `CC` and `CXX` environment variables.
|
||||||
|
|
||||||
|
For example, if you are running MacOS and have [homebrew
|
||||||
|
LLVM@18](https://formulae.brew.sh/formula/llvm@18), and want to use it as a
|
||||||
|
compiler in the new Conan profile:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export CC=$(brew --prefix llvm@18)/bin/clang
|
||||||
|
export CXX=$(brew --prefix llvm@18)/bin/clang++
|
||||||
|
conan profile detect
|
||||||
|
```
|
||||||
|
|
||||||
|
You should also explicitly set the path to the compiler in the profile file,
|
||||||
|
which helps to avoid errors when `CC` and/or `CXX` are set and disagree with the
|
||||||
|
selected Conan profile. For example:
|
||||||
|
|
||||||
|
```text
|
||||||
|
[conf]
|
||||||
|
tools.build:compiler_executables={'c':'/usr/bin/gcc','cpp':'/usr/bin/g++'}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multiple profiles
|
||||||
|
|
||||||
|
You can manage multiple Conan profiles in the directory
|
||||||
|
`$(conan config home)/profiles`, for example renaming `default` to a different
|
||||||
|
name and then creating a new `default` profile for a different compiler.
|
||||||
|
|
||||||
|
#### Select language
|
||||||
|
|
||||||
|
The default profile created by Conan will typically select a different C++ dialect
|
||||||
|
than the C++20 used by this project. You should set `20` in the profile line
|
||||||
|
starting with `compiler.cppstd=`. For example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sed -i.bak -e 's|^compiler\.cppstd=.*$|compiler.cppstd=20|' $(conan config home)/profiles/default
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Select standard library in Linux
|
||||||
|
|
||||||
|
**Linux** developers will commonly have a default Conan [profile][] that
|
||||||
|
compiles with GCC and links with libstdc++. If you are linking with libstdc++
|
||||||
|
(see profile setting `compiler.libcxx`), then you will need to choose the
|
||||||
|
`libstdc++11` ABI:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sed -i.bak -e 's|^compiler\.libcxx=.*$|compiler.libcxx=libstdc++11|' $(conan config home)/profiles/default
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Select architecture and runtime in Windows
|
||||||
|
|
||||||
|
**Windows** developers may need to use the x64 native build tools. An easy way
|
||||||
|
to do that is to run the shortcut "x64 Native Tools Command Prompt" for the
|
||||||
|
version of Visual Studio that you have installed.
|
||||||
|
|
||||||
|
Windows developers must also build `rippled` and its dependencies for the x64
|
||||||
|
architecture:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
|
||||||
|
```
|
||||||
|
|
||||||
|
**Windows** developers also must select static runtime:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Workaround for CMake 4
|
||||||
|
|
||||||
|
If your system CMake is version 4 rather than 3, you may have to configure the Conan
|
||||||
|
profile to use CMake version 3 for dependencies, by adding the following two
|
||||||
|
lines to your profile:
|
||||||
|
|
||||||
|
```text
|
||||||
|
[tool_requires]
|
||||||
|
!cmake/*: cmake/[>=3 <4]
|
||||||
|
```
|
||||||
|
|
||||||
|
This will force Conan to download and use a locally cached CMake 3 version, and
|
||||||
|
is needed because some of the dependencies used by this project do not support
|
||||||
|
CMake 4.
|
||||||
|
|
||||||
|
#### Clang workaround for grpc
|
||||||
|
|
||||||
|
If your compiler is clang, version 19 or later, or apple-clang, version 17 or
|
||||||
|
later, you may encounter a compilation error while building the `grpc`
|
||||||
|
dependency:
|
||||||
|
|
||||||
|
```text
|
||||||
|
In file included from .../lib/promise/try_seq.h:26:
|
||||||
|
.../lib/promise/detail/basic_seq.h:499:38: error: a template argument list is expected after a name prefixed by the template keyword [-Wmissing-template-arg-list-after-template-kw]
|
||||||
|
499 | Traits::template CallSeqFactory(f_, *cur_, std::move(arg)));
|
||||||
|
| ^
|
||||||
|
```
|
||||||
|
|
||||||
|
The workaround for this error is to add two lines to your profile:
|
||||||
|
|
||||||
|
```text
|
||||||
|
[conf]
|
||||||
|
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Workaround for gcc 12
|
||||||
|
|
||||||
|
If your compiler is gcc, version 12, and you have enabled the `werr` option, you may
|
||||||
|
encounter a compilation error such as:
|
||||||
|
|
||||||
|
```text
|
||||||
|
/usr/include/c++/12/bits/char_traits.h:435:56: error: 'void* __builtin_memcpy(void*, const void*, long unsigned int)' accessing 9223372036854775810 or more bytes at offsets [2, 9223372036854775807] and 1 may overlap up to 9223372036854775813 bytes at offset -3 [-Werror=restrict]
|
||||||
|
435 | return static_cast<char_type*>(__builtin_memcpy(__s1, __s2, __n));
|
||||||
|
| ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~
|
||||||
|
cc1plus: all warnings being treated as errors
|
||||||
|
```
|
||||||
|
|
||||||
|
The workaround for this error is to add two lines to your profile:
|
||||||
|
|
||||||
|
```text
|
||||||
|
[conf]
|
||||||
|
tools.build:cxxflags=['-Wno-restrict']
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Workaround for clang 16
|
||||||
|
|
||||||
|
If your compiler is clang, version 16, you may encounter a compilation error such
|
||||||
|
as:
|
||||||
|
|
||||||
|
```text
|
||||||
|
In file included from .../boost/beast/websocket/stream.hpp:2857:
|
||||||
|
.../boost/beast/websocket/impl/read.hpp:695:17: error: call to 'async_teardown' is ambiguous
|
||||||
|
async_teardown(impl.role, impl.stream(),
|
||||||
|
^~~~~~~~~~~~~~
|
||||||
|
```
|
||||||
|
|
||||||
|
The workaround for this error is to add two lines to your profile:
|
||||||
|
|
||||||
|
```text
|
||||||
|
[conf]
|
||||||
|
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
|
||||||
|
```
|
||||||
|
|
||||||
### Build and Test
|
### Build and Test
|
||||||
|
|
||||||
@@ -224,66 +371,65 @@ It fixes some source files to add missing `#include`s.
|
|||||||
|
|
||||||
2. Use conan to generate CMake files for every configuration you want to build:
|
2. Use conan to generate CMake files for every configuration you want to build:
|
||||||
|
|
||||||
```
|
```
|
||||||
conan install .. --output-folder . --build missing --settings build_type=Release
|
conan install .. --output-folder . --build missing --settings build_type=Release
|
||||||
conan install .. --output-folder . --build missing --settings build_type=Debug
|
conan install .. --output-folder . --build missing --settings build_type=Debug
|
||||||
```
|
```
|
||||||
|
|
||||||
To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug`
|
To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug`
|
||||||
|
|
||||||
For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`,
|
For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`,
|
||||||
you only need to run this command once.
|
you only need to run this command once.
|
||||||
For a multi-configuration generator, e.g. `Visual Studio`, you may want to
|
For a multi-configuration generator, e.g. `Visual Studio`, you may want to
|
||||||
run it more than once.
|
run it more than once.
|
||||||
|
|
||||||
Each of these commands should also have a different `build_type` setting.
|
Each of these commands should also have a different `build_type` setting.
|
||||||
A second command with the same `build_type` setting will overwrite the files
|
A second command with the same `build_type` setting will overwrite the files
|
||||||
generated by the first. You can pass the build type on the command line with
|
generated by the first. You can pass the build type on the command line with
|
||||||
`--settings build_type=$BUILD_TYPE` or in the profile itself,
|
`--settings build_type=$BUILD_TYPE` or in the profile itself,
|
||||||
under the section `[settings]` with the key `build_type`.
|
under the section `[settings]` with the key `build_type`.
|
||||||
|
|
||||||
If you are using a Microsoft Visual C++ compiler,
|
If you are using a Microsoft Visual C++ compiler,
|
||||||
then you will need to ensure consistency between the `build_type` setting
|
then you will need to ensure consistency between the `build_type` setting
|
||||||
and the `compiler.runtime` setting.
|
and the `compiler.runtime` setting.
|
||||||
|
|
||||||
When `build_type` is `Release`, `compiler.runtime` should be `MT`.
|
When `build_type` is `Release`, `compiler.runtime` should be `MT`.
|
||||||
|
|
||||||
When `build_type` is `Debug`, `compiler.runtime` should be `MTd`.
|
When `build_type` is `Debug`, `compiler.runtime` should be `MTd`.
|
||||||
|
|
||||||
```
|
```
|
||||||
conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT
|
conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT
|
||||||
conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd
|
conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Configure CMake and pass the toolchain file generated by Conan, located at
|
3. Configure CMake and pass the toolchain file generated by Conan, located at
|
||||||
`$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`.
|
`$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`.
|
||||||
|
|
||||||
Single-config generators:
|
Single-config generators:
|
||||||
|
|
||||||
Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type]
|
Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type]
|
||||||
and make sure it matches the one of the `build_type` settings
|
and make sure it matches the one of the `build_type` settings
|
||||||
you chose in the previous step.
|
you chose in the previous step.
|
||||||
|
|
||||||
For example, to build Debug, in the next command, replace "Release" with "Debug"
|
For example, to build Debug, in the next command, replace "Release" with "Debug"
|
||||||
|
|
||||||
```
|
```
|
||||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON ..
|
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON ..
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Multi-config generators:
|
||||||
|
|
||||||
Multi-config generators:
|
```
|
||||||
|
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON ..
|
||||||
|
```
|
||||||
|
|
||||||
```
|
**Note:** You can pass build options for `rippled` in this step.
|
||||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON ..
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** You can pass build options for `rippled` in this step.
|
4. Build `rippled`.
|
||||||
|
|
||||||
5. Build `rippled`.
|
|
||||||
|
|
||||||
For a single-configuration generator, it will build whatever configuration
|
For a single-configuration generator, it will build whatever configuration
|
||||||
you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator,
|
you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, you
|
||||||
you must pass the option `--config` to select the build configuration.
|
must pass the option `--config` to select the build configuration.
|
||||||
|
|
||||||
Single-config generators:
|
Single-config generators:
|
||||||
|
|
||||||
@@ -298,24 +444,27 @@ It fixes some source files to add missing `#include`s.
|
|||||||
cmake --build . --config Debug
|
cmake --build . --config Debug
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Test rippled.
|
5. Test rippled.
|
||||||
|
|
||||||
Single-config generators:
|
Single-config generators:
|
||||||
|
|
||||||
```
|
```
|
||||||
./rippled --unittest
|
./rippled --unittest --unittest-jobs N
|
||||||
```
|
```
|
||||||
|
|
||||||
Multi-config generators:
|
Multi-config generators:
|
||||||
|
|
||||||
```
|
```
|
||||||
./Release/rippled --unittest
|
./Release/rippled --unittest --unittest-jobs N
|
||||||
./Debug/rippled --unittest
|
./Debug/rippled --unittest --unittest-jobs N
|
||||||
```
|
```
|
||||||
|
|
||||||
The location of `rippled` in your build directory depends on your CMake
|
Replace the `--unittest-jobs` parameter N with the desired unit tests
|
||||||
generator. Pass `--help` to see the rest of the command line options.
|
concurrency. The recommended setting is half of the number of available CPU
|
||||||
|
cores.
|
||||||
|
|
||||||
|
The location of `rippled` binary in your build directory depends on your
|
||||||
|
CMake generator. Pass `--help` to see the rest of the command line options.
|
||||||
|
|
||||||
## Coverage report
|
## Coverage report
|
||||||
|
|
||||||
@@ -356,7 +505,7 @@ variable in `cmake`. The specific command line used to run the `gcovr` tool will
|
|||||||
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
|
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
|
||||||
|
|
||||||
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
|
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
|
||||||
set to the number of available CPU cores. This may cause spurious test
|
set to the number of available CPU cores. This may cause spurious test
|
||||||
errors on Apple. Developers can override the number of unit test jobs with
|
errors on Apple. Developers can override the number of unit test jobs with
|
||||||
the `coverage_test_parallelism` variable in `cmake`.
|
the `coverage_test_parallelism` variable in `cmake`.
|
||||||
|
|
||||||
@@ -372,45 +521,56 @@ cmake --build . --target coverage
|
|||||||
After the `coverage` target is completed, the generated coverage report will be
|
After the `coverage` target is completed, the generated coverage report will be
|
||||||
stored inside the build directory, as either of:
|
stored inside the build directory, as either of:
|
||||||
|
|
||||||
- file named `coverage.`_extension_ , with a suitable extension for the report format, or
|
- file named `coverage.`_extension_, with a suitable extension for the report format, or
|
||||||
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
|
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
|
||||||
|
|
||||||
|
|
||||||
## Options
|
## Options
|
||||||
|
|
||||||
| Option | Default Value | Description |
|
| Option | Default Value | Description |
|
||||||
| --- | ---| ---|
|
| ---------- | ------------- | -------------------------------------------------------------------------- |
|
||||||
| `assert` | OFF | Enable assertions.
|
| `assert` | OFF | Enable assertions. |
|
||||||
| `coverage` | OFF | Prepare the coverage report. |
|
| `coverage` | OFF | Prepare the coverage report. |
|
||||||
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
|
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
|
||||||
| `tests` | OFF | Build tests. |
|
| `tests` | OFF | Build tests. |
|
||||||
| `unity` | ON | Configure a unity build. |
|
| `unity` | OFF | Configure a unity build. |
|
||||||
| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. |
|
| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. |
|
||||||
|
| `werr` | OFF | Treat compilation warnings as errors |
|
||||||
|
| `wextra` | OFF | Enable additional compilation warnings |
|
||||||
|
|
||||||
[Unity builds][5] may be faster for the first build
|
[Unity builds][5] may be faster for the first build
|
||||||
(at the cost of much more memory) since they concatenate sources into fewer
|
(at the cost of much more memory) since they concatenate sources into fewer
|
||||||
translation units. Non-unity builds may be faster for incremental builds,
|
translation units. Non-unity builds may be faster for incremental builds,
|
||||||
and can be helpful for detecting `#include` omissions.
|
and can be helpful for detecting `#include` omissions.
|
||||||
|
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
|
|
||||||
### Conan
|
### Conan
|
||||||
|
|
||||||
After any updates or changes to dependencies, you may need to do the following:
|
After any updates or changes to dependencies, you may need to do the following:
|
||||||
|
|
||||||
1. Remove your build directory.
|
1. Remove your build directory.
|
||||||
2. Remove the Conan cache:
|
2. Remove individual libraries from the Conan cache, e.g.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conan remove 'grpc/*'
|
||||||
```
|
```
|
||||||
rm -rf ~/.conan/data
|
|
||||||
|
**or**
|
||||||
|
|
||||||
|
Remove all libraries from Conan cache:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conan remove '*'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
3. Re-run [conan export](#patched-recipes) if needed.
|
||||||
4. Re-run [conan install](#build-and-test).
|
4. Re-run [conan install](#build-and-test).
|
||||||
|
|
||||||
|
### `protobuf/port_def.inc` file not found
|
||||||
|
|
||||||
### 'protobuf/port_def.inc' file not found
|
If `cmake --build .` results in an error due to a missing protobuf file, then
|
||||||
|
you might have generated CMake files for a different `build_type` than the
|
||||||
If `cmake --build .` results in an error due to a missing protobuf file, then you might have generated CMake files for a different `build_type` than the `CMAKE_BUILD_TYPE` you passed to conan.
|
`CMAKE_BUILD_TYPE` you passed to Conan.
|
||||||
|
|
||||||
```
|
```
|
||||||
/rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found
|
/rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found
|
||||||
@@ -424,70 +584,21 @@ For example, if you want to build Debug:
|
|||||||
1. For conan install, pass `--settings build_type=Debug`
|
1. For conan install, pass `--settings build_type=Debug`
|
||||||
2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug`
|
2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug`
|
||||||
|
|
||||||
|
|
||||||
### no std::result_of
|
|
||||||
|
|
||||||
If your compiler version is recent enough to have removed `std::result_of` as
|
|
||||||
part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor
|
|
||||||
definition to your build.
|
|
||||||
|
|
||||||
```
|
|
||||||
conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
|
|
||||||
conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
|
|
||||||
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
|
|
||||||
conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
|
|
||||||
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### call to 'async_teardown' is ambiguous
|
|
||||||
|
|
||||||
If you are compiling with an early version of Clang 16, then you might hit
|
|
||||||
a [regression][6] when compiling C++20 that manifests as an [error in a Boost
|
|
||||||
header][7]. You can work around it by adding this preprocessor definition:
|
|
||||||
|
|
||||||
```
|
|
||||||
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default
|
|
||||||
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### recompile with -fPIC
|
|
||||||
|
|
||||||
If you get a linker error suggesting that you recompile Boost with
|
|
||||||
position-independent code, such as:
|
|
||||||
|
|
||||||
```
|
|
||||||
/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o):
|
|
||||||
requires unsupported dynamic reloc 11; recompile with -fPIC
|
|
||||||
```
|
|
||||||
|
|
||||||
Conan most likely downloaded a bad binary distribution of the dependency.
|
|
||||||
This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC
|
|
||||||
for Linux. The solution is to build the dependency locally by passing
|
|
||||||
`--build boost` when calling `conan install`.
|
|
||||||
|
|
||||||
```
|
|
||||||
conan install --build boost ...
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Add a Dependency
|
## Add a Dependency
|
||||||
|
|
||||||
If you want to experiment with a new package, follow these steps:
|
If you want to experiment with a new package, follow these steps:
|
||||||
|
|
||||||
1. Search for the package on [Conan Center](https://conan.io/center/).
|
1. Search for the package on [Conan Center](https://conan.io/center/).
|
||||||
2. Modify [`conanfile.py`](./conanfile.py):
|
2. Modify [`conanfile.py`](./conanfile.py):
|
||||||
- Add a version of the package to the `requires` property.
|
- Add a version of the package to the `requires` property.
|
||||||
- Change any default options for the package by adding them to the
|
- Change any default options for the package by adding them to the
|
||||||
`default_options` property (with syntax `'$package:$option': $value`).
|
`default_options` property (with syntax `'$package:$option': $value`).
|
||||||
3. Modify [`CMakeLists.txt`](./CMakeLists.txt):
|
3. Modify [`CMakeLists.txt`](./CMakeLists.txt):
|
||||||
- Add a call to `find_package($package REQUIRED)`.
|
- Add a call to `find_package($package REQUIRED)`.
|
||||||
- Link a library from the package to the target `ripple_libs`
|
- Link a library from the package to the target `ripple_libs`
|
||||||
(search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`).
|
(search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`).
|
||||||
4. Start coding! Don't forget to include whatever headers you need from the package.
|
4. Start coding! Don't forget to include whatever headers you need from the package.
|
||||||
|
|
||||||
|
|
||||||
[1]: https://github.com/conan-io/conan-center-index/issues/13168
|
[1]: https://github.com/conan-io/conan-center-index/issues/13168
|
||||||
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
|
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
|
||||||
[3]: https://docs.conan.io/en/latest/getting_started.html
|
[3]: https://docs.conan.io/en/latest/getting_started.html
|
||||||
|
|||||||
@@ -25,28 +25,28 @@ more dependencies listed later.
|
|||||||
**tl;dr:** The modules listed first are more independent than the modules
|
**tl;dr:** The modules listed first are more independent than the modules
|
||||||
listed later.
|
listed later.
|
||||||
|
|
||||||
| Level / Tier | Module(s) |
|
| Level / Tier | Module(s) |
|
||||||
|--------------|-----------------------------------------------|
|
| ------------ | -------------------------------------------------------------------------------------------------------- |
|
||||||
| 01 | ripple/beast ripple/unity
|
| 01 | ripple/beast ripple/unity |
|
||||||
| 02 | ripple/basics
|
| 02 | ripple/basics |
|
||||||
| 03 | ripple/json ripple/crypto
|
| 03 | ripple/json ripple/crypto |
|
||||||
| 04 | ripple/protocol
|
| 04 | ripple/protocol |
|
||||||
| 05 | ripple/core ripple/conditions ripple/consensus ripple/resource ripple/server
|
| 05 | ripple/core ripple/conditions ripple/consensus ripple/resource ripple/server |
|
||||||
| 06 | ripple/peerfinder ripple/ledger ripple/nodestore ripple/net
|
| 06 | ripple/peerfinder ripple/ledger ripple/nodestore ripple/net |
|
||||||
| 07 | ripple/shamap ripple/overlay
|
| 07 | ripple/shamap ripple/overlay |
|
||||||
| 08 | ripple/app
|
| 08 | ripple/app |
|
||||||
| 09 | ripple/rpc
|
| 09 | ripple/rpc |
|
||||||
| 10 | ripple/perflog
|
| 10 | ripple/perflog |
|
||||||
| 11 | test/jtx test/beast test/csf
|
| 11 | test/jtx test/beast test/csf |
|
||||||
| 12 | test/unit_test
|
| 12 | test/unit_test |
|
||||||
| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay
|
| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay |
|
||||||
| 14 | test
|
| 14 | test |
|
||||||
| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore
|
| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore |
|
||||||
| 16 | test/rpc test/app
|
| 16 | test/rpc test/app |
|
||||||
|
|
||||||
(Note that `test` levelization is *much* less important and *much* less
|
(Note that `test` levelization is _much_ less important and _much_ less
|
||||||
strictly enforced than `ripple` levelization, other than the requirement
|
strictly enforced than `ripple` levelization, other than the requirement
|
||||||
that `test` code should *never* be included in `ripple` code.)
|
that `test` code should _never_ be included in `ripple` code.)
|
||||||
|
|
||||||
## Validation
|
## Validation
|
||||||
|
|
||||||
@@ -59,48 +59,48 @@ the rippled source. The only caveat is that it runs much slower
|
|||||||
under Windows than in Linux. It hasn't yet been tested under MacOS.
|
under Windows than in Linux. It hasn't yet been tested under MacOS.
|
||||||
It generates many files of [results](results):
|
It generates many files of [results](results):
|
||||||
|
|
||||||
* `rawincludes.txt`: The raw dump of the `#includes`
|
- `rawincludes.txt`: The raw dump of the `#includes`
|
||||||
* `paths.txt`: A second dump grouping the source module
|
- `paths.txt`: A second dump grouping the source module
|
||||||
to the destination module, deduped, and with frequency counts.
|
to the destination module, deduped, and with frequency counts.
|
||||||
* `includes/`: A directory where each file represents a module and
|
- `includes/`: A directory where each file represents a module and
|
||||||
contains a list of modules and counts that the module _includes_.
|
contains a list of modules and counts that the module _includes_.
|
||||||
* `includedby/`: Similar to `includes/`, but the other way around. Each
|
- `includedby/`: Similar to `includes/`, but the other way around. Each
|
||||||
file represents a module and contains a list of modules and counts
|
file represents a module and contains a list of modules and counts
|
||||||
that _include_ the module.
|
that _include_ the module.
|
||||||
* [`loops.txt`](results/loops.txt): A list of direct loops detected
|
- [`loops.txt`](results/loops.txt): A list of direct loops detected
|
||||||
between modules as they actually exist, as opposed to how they are
|
between modules as they actually exist, as opposed to how they are
|
||||||
desired as described above. In a perfect repo, this file will be
|
desired as described above. In a perfect repo, this file will be
|
||||||
empty.
|
empty.
|
||||||
This file is committed to the repo, and is used by the [levelization
|
This file is committed to the repo, and is used by the [levelization
|
||||||
Github workflow](../../.github/workflows/levelization.yml) to validate
|
Github workflow](../../.github/workflows/levelization.yml) to validate
|
||||||
that nothing changed.
|
that nothing changed.
|
||||||
* [`ordering.txt`](results/ordering.txt): A list showing relationships
|
- [`ordering.txt`](results/ordering.txt): A list showing relationships
|
||||||
between modules where there are no loops as they actually exist, as
|
between modules where there are no loops as they actually exist, as
|
||||||
opposed to how they are desired as described above.
|
opposed to how they are desired as described above.
|
||||||
This file is committed to the repo, and is used by the [levelization
|
This file is committed to the repo, and is used by the [levelization
|
||||||
Github workflow](../../.github/workflows/levelization.yml) to validate
|
Github workflow](../../.github/workflows/levelization.yml) to validate
|
||||||
that nothing changed.
|
that nothing changed.
|
||||||
* [`levelization.yml`](../../.github/workflows/levelization.yml)
|
- [`levelization.yml`](../../.github/workflows/levelization.yml)
|
||||||
Github Actions workflow to test that levelization loops haven't
|
Github Actions workflow to test that levelization loops haven't
|
||||||
changed. Unfortunately, if changes are detected, it can't tell if
|
changed. Unfortunately, if changes are detected, it can't tell if
|
||||||
they are improvements or not, so if you have resolved any issues or
|
they are improvements or not, so if you have resolved any issues or
|
||||||
done anything else to improve levelization, run `levelization.sh`,
|
done anything else to improve levelization, run `levelization.sh`,
|
||||||
and commit the updated results.
|
and commit the updated results.
|
||||||
|
|
||||||
The `loops.txt` and `ordering.txt` files relate the modules
|
The `loops.txt` and `ordering.txt` files relate the modules
|
||||||
using comparison signs, which indicate the number of times each
|
using comparison signs, which indicate the number of times each
|
||||||
module is included in the other.
|
module is included in the other.
|
||||||
|
|
||||||
* `A > B` means that A should probably be at a higher level than B,
|
- `A > B` means that A should probably be at a higher level than B,
|
||||||
because B is included in A significantly more than A is included in B.
|
because B is included in A significantly more than A is included in B.
|
||||||
These results can be included in both `loops.txt` and `ordering.txt`.
|
These results can be included in both `loops.txt` and `ordering.txt`.
|
||||||
Because `ordering.txt` only includes relationships where B is not
|
Because `ordering.txt` only includes relationships where B is not
|
||||||
included in A at all, it will only include these types of results.
|
included in A at all, it will only include these types of results.
|
||||||
* `A ~= B` means that A and B are included in each other a different
|
- `A ~= B` means that A and B are included in each other a different
|
||||||
number of times, but the values are so close that the script can't
|
number of times, but the values are so close that the script can't
|
||||||
definitively say that one should be above the other. These results
|
definitively say that one should be above the other. These results
|
||||||
will only be included in `loops.txt`.
|
will only be included in `loops.txt`.
|
||||||
* `A == B` means that A and B include each other the same number of
|
- `A == B` means that A and B include each other the same number of
|
||||||
times, so the script has no clue which should be higher. These results
|
times, so the script has no clue which should be higher. These results
|
||||||
will only be included in `loops.txt`.
|
will only be included in `loops.txt`.
|
||||||
|
|
||||||
@@ -110,5 +110,5 @@ get those details locally.
|
|||||||
|
|
||||||
1. Run `levelization.sh`
|
1. Run `levelization.sh`
|
||||||
2. Grep the modules in `paths.txt`.
|
2. Grep the modules in `paths.txt`.
|
||||||
* For example, if a cycle is found `A ~= B`, simply `grep -w
|
- For example, if a cycle is found `A ~= B`, simply `grep -w
|
||||||
A Builds/levelization/results/paths.txt | grep -w B`
|
A Builds/levelization/results/paths.txt | grep -w B`
|
||||||
|
|||||||
@@ -10,9 +10,6 @@ Loop: xrpld.app xrpld.core
|
|||||||
Loop: xrpld.app xrpld.ledger
|
Loop: xrpld.app xrpld.ledger
|
||||||
xrpld.app > xrpld.ledger
|
xrpld.app > xrpld.ledger
|
||||||
|
|
||||||
Loop: xrpld.app xrpld.net
|
|
||||||
xrpld.app > xrpld.net
|
|
||||||
|
|
||||||
Loop: xrpld.app xrpld.overlay
|
Loop: xrpld.app xrpld.overlay
|
||||||
xrpld.overlay > xrpld.app
|
xrpld.overlay > xrpld.app
|
||||||
|
|
||||||
@@ -25,15 +22,9 @@ Loop: xrpld.app xrpld.rpc
|
|||||||
Loop: xrpld.app xrpld.shamap
|
Loop: xrpld.app xrpld.shamap
|
||||||
xrpld.app > xrpld.shamap
|
xrpld.app > xrpld.shamap
|
||||||
|
|
||||||
Loop: xrpld.core xrpld.net
|
|
||||||
xrpld.net > xrpld.core
|
|
||||||
|
|
||||||
Loop: xrpld.core xrpld.perflog
|
Loop: xrpld.core xrpld.perflog
|
||||||
xrpld.perflog == xrpld.core
|
xrpld.perflog == xrpld.core
|
||||||
|
|
||||||
Loop: xrpld.net xrpld.rpc
|
|
||||||
xrpld.rpc ~= xrpld.net
|
|
||||||
|
|
||||||
Loop: xrpld.overlay xrpld.rpc
|
Loop: xrpld.overlay xrpld.rpc
|
||||||
xrpld.rpc ~= xrpld.overlay
|
xrpld.rpc ~= xrpld.overlay
|
||||||
|
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ libxrpl.basics > xrpl.basics
|
|||||||
libxrpl.crypto > xrpl.basics
|
libxrpl.crypto > xrpl.basics
|
||||||
libxrpl.json > xrpl.basics
|
libxrpl.json > xrpl.basics
|
||||||
libxrpl.json > xrpl.json
|
libxrpl.json > xrpl.json
|
||||||
|
libxrpl.net > xrpl.basics
|
||||||
|
libxrpl.net > xrpl.net
|
||||||
libxrpl.protocol > xrpl.basics
|
libxrpl.protocol > xrpl.basics
|
||||||
libxrpl.protocol > xrpl.json
|
libxrpl.protocol > xrpl.json
|
||||||
libxrpl.protocol > xrpl.protocol
|
libxrpl.protocol > xrpl.protocol
|
||||||
@@ -12,6 +14,9 @@ libxrpl.server > xrpl.basics
|
|||||||
libxrpl.server > xrpl.json
|
libxrpl.server > xrpl.json
|
||||||
libxrpl.server > xrpl.protocol
|
libxrpl.server > xrpl.protocol
|
||||||
libxrpl.server > xrpl.server
|
libxrpl.server > xrpl.server
|
||||||
|
libxrpl.telemetry > xrpl.basics
|
||||||
|
libxrpl.telemetry > xrpl.json
|
||||||
|
libxrpl.telemetry > xrpl.telemetry
|
||||||
test.app > test.jtx
|
test.app > test.jtx
|
||||||
test.app > test.rpc
|
test.app > test.rpc
|
||||||
test.app > test.toplevel
|
test.app > test.toplevel
|
||||||
@@ -56,15 +61,16 @@ test.csf > xrpl.basics
|
|||||||
test.csf > xrpld.consensus
|
test.csf > xrpld.consensus
|
||||||
test.csf > xrpl.json
|
test.csf > xrpl.json
|
||||||
test.csf > xrpl.protocol
|
test.csf > xrpl.protocol
|
||||||
|
test.csf > xrpl.telemetry
|
||||||
test.json > test.jtx
|
test.json > test.jtx
|
||||||
test.json > xrpl.json
|
test.json > xrpl.json
|
||||||
test.jtx > xrpl.basics
|
test.jtx > xrpl.basics
|
||||||
test.jtx > xrpld.app
|
test.jtx > xrpld.app
|
||||||
test.jtx > xrpld.core
|
test.jtx > xrpld.core
|
||||||
test.jtx > xrpld.ledger
|
test.jtx > xrpld.ledger
|
||||||
test.jtx > xrpld.net
|
|
||||||
test.jtx > xrpld.rpc
|
test.jtx > xrpld.rpc
|
||||||
test.jtx > xrpl.json
|
test.jtx > xrpl.json
|
||||||
|
test.jtx > xrpl.net
|
||||||
test.jtx > xrpl.protocol
|
test.jtx > xrpl.protocol
|
||||||
test.jtx > xrpl.resource
|
test.jtx > xrpl.resource
|
||||||
test.jtx > xrpl.server
|
test.jtx > xrpl.server
|
||||||
@@ -109,7 +115,6 @@ test.rpc > test.toplevel
|
|||||||
test.rpc > xrpl.basics
|
test.rpc > xrpl.basics
|
||||||
test.rpc > xrpld.app
|
test.rpc > xrpld.app
|
||||||
test.rpc > xrpld.core
|
test.rpc > xrpld.core
|
||||||
test.rpc > xrpld.net
|
|
||||||
test.rpc > xrpld.overlay
|
test.rpc > xrpld.overlay
|
||||||
test.rpc > xrpld.rpc
|
test.rpc > xrpld.rpc
|
||||||
test.rpc > xrpl.json
|
test.rpc > xrpl.json
|
||||||
@@ -132,15 +137,22 @@ test.shamap > xrpl.protocol
|
|||||||
test.toplevel > test.csf
|
test.toplevel > test.csf
|
||||||
test.toplevel > xrpl.json
|
test.toplevel > xrpl.json
|
||||||
test.unit_test > xrpl.basics
|
test.unit_test > xrpl.basics
|
||||||
|
tests.libxrpl > xrpl.basics
|
||||||
|
tests.libxrpl > xrpl.json
|
||||||
|
tests.libxrpl > xrpl.telemetry
|
||||||
xrpl.json > xrpl.basics
|
xrpl.json > xrpl.basics
|
||||||
|
xrpl.net > xrpl.basics
|
||||||
xrpl.protocol > xrpl.basics
|
xrpl.protocol > xrpl.basics
|
||||||
xrpl.protocol > xrpl.json
|
xrpl.protocol > xrpl.json
|
||||||
xrpl.resource > xrpl.basics
|
xrpl.resource > xrpl.basics
|
||||||
xrpl.resource > xrpl.json
|
xrpl.resource > xrpl.json
|
||||||
xrpl.resource > xrpl.protocol
|
xrpl.resource > xrpl.protocol
|
||||||
|
xrpl.resource > xrpl.telemetry
|
||||||
xrpl.server > xrpl.basics
|
xrpl.server > xrpl.basics
|
||||||
xrpl.server > xrpl.json
|
xrpl.server > xrpl.json
|
||||||
xrpl.server > xrpl.protocol
|
xrpl.server > xrpl.protocol
|
||||||
|
xrpl.server > xrpl.telemetry
|
||||||
|
xrpl.telemetry > xrpl.json
|
||||||
xrpld.app > test.unit_test
|
xrpld.app > test.unit_test
|
||||||
xrpld.app > xrpl.basics
|
xrpld.app > xrpl.basics
|
||||||
xrpld.app > xrpld.conditions
|
xrpld.app > xrpld.conditions
|
||||||
@@ -148,8 +160,10 @@ xrpld.app > xrpld.consensus
|
|||||||
xrpld.app > xrpld.nodestore
|
xrpld.app > xrpld.nodestore
|
||||||
xrpld.app > xrpld.perflog
|
xrpld.app > xrpld.perflog
|
||||||
xrpld.app > xrpl.json
|
xrpld.app > xrpl.json
|
||||||
|
xrpld.app > xrpl.net
|
||||||
xrpld.app > xrpl.protocol
|
xrpld.app > xrpl.protocol
|
||||||
xrpld.app > xrpl.resource
|
xrpld.app > xrpl.resource
|
||||||
|
xrpld.app > xrpl.telemetry
|
||||||
xrpld.conditions > xrpl.basics
|
xrpld.conditions > xrpl.basics
|
||||||
xrpld.conditions > xrpl.protocol
|
xrpld.conditions > xrpl.protocol
|
||||||
xrpld.consensus > xrpl.basics
|
xrpld.consensus > xrpl.basics
|
||||||
@@ -157,14 +171,12 @@ xrpld.consensus > xrpl.json
|
|||||||
xrpld.consensus > xrpl.protocol
|
xrpld.consensus > xrpl.protocol
|
||||||
xrpld.core > xrpl.basics
|
xrpld.core > xrpl.basics
|
||||||
xrpld.core > xrpl.json
|
xrpld.core > xrpl.json
|
||||||
|
xrpld.core > xrpl.net
|
||||||
xrpld.core > xrpl.protocol
|
xrpld.core > xrpl.protocol
|
||||||
|
xrpld.core > xrpl.telemetry
|
||||||
xrpld.ledger > xrpl.basics
|
xrpld.ledger > xrpl.basics
|
||||||
xrpld.ledger > xrpl.json
|
xrpld.ledger > xrpl.json
|
||||||
xrpld.ledger > xrpl.protocol
|
xrpld.ledger > xrpl.protocol
|
||||||
xrpld.net > xrpl.basics
|
|
||||||
xrpld.net > xrpl.json
|
|
||||||
xrpld.net > xrpl.protocol
|
|
||||||
xrpld.net > xrpl.resource
|
|
||||||
xrpld.nodestore > xrpl.basics
|
xrpld.nodestore > xrpl.basics
|
||||||
xrpld.nodestore > xrpld.core
|
xrpld.nodestore > xrpld.core
|
||||||
xrpld.nodestore > xrpld.unity
|
xrpld.nodestore > xrpld.unity
|
||||||
@@ -188,6 +200,7 @@ xrpld.rpc > xrpld.core
|
|||||||
xrpld.rpc > xrpld.ledger
|
xrpld.rpc > xrpld.ledger
|
||||||
xrpld.rpc > xrpld.nodestore
|
xrpld.rpc > xrpld.nodestore
|
||||||
xrpld.rpc > xrpl.json
|
xrpld.rpc > xrpl.json
|
||||||
|
xrpld.rpc > xrpl.net
|
||||||
xrpld.rpc > xrpl.protocol
|
xrpld.rpc > xrpl.protocol
|
||||||
xrpld.rpc > xrpl.resource
|
xrpld.rpc > xrpl.resource
|
||||||
xrpld.rpc > xrpl.server
|
xrpld.rpc > xrpl.server
|
||||||
|
|||||||
@@ -90,6 +90,11 @@ set_target_properties(OpenSSL::SSL PROPERTIES
|
|||||||
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
|
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
|
||||||
)
|
)
|
||||||
set(SECP256K1_INSTALL TRUE)
|
set(SECP256K1_INSTALL TRUE)
|
||||||
|
set(SECP256K1_BUILD_BENCHMARK FALSE)
|
||||||
|
set(SECP256K1_BUILD_TESTS FALSE)
|
||||||
|
set(SECP256K1_BUILD_EXHAUSTIVE_TESTS FALSE)
|
||||||
|
set(SECP256K1_BUILD_CTIME_TESTS FALSE)
|
||||||
|
set(SECP256K1_BUILD_EXAMPLES FALSE)
|
||||||
add_subdirectory(external/secp256k1)
|
add_subdirectory(external/secp256k1)
|
||||||
add_library(secp256k1::secp256k1 ALIAS secp256k1)
|
add_library(secp256k1::secp256k1 ALIAS secp256k1)
|
||||||
add_subdirectory(external/ed25519-donna)
|
add_subdirectory(external/ed25519-donna)
|
||||||
@@ -144,3 +149,8 @@ set(PROJECT_EXPORT_SET RippleExports)
|
|||||||
include(RippledCore)
|
include(RippledCore)
|
||||||
include(RippledInstall)
|
include(RippledInstall)
|
||||||
include(RippledValidatorKeys)
|
include(RippledValidatorKeys)
|
||||||
|
|
||||||
|
if(tests)
|
||||||
|
include(CTest)
|
||||||
|
add_subdirectory(src/tests/libxrpl)
|
||||||
|
endif()
|
||||||
|
|||||||
288
CONTRIBUTING.md
288
CONTRIBUTING.md
@@ -8,13 +8,12 @@ We assume you are familiar with the general practice of [making
|
|||||||
contributions on GitHub][contrib]. This file includes only special
|
contributions on GitHub][contrib]. This file includes only special
|
||||||
instructions specific to this project.
|
instructions specific to this project.
|
||||||
|
|
||||||
|
|
||||||
## Before you start
|
## Before you start
|
||||||
|
|
||||||
The following branches exist in the main project repository:
|
The following branches exist in the main project repository:
|
||||||
|
|
||||||
- `develop`: The latest set of unreleased features, and the most common
|
- `develop`: The latest set of unreleased features, and the most common
|
||||||
starting point for contributions.
|
starting point for contributions.
|
||||||
- `release`: The latest beta release or release candidate.
|
- `release`: The latest beta release or release candidate.
|
||||||
- `master`: The latest stable release.
|
- `master`: The latest stable release.
|
||||||
- `gh-pages`: The documentation for this project, built by Doxygen.
|
- `gh-pages`: The documentation for this project, built by Doxygen.
|
||||||
@@ -27,18 +26,18 @@ In general, external contributions should be developed in your personal
|
|||||||
[fork][forking]. Contributions from developers with write permissions
|
[fork][forking]. Contributions from developers with write permissions
|
||||||
should be done in [the main repository][rippled] in a branch with
|
should be done in [the main repository][rippled] in a branch with
|
||||||
a permitted prefix. Permitted prefixes are:
|
a permitted prefix. Permitted prefixes are:
|
||||||
* XLS-[a-zA-Z0-9]+/.+
|
|
||||||
* e.g. XLS-0033d/mpt-clarify-STEitherAmount
|
|
||||||
* [GitHub username]/.+
|
|
||||||
* e.g. JoelKatz/fix-rpc-webhook-queue
|
|
||||||
* [Organization name]/.+
|
|
||||||
* e.g. ripple/antithesis
|
|
||||||
|
|
||||||
Regardless of where the branch is created, please open a *draft* pull
|
- XLS-[a-zA-Z0-9]+/.+
|
||||||
|
- e.g. XLS-0033d/mpt-clarify-STEitherAmount
|
||||||
|
- [GitHub username]/.+
|
||||||
|
- e.g. JoelKatz/fix-rpc-webhook-queue
|
||||||
|
- [Organization name]/.+
|
||||||
|
- e.g. ripple/antithesis
|
||||||
|
|
||||||
|
Regardless of where the branch is created, please open a _draft_ pull
|
||||||
request as soon as possible after pushing the branch to Github, to
|
request as soon as possible after pushing the branch to Github, to
|
||||||
increase visibility, and ease feedback during the development process.
|
increase visibility, and ease feedback during the development process.
|
||||||
|
|
||||||
|
|
||||||
## Major contributions
|
## Major contributions
|
||||||
|
|
||||||
If your contribution is a major feature or breaking change, then you
|
If your contribution is a major feature or breaking change, then you
|
||||||
@@ -55,8 +54,8 @@ responsibility of the XLS author to update the draft to match the final
|
|||||||
implementation when its corresponding pull request is merged, unless the
|
implementation when its corresponding pull request is merged, unless the
|
||||||
author delegates that responsibility to others.
|
author delegates that responsibility to others.
|
||||||
|
|
||||||
|
|
||||||
## Before making a pull request
|
## Before making a pull request
|
||||||
|
|
||||||
(Or marking a draft pull request as ready.)
|
(Or marking a draft pull request as ready.)
|
||||||
|
|
||||||
Changes that alter transaction processing must be guarded by an
|
Changes that alter transaction processing must be guarded by an
|
||||||
@@ -73,11 +72,12 @@ automatic test run by `rippled --unittest`.
|
|||||||
Otherwise, it must be a manual test.
|
Otherwise, it must be a manual test.
|
||||||
|
|
||||||
If you create new source files, they must be organized as follows:
|
If you create new source files, they must be organized as follows:
|
||||||
* If the files are in any of the `libxrpl` modules, the headers (`.h`) must go
|
|
||||||
|
- If the files are in any of the `libxrpl` modules, the headers (`.h`) must go
|
||||||
under `include/xrpl`, and source (`.cpp`) files must go under
|
under `include/xrpl`, and source (`.cpp`) files must go under
|
||||||
`src/libxrpl`.
|
`src/libxrpl`.
|
||||||
* All other non-test files must go under `src/xrpld`.
|
- All other non-test files must go under `src/xrpld`.
|
||||||
* All test source files must go under `src/test`.
|
- All test source files must go under `src/test`.
|
||||||
|
|
||||||
The source must be formatted according to the style guide below.
|
The source must be formatted according to the style guide below.
|
||||||
|
|
||||||
@@ -87,16 +87,17 @@ Changes should be usually squashed down into a single commit.
|
|||||||
Some larger or more complicated change sets make more sense,
|
Some larger or more complicated change sets make more sense,
|
||||||
and are easier to review if organized into multiple logical commits.
|
and are easier to review if organized into multiple logical commits.
|
||||||
Either way, all commits should fit the following criteria:
|
Either way, all commits should fit the following criteria:
|
||||||
* Changes should be presented in a single commit or a logical
|
|
||||||
|
- Changes should be presented in a single commit or a logical
|
||||||
sequence of commits.
|
sequence of commits.
|
||||||
Specifically, chronological commits that simply
|
Specifically, chronological commits that simply
|
||||||
reflect the history of how the author implemented
|
reflect the history of how the author implemented
|
||||||
the change, "warts and all", are not useful to
|
the change, "warts and all", are not useful to
|
||||||
reviewers.
|
reviewers.
|
||||||
* Every commit should have a [good message](#good-commit-messages).
|
- Every commit should have a [good message](#good-commit-messages).
|
||||||
to explain specific aspects of the change.
|
to explain specific aspects of the change.
|
||||||
* Every commit should be signed.
|
- Every commit should be signed.
|
||||||
* Every commit should be well-formed (builds successfully,
|
- Every commit should be well-formed (builds successfully,
|
||||||
unit tests passing), as this helps to resolve merge
|
unit tests passing), as this helps to resolve merge
|
||||||
conflicts, and makes it easier to use `git bisect`
|
conflicts, and makes it easier to use `git bisect`
|
||||||
to find bugs.
|
to find bugs.
|
||||||
@@ -108,13 +109,14 @@ Refer to
|
|||||||
for general rules on writing a good commit message.
|
for general rules on writing a good commit message.
|
||||||
|
|
||||||
tl;dr
|
tl;dr
|
||||||
|
|
||||||
> 1. Separate subject from body with a blank line.
|
> 1. Separate subject from body with a blank line.
|
||||||
> 2. Limit the subject line to 50 characters.
|
> 2. Limit the subject line to 50 characters.
|
||||||
> * [...]shoot for 50 characters, but consider 72 the hard limit.
|
> - [...]shoot for 50 characters, but consider 72 the hard limit.
|
||||||
> 3. Capitalize the subject line.
|
> 3. Capitalize the subject line.
|
||||||
> 4. Do not end the subject line with a period.
|
> 4. Do not end the subject line with a period.
|
||||||
> 5. Use the imperative mood in the subject line.
|
> 5. Use the imperative mood in the subject line.
|
||||||
> * A properly formed Git commit subject line should always be able
|
> - A properly formed Git commit subject line should always be able
|
||||||
> to complete the following sentence: "If applied, this commit will
|
> to complete the following sentence: "If applied, this commit will
|
||||||
> _your subject line here_".
|
> _your subject line here_".
|
||||||
> 6. Wrap the body at 72 characters.
|
> 6. Wrap the body at 72 characters.
|
||||||
@@ -122,16 +124,17 @@ tl;dr
|
|||||||
|
|
||||||
In addition to those guidelines, please add one of the following
|
In addition to those guidelines, please add one of the following
|
||||||
prefixes to the subject line if appropriate.
|
prefixes to the subject line if appropriate.
|
||||||
* `fix:` - The primary purpose is to fix an existing bug.
|
|
||||||
* `perf:` - The primary purpose is performance improvements.
|
- `fix:` - The primary purpose is to fix an existing bug.
|
||||||
* `refactor:` - The changes refactor code without affecting
|
- `perf:` - The primary purpose is performance improvements.
|
||||||
|
- `refactor:` - The changes refactor code without affecting
|
||||||
functionality.
|
functionality.
|
||||||
* `test:` - The changes _only_ affect unit tests.
|
- `test:` - The changes _only_ affect unit tests.
|
||||||
* `docs:` - The changes _only_ affect documentation. This can
|
- `docs:` - The changes _only_ affect documentation. This can
|
||||||
include code comments in addition to `.md` files like this one.
|
include code comments in addition to `.md` files like this one.
|
||||||
* `build:` - The changes _only_ affect the build process,
|
- `build:` - The changes _only_ affect the build process,
|
||||||
including CMake and/or Conan settings.
|
including CMake and/or Conan settings.
|
||||||
* `chore:` - Other tasks that don't affect the binary, but don't fit
|
- `chore:` - Other tasks that don't affect the binary, but don't fit
|
||||||
any of the other cases. e.g. formatting, git settings, updating
|
any of the other cases. e.g. formatting, git settings, updating
|
||||||
Github Actions jobs.
|
Github Actions jobs.
|
||||||
|
|
||||||
@@ -143,9 +146,10 @@ unit tests for Feature X (#1234)`.
|
|||||||
|
|
||||||
In general, pull requests use `develop` as the base branch.
|
In general, pull requests use `develop` as the base branch.
|
||||||
The exceptions are
|
The exceptions are
|
||||||
* Fixes and improvements to a release candidate use `release` as the
|
|
||||||
|
- Fixes and improvements to a release candidate use `release` as the
|
||||||
base.
|
base.
|
||||||
* Hotfixes use `master` as the base.
|
- Hotfixes use `master` as the base.
|
||||||
|
|
||||||
If your changes are not quite ready, but you want to make it easily available
|
If your changes are not quite ready, but you want to make it easily available
|
||||||
for preliminary examination or review, you can create a "Draft" pull request.
|
for preliminary examination or review, you can create a "Draft" pull request.
|
||||||
@@ -182,11 +186,11 @@ meets a few criteria:
|
|||||||
2. All CI checks must be complete and passed. (One-off failures may
|
2. All CI checks must be complete and passed. (One-off failures may
|
||||||
be acceptable if they are related to a known issue.)
|
be acceptable if they are related to a known issue.)
|
||||||
3. The PR must have a [good commit message](#good-commit-messages).
|
3. The PR must have a [good commit message](#good-commit-messages).
|
||||||
* If the PR started with a good commit message, and it doesn't
|
- If the PR started with a good commit message, and it doesn't
|
||||||
need to be updated, the author can indicate that in a comment.
|
need to be updated, the author can indicate that in a comment.
|
||||||
* Any contributor, preferably the author, can leave a comment
|
- Any contributor, preferably the author, can leave a comment
|
||||||
suggesting a commit message.
|
suggesting a commit message.
|
||||||
* If the author squashes and rebases the code in preparation for
|
- If the author squashes and rebases the code in preparation for
|
||||||
merge, they should also ensure the commit message(s) are updated
|
merge, they should also ensure the commit message(s) are updated
|
||||||
as well.
|
as well.
|
||||||
4. The PR branch must be up to date with the base branch (usually
|
4. The PR branch must be up to date with the base branch (usually
|
||||||
@@ -208,7 +212,6 @@ This is a non-exhaustive list of recommended style guidelines. These are
|
|||||||
not always strictly enforced and serve as a way to keep the codebase
|
not always strictly enforced and serve as a way to keep the codebase
|
||||||
coherent rather than a set of _thou shalt not_ commandments.
|
coherent rather than a set of _thou shalt not_ commandments.
|
||||||
|
|
||||||
|
|
||||||
## Formatting
|
## Formatting
|
||||||
|
|
||||||
All code must conform to `clang-format` version 18,
|
All code must conform to `clang-format` version 18,
|
||||||
@@ -237,6 +240,7 @@ To download the patch file:
|
|||||||
5. Commit and push.
|
5. Commit and push.
|
||||||
|
|
||||||
You can install a pre-commit hook to automatically run `clang-format` before every commit:
|
You can install a pre-commit hook to automatically run `clang-format` before every commit:
|
||||||
|
|
||||||
```
|
```
|
||||||
pip3 install pre-commit
|
pip3 install pre-commit
|
||||||
pre-commit install
|
pre-commit install
|
||||||
@@ -267,49 +271,51 @@ locations, where the reporting of contract violations on the Antithesis
|
|||||||
platform is either not possible or not useful.
|
platform is either not possible or not useful.
|
||||||
|
|
||||||
For this reason:
|
For this reason:
|
||||||
* The locations where `assert` or `assert(false)` contracts should continue to be used:
|
|
||||||
* `constexpr` functions
|
- The locations where `assert` or `assert(false)` contracts should continue to be used:
|
||||||
* unit tests i.e. files under `src/test`
|
- `constexpr` functions
|
||||||
* unit tests-related modules (files under `beast/test` and `beast/unit_test`)
|
- unit tests i.e. files under `src/test`
|
||||||
* Outside of the listed locations, do not use `assert`; use `XRPL_ASSERT` instead,
|
- unit tests-related modules (files under `beast/test` and `beast/unit_test`)
|
||||||
|
- Outside of the listed locations, do not use `assert`; use `XRPL_ASSERT` instead,
|
||||||
giving it a unique name, with a short description of the contract.
|
giving it a unique name, with a short description of the contract.
|
||||||
* Outside of the listed locations, do not use `assert(false)`; use
|
- Outside of the listed locations, do not use `assert(false)`; use
|
||||||
`UNREACHABLE` instead, giving it a unique name, with a description of the
|
`UNREACHABLE` instead, giving it a unique name, with a description of the
|
||||||
condition being violated
|
condition being violated
|
||||||
* The contract name should start with a full name (including scope) of the
|
- The contract name should start with a full name (including scope) of the
|
||||||
function, optionally a named lambda, followed by a colon ` : ` and a brief
|
function, optionally a named lambda, followed by a colon `:` and a brief
|
||||||
(typically at most five words) description. `UNREACHABLE` contracts
|
(typically at most five words) description. `UNREACHABLE` contracts
|
||||||
can use slightly longer descriptions. If there are multiple overloads of the
|
can use slightly longer descriptions. If there are multiple overloads of the
|
||||||
function, use common sense to balance both brevity and unambiguity of the
|
function, use common sense to balance both brevity and unambiguity of the
|
||||||
function name. NOTE: the purpose of the name is to provide a stable means of
|
function name. NOTE: the purpose of the name is to provide a stable means of
|
||||||
unique identification of every contract; for this reason try to avoid elements
|
unique identification of every contract; for this reason try to avoid elements
|
||||||
which can change in some obvious refactors or when reinforcing the condition.
|
which can change in some obvious refactors or when reinforcing the condition.
|
||||||
* Contract description typically (except for `UNREACHABLE`) should describe the
|
- Contract description typically (except for `UNREACHABLE`) should describe the
|
||||||
_expected_ condition, as in "I assert that _expected_ is true".
|
_expected_ condition, as in "I assert that _expected_ is true".
|
||||||
* Contract description for `UNREACHABLE` should describe the _unexpected_
|
- Contract description for `UNREACHABLE` should describe the _unexpected_
|
||||||
situation which caused the line to have been reached.
|
situation which caused the line to have been reached.
|
||||||
* Example good name for an
|
- Example good name for an
|
||||||
`UNREACHABLE` macro `"Json::operator==(Value, Value) : invalid type"`; example
|
`UNREACHABLE` macro `"Json::operator==(Value, Value) : invalid type"`; example
|
||||||
good name for an `XRPL_ASSERT` macro `"Json::Value::asCString : valid type"`.
|
good name for an `XRPL_ASSERT` macro `"Json::Value::asCString : valid type"`.
|
||||||
* Example **bad** name
|
- Example **bad** name
|
||||||
`"RFC1751::insert(char* s, int x, int start, int length) : length is greater than or equal zero"`
|
`"RFC1751::insert(char* s, int x, int start, int length) : length is greater than or equal zero"`
|
||||||
(missing namespace, unnecessary full function signature, description too verbose).
|
(missing namespace, unnecessary full function signature, description too verbose).
|
||||||
Good name: `"ripple::RFC1751::insert : minimum length"`.
|
Good name: `"ripple::RFC1751::insert : minimum length"`.
|
||||||
* In **few** well-justified cases a non-standard name can be used, in which case a
|
- In **few** well-justified cases a non-standard name can be used, in which case a
|
||||||
comment should be placed to explain the rationale (example in `contract.cpp`)
|
comment should be placed to explain the rationale (example in `contract.cpp`)
|
||||||
* Do **not** rename a contract without a good reason (e.g. the name no longer
|
- Do **not** rename a contract without a good reason (e.g. the name no longer
|
||||||
reflects the location or the condition being checked)
|
reflects the location or the condition being checked)
|
||||||
* Do not use `std::unreachable`
|
- Do not use `std::unreachable`
|
||||||
* Do not put contracts where they can be violated by an external condition
|
- Do not put contracts where they can be violated by an external condition
|
||||||
(e.g. timing, data payload before mandatory validation etc.) as this creates
|
(e.g. timing, data payload before mandatory validation etc.) as this creates
|
||||||
bogus bug reports (and causes crashes of Debug builds)
|
bogus bug reports (and causes crashes of Debug builds)
|
||||||
|
|
||||||
## Unit Tests
|
## Unit Tests
|
||||||
|
|
||||||
To execute all unit tests:
|
To execute all unit tests:
|
||||||
|
|
||||||
```rippled --unittest --unittest-jobs=<number of cores>```
|
`rippled --unittest --unittest-jobs=<number of cores>`
|
||||||
|
|
||||||
(Note: Using multiple cores on a Mac M1 can cause spurious test failures. The
|
(Note: Using multiple cores on a Mac M1 can cause spurious test failures. The
|
||||||
cause is still under investigation. If you observe this problem, try specifying fewer jobs.)
|
cause is still under investigation. If you observe this problem, try specifying fewer jobs.)
|
||||||
|
|
||||||
To run a specific set of test suites:
|
To run a specific set of test suites:
|
||||||
@@ -317,10 +323,11 @@ To run a specific set of test suites:
|
|||||||
```
|
```
|
||||||
rippled --unittest TestSuiteName
|
rippled --unittest TestSuiteName
|
||||||
```
|
```
|
||||||
|
|
||||||
Note: In this example, all tests with prefix `TestSuiteName` will be run, so if
|
Note: In this example, all tests with prefix `TestSuiteName` will be run, so if
|
||||||
`TestSuiteName1` and `TestSuiteName2` both exist, then both tests will run.
|
`TestSuiteName1` and `TestSuiteName2` both exist, then both tests will run.
|
||||||
Alternatively, if the unit test name finds an exact match, it will stop
|
Alternatively, if the unit test name finds an exact match, it will stop
|
||||||
doing partial matches, i.e. if a unit test with a title of `TestSuiteName`
|
doing partial matches, i.e. if a unit test with a title of `TestSuiteName`
|
||||||
exists, then no other unit test will be executed, apart from `TestSuiteName`.
|
exists, then no other unit test will be executed, apart from `TestSuiteName`.
|
||||||
|
|
||||||
## Avoid
|
## Avoid
|
||||||
@@ -336,7 +343,6 @@ exists, then no other unit test will be executed, apart from `TestSuiteName`.
|
|||||||
explanatory comments.
|
explanatory comments.
|
||||||
8. Importing new libraries unless there is a very good reason to do so.
|
8. Importing new libraries unless there is a very good reason to do so.
|
||||||
|
|
||||||
|
|
||||||
## Seek to
|
## Seek to
|
||||||
|
|
||||||
9. Extend functionality of existing code rather than creating new code.
|
9. Extend functionality of existing code rather than creating new code.
|
||||||
@@ -351,14 +357,12 @@ exists, then no other unit test will be executed, apart from `TestSuiteName`.
|
|||||||
14. Provide as many comments as you feel that a competent programmer
|
14. Provide as many comments as you feel that a competent programmer
|
||||||
would need to understand what your code does.
|
would need to understand what your code does.
|
||||||
|
|
||||||
|
|
||||||
# Maintainers
|
# Maintainers
|
||||||
|
|
||||||
Maintainers are ecosystem participants with elevated access to the repository.
|
Maintainers are ecosystem participants with elevated access to the repository.
|
||||||
They are able to push new code, make decisions on when a release should be
|
They are able to push new code, make decisions on when a release should be
|
||||||
made, etc.
|
made, etc.
|
||||||
|
|
||||||
|
|
||||||
## Adding and removing
|
## Adding and removing
|
||||||
|
|
||||||
New maintainers can be proposed by two existing maintainers, subject to a vote
|
New maintainers can be proposed by two existing maintainers, subject to a vote
|
||||||
@@ -373,47 +377,41 @@ A minimum of 60% agreement and 50% participation are required.
|
|||||||
The XRP Ledger Foundation will have the ability, for cause, to remove an
|
The XRP Ledger Foundation will have the ability, for cause, to remove an
|
||||||
existing maintainer without a vote.
|
existing maintainer without a vote.
|
||||||
|
|
||||||
|
|
||||||
## Current Maintainers
|
## Current Maintainers
|
||||||
|
|
||||||
Maintainers are users with maintain or admin access to the repo.
|
Maintainers are users with maintain or admin access to the repo.
|
||||||
|
|
||||||
* [bthomee](https://github.com/bthomee) (Ripple)
|
- [bthomee](https://github.com/bthomee) (Ripple)
|
||||||
* [intelliot](https://github.com/intelliot) (Ripple)
|
- [intelliot](https://github.com/intelliot) (Ripple)
|
||||||
* [JoelKatz](https://github.com/JoelKatz) (Ripple)
|
- [JoelKatz](https://github.com/JoelKatz) (Ripple)
|
||||||
* [nixer89](https://github.com/nixer89) (XRP Ledger Foundation)
|
- [legleux](https://github.com/legleux) (Ripple)
|
||||||
* [RichardAH](https://github.com/RichardAH) (XRP Ledger Foundation)
|
- [mankins](https://github.com/mankins) (XRP Ledger Foundation)
|
||||||
* [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation)
|
- [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation)
|
||||||
* [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation)
|
- [ximinez](https://github.com/ximinez) (Ripple)
|
||||||
* [ximinez](https://github.com/ximinez) (Ripple)
|
|
||||||
|
|
||||||
|
|
||||||
## Current Code Reviewers
|
## Current Code Reviewers
|
||||||
|
|
||||||
Code Reviewers are developers who have the ability to review, approve, and
|
Code Reviewers are developers who have the ability to review, approve, and
|
||||||
in some cases merge source code changes.
|
in some cases merge source code changes.
|
||||||
|
|
||||||
* [HowardHinnant](https://github.com/HowardHinnant) (Ripple)
|
- [a1q123456](https://github.com/a1q123456) (Ripple)
|
||||||
* [scottschurr](https://github.com/scottschurr) (Ripple)
|
- [Bronek](https://github.com/Bronek) (Ripple)
|
||||||
* [seelabs](https://github.com/seelabs) (Ripple)
|
- [bthomee](https://github.com/bthomee) (Ripple)
|
||||||
* [Ed Hennis](https://github.com/ximinez) (Ripple)
|
- [ckeshava](https://github.com/ckeshava) (Ripple)
|
||||||
* [mvadari](https://github.com/mvadari) (Ripple)
|
- [dangell7](https://github.com/dangell7) (XRPL Labs)
|
||||||
* [thejohnfreeman](https://github.com/thejohnfreeman) (Ripple)
|
- [godexsoft](https://github.com/godexsoft) (Ripple)
|
||||||
* [Bronek](https://github.com/Bronek) (Ripple)
|
- [gregtatcam](https://github.com/gregtatcam) (Ripple)
|
||||||
* [manojsdoshi](https://github.com/manojsdoshi) (Ripple)
|
- [kuznetsss](https://github.com/kuznetsss) (Ripple)
|
||||||
* [godexsoft](https://github.com/godexsoft) (Ripple)
|
- [lmaisons](https://github.com/lmaisons) (Ripple)
|
||||||
* [mDuo13](https://github.com/mDuo13) (Ripple)
|
- [mathbunnyru](https://github.com/mathbunnyru) (Ripple)
|
||||||
* [ckniffen](https://github.com/ckniffen) (Ripple)
|
- [mvadari](https://github.com/mvadari) (Ripple)
|
||||||
* [arihantkothari](https://github.com/arihantkothari) (Ripple)
|
- [oleks-rip](https://github.com/oleks-rip) (Ripple)
|
||||||
* [pwang200](https://github.com/pwang200) (Ripple)
|
- [PeterChen13579](https://github.com/PeterChen13579) (Ripple)
|
||||||
* [sophiax851](https://github.com/sophiax851) (Ripple)
|
- [pwang200](https://github.com/pwang200) (Ripple)
|
||||||
* [shawnxie999](https://github.com/shawnxie999) (Ripple)
|
- [q73zhao](https://github.com/q73zhao) (Ripple)
|
||||||
* [gregtatcam](https://github.com/gregtatcam) (Ripple)
|
- [shawnxie999](https://github.com/shawnxie999) (Ripple)
|
||||||
* [mtrippled](https://github.com/mtrippled) (Ripple)
|
- [Tapanito](https://github.com/Tapanito) (Ripple)
|
||||||
* [ckeshava](https://github.com/ckeshava) (Ripple)
|
- [ximinez](https://github.com/ximinez) (Ripple)
|
||||||
* [nbougalis](https://github.com/nbougalis) None
|
|
||||||
* [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation)
|
|
||||||
* [dangell7](https://github.com/dangell7) (XRPL Labs)
|
|
||||||
|
|
||||||
Developers not on this list are able and encouraged to submit feedback
|
Developers not on this list are able and encouraged to submit feedback
|
||||||
on pending code changes (open pull requests).
|
on pending code changes (open pull requests).
|
||||||
@@ -423,6 +421,7 @@ on pending code changes (open pull requests).
|
|||||||
These instructions assume you have your git upstream remotes configured
|
These instructions assume you have your git upstream remotes configured
|
||||||
to avoid accidental pushes to the main repo, and a remote group
|
to avoid accidental pushes to the main repo, and a remote group
|
||||||
specifying both of them. e.g.
|
specifying both of them. e.g.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ git remote -v | grep upstream
|
$ git remote -v | grep upstream
|
||||||
upstream https://github.com/XRPLF/rippled.git (fetch)
|
upstream https://github.com/XRPLF/rippled.git (fetch)
|
||||||
@@ -437,6 +436,7 @@ upstream upstream-push
|
|||||||
You can use the [setup-upstreams] script to set this up.
|
You can use the [setup-upstreams] script to set this up.
|
||||||
|
|
||||||
It also assumes you have a default gpg signing key set up in git. e.g.
|
It also assumes you have a default gpg signing key set up in git. e.g.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ git config user.signingkey
|
$ git config user.signingkey
|
||||||
968479A1AFF927E37D1A566BB5690EEEBB952194
|
968479A1AFF927E37D1A566BB5690EEEBB952194
|
||||||
@@ -461,8 +461,8 @@ the suggested commit message, or modify it as needed.
|
|||||||
#### Slightly more complicated pull requests
|
#### Slightly more complicated pull requests
|
||||||
|
|
||||||
Some pull requests need to be pushed to `develop` as more than one
|
Some pull requests need to be pushed to `develop` as more than one
|
||||||
commit. A PR author may *request* to merge as separate commits. They
|
commit. A PR author may _request_ to merge as separate commits. They
|
||||||
must *justify* why separate commits are needed, and *specify* how they
|
must _justify_ why separate commits are needed, and _specify_ how they
|
||||||
would like the commits to be merged. If you disagree with the author,
|
would like the commits to be merged. If you disagree with the author,
|
||||||
discuss it with them directly.
|
discuss it with them directly.
|
||||||
|
|
||||||
@@ -471,20 +471,22 @@ fast forward only merge (`--ff-only`) on the command line and push to
|
|||||||
`develop`.
|
`develop`.
|
||||||
|
|
||||||
Some examples of when separate commits are worthwhile are:
|
Some examples of when separate commits are worthwhile are:
|
||||||
|
|
||||||
1. PRs where source files are reorganized in multiple steps.
|
1. PRs where source files are reorganized in multiple steps.
|
||||||
2. PRs where the commits are mostly independent and *could* be separate
|
2. PRs where the commits are mostly independent and _could_ be separate
|
||||||
PRs, but are pulled together into one PR under a commit theme or
|
PRs, but are pulled together into one PR under a commit theme or
|
||||||
issue.
|
issue.
|
||||||
3. PRs that are complicated enough that `git bisect` would not be much
|
3. PRs that are complicated enough that `git bisect` would not be much
|
||||||
help if it determined this PR introduced a problem.
|
help if it determined this PR introduced a problem.
|
||||||
|
|
||||||
Either way, check that:
|
Either way, check that:
|
||||||
* The commits are based on the current tip of `develop`.
|
|
||||||
* The commits are clean: No merge commits (except when reverse
|
- The commits are based on the current tip of `develop`.
|
||||||
|
- The commits are clean: No merge commits (except when reverse
|
||||||
merging), no "[FOLD]" or "fixup!" messages.
|
merging), no "[FOLD]" or "fixup!" messages.
|
||||||
* All commits are signed. If the commits are not signed by the author, use
|
- All commits are signed. If the commits are not signed by the author, use
|
||||||
`git commit --amend -S` to sign them yourself.
|
`git commit --amend -S` to sign them yourself.
|
||||||
* At least one (but preferably all) of the commits has the PR number
|
- At least one (but preferably all) of the commits has the PR number
|
||||||
in the commit message.
|
in the commit message.
|
||||||
|
|
||||||
The "Create a merge commit" and "Rebase and merge" options should be
|
The "Create a merge commit" and "Rebase and merge" options should be
|
||||||
@@ -502,13 +504,13 @@ Rippled uses a linear workflow model that can be summarized as:
|
|||||||
1. In between releases, developers work against the `develop` branch.
|
1. In between releases, developers work against the `develop` branch.
|
||||||
2. Periodically, a maintainer will build and tag a beta version from
|
2. Periodically, a maintainer will build and tag a beta version from
|
||||||
`develop`, which is pushed to `release`.
|
`develop`, which is pushed to `release`.
|
||||||
* Betas are usually released every two to three weeks, though that
|
- Betas are usually released every two to three weeks, though that
|
||||||
schedule can vary depending on progress, availability, and other
|
schedule can vary depending on progress, availability, and other
|
||||||
factors.
|
factors.
|
||||||
3. When the changes in `develop` are considered stable and mature enough
|
3. When the changes in `develop` are considered stable and mature enough
|
||||||
to be ready to release, a release candidate (RC) is built and tagged
|
to be ready to release, a release candidate (RC) is built and tagged
|
||||||
from `develop`, and merged to `release`.
|
from `develop`, and merged to `release`.
|
||||||
* Further development for that release (primarily fixes) then
|
- Further development for that release (primarily fixes) then
|
||||||
continues against `release`, while other development continues on
|
continues against `release`, while other development continues on
|
||||||
`develop`. Effectively, `release` is forked from `develop`. Changes
|
`develop`. Effectively, `release` is forked from `develop`. Changes
|
||||||
to `release` must be reverse merged to `develop`.
|
to `release` must be reverse merged to `develop`.
|
||||||
@@ -543,6 +545,7 @@ Rippled uses a linear workflow model that can be summarized as:
|
|||||||
the version number, etc.
|
the version number, etc.
|
||||||
|
|
||||||
The workflow may look something like:
|
The workflow may look something like:
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch --multiple upstreams user1 user2 user3 [...]
|
git fetch --multiple upstreams user1 user2 user3 [...]
|
||||||
git checkout -B release-next --no-track upstream/develop
|
git checkout -B release-next --no-track upstream/develop
|
||||||
@@ -581,8 +584,9 @@ This includes, betas, and the first release candidate (RC).
|
|||||||
|
|
||||||
1. If you didn't create one [preparing the `develop`
|
1. If you didn't create one [preparing the `develop`
|
||||||
branch](#preparing-the-develop-branch), ensure there is no old
|
branch](#preparing-the-develop-branch), ensure there is no old
|
||||||
`release-next` branch hanging around. Then make a `release-next`
|
`release-next` branch hanging around. Then make a `release-next`
|
||||||
branch that only changes the version number. e.g.
|
branch that only changes the version number. e.g.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -603,25 +607,30 @@ git push upstream-push
|
|||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
git branch --set-upstream-to=upstream/release-next
|
git branch --set-upstream-to=upstream/release-next
|
||||||
```
|
```
|
||||||
You can also use the [update-version] script.
|
|
||||||
2. Create a Pull Request for `release-next` with **`develop`** as
|
You can also use the [update-version] script.
2. Create a Pull Request for `release-next` with **`develop`** as
|
||||||
the base branch.
|
the base branch.
|
||||||
1. Use the title "[TRIVIAL] Set version to X.X.X-bX".
|
|
||||||
2. Instead of the default description template, use the following:
|
1. Use the title "[TRIVIAL] Set version to X.X.X-bX".
|
||||||
|
2. Instead of the default description template, use the following:
|
||||||
|
|
||||||
```
|
```
|
||||||
## High Level Overview of Change
|
## High Level Overview of Change
|
||||||
|
|
||||||
This PR only changes the version number. It will be merged as
|
This PR only changes the version number. It will be merged as
|
||||||
soon as Github CI actions successfully complete.
|
soon as Github CI actions successfully complete.
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Wait for CI to successfully complete, and get someone to approve
|
3. Wait for CI to successfully complete, and get someone to approve
|
||||||
the PR. (It is safe to ignore known CI issues.)
|
the PR. (It is safe to ignore known CI issues.)
|
||||||
4. Push the updated `develop` branch using your `release-next`
|
4. Push the updated `develop` branch using your `release-next`
|
||||||
branch. **Do not use the Github UI. It's important to preserve
|
branch. **Do not use the Github UI. It's important to preserve
|
||||||
commit IDs.**
|
commit IDs.**
|
||||||
|
|
||||||
```
|
```
|
||||||
git push upstream-push release-next:develop
|
git push upstream-push release-next:develop
|
||||||
```
|
```
|
||||||
|
|
||||||
5. In the unlikely event that the push fails because someone has merged
|
5. In the unlikely event that the push fails because someone has merged
|
||||||
something else in the meantime, rebase your branch onto the updated
|
something else in the meantime, rebase your branch onto the updated
|
||||||
`develop` branch, push again, and go back to step 3.
|
`develop` branch, push again, and go back to step 3.
|
||||||
@@ -630,22 +639,25 @@ git push upstream-push release-next:develop
|
|||||||
7. Once this is done, forward progress on `develop` can continue
|
7. Once this is done, forward progress on `develop` can continue
|
||||||
(other PRs may be merged).
|
(other PRs may be merged).
|
||||||
8. Now create a Pull Request for `release-next` with **`release`** as
|
8. Now create a Pull Request for `release-next` with **`release`** as
|
||||||
the base branch. Instead of the default template, reuse and update
|
the base branch. Instead of the default template, reuse and update
|
||||||
the message from the previous release. Include the following verbiage
|
the message from the previous release. Include the following verbiage
|
||||||
somewhere in the description:
|
somewhere in the description:
|
||||||
|
|
||||||
```
|
```
|
||||||
The base branch is `release`. [All releases (including
|
The base branch is `release`. [All releases (including
|
||||||
betas)](https://github.com/XRPLF/rippled/blob/develop/CONTRIBUTING.md#before-you-start)
|
betas)](https://github.com/XRPLF/rippled/blob/develop/CONTRIBUTING.md#before-you-start)
|
||||||
go in `release`. This PR branch will be pushed directly to `release` (not
|
go in `release`. This PR branch will be pushed directly to `release` (not
|
||||||
squashed or rebased, and not using the GitHub UI).
|
squashed or rebased, and not using the GitHub UI).
|
||||||
```
|
```
|
||||||
|
|
||||||
7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur
|
7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur
|
||||||
offline, but at least one approval will be needed on the PR.
|
offline, but at least one approval will be needed on the PR.
|
||||||
* If issues are discovered during testing, simply abandon the
|
- If issues are discovered during testing, simply abandon the
|
||||||
release. It's easy to start a new release, it should be easy to
|
release. It's easy to start a new release, it should be easy to
|
||||||
abandon one. **DO NOT REUSE THE VERSION NUMBER.** e.g. If you
|
abandon one. **DO NOT REUSE THE VERSION NUMBER.** e.g. If you
|
||||||
abandon 2.4.0-b1, the next attempt will be 2.4.0-b2.
|
abandon 2.4.0-b1, the next attempt will be 2.4.0-b2.
|
||||||
8. Once everything is ready to go, push to `release`.
|
8. Once everything is ready to go, push to `release`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -666,23 +678,28 @@ git log -1 --oneline
|
|||||||
# Other branches, including some from upstream-push, may also be
|
# Other branches, including some from upstream-push, may also be
|
||||||
# present.
|
# present.
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Tag the release, too.
|
9. Tag the release, too.
|
||||||
|
|
||||||
```
|
```
|
||||||
git tag <version number>
|
git tag <version number>
|
||||||
git push upstream-push <version number>
|
git push upstream-push <version number>
|
||||||
```
|
```
|
||||||
|
|
||||||
10. Delete the `release-next` branch on the repo. Use the Github UI or:
|
10. Delete the `release-next` branch on the repo. Use the Github UI or:
|
||||||
|
|
||||||
```
|
```
|
||||||
git push --delete upstream-push release-next
|
git push --delete upstream-push release-next
|
||||||
```
|
```
|
||||||
|
|
||||||
11. Finally [create a new release on
|
11. Finally [create a new release on
|
||||||
Github](https://github.com/XRPLF/rippled/releases).
|
Github](https://github.com/XRPLF/rippled/releases).
|
||||||
|
|
||||||
#### Release candidates after the first
|
#### Release candidates after the first
|
||||||
|
|
||||||
Once the first release candidate is [merged into
|
Once the first release candidate is [merged into
|
||||||
release](#making-the-release), then `release` and `develop` *are allowed
|
release](#making-the-release), then `release` and `develop` _are allowed
|
||||||
to diverge*.
|
to diverge_.
|
||||||
|
|
||||||
If a bug or issue is discovered in a version that has a release
|
If a bug or issue is discovered in a version that has a release
|
||||||
candidate being tested, any fix and new version will need to be applied
|
candidate being tested, any fix and new version will need to be applied
|
||||||
@@ -690,7 +707,7 @@ against `release`, then reverse-merged to `develop`. This helps keep git
|
|||||||
history as linear as possible.
|
history as linear as possible.
|
||||||
|
|
||||||
A `release-next` branch will be created from `release`, and any further
|
A `release-next` branch will be created from `release`, and any further
|
||||||
work for that release must be based on `release-next`. Specifically,
|
work for that release must be based on `release-next`. Specifically,
|
||||||
PRs must use `release-next` as the base, and those PRs will be merged
|
PRs must use `release-next` as the base, and those PRs will be merged
|
||||||
directly to `release-next` when approved. Changes should be restricted
|
directly to `release-next` when approved. Changes should be restricted
|
||||||
to bug fixes, but other changes may be necessary from time to time.
|
to bug fixes, but other changes may be necessary from time to time.
|
||||||
@@ -713,17 +730,21 @@ Once the RC is merged and tagged, it needs to be reverse merged into
|
|||||||
1. Create a branch, based on `upstream/develop`.
|
1. Create a branch, based on `upstream/develop`.
|
||||||
The branch name is not important, but could include "mergeNNNrcN".
|
The branch name is not important, but could include "mergeNNNrcN".
|
||||||
E.g. For release A.B.C-rcD, use `mergeABCrcD`.
|
E.g. For release A.B.C-rcD, use `mergeABCrcD`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
git checkout --no-track -b mergeABCrcD upstream/develop
|
git checkout --no-track -b mergeABCrcD upstream/develop
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Merge `release` into your branch.
|
2. Merge `release` into your branch.
|
||||||
|
|
||||||
```
|
```
|
||||||
# I like the "--edit --log --verbose" parameters, but they are
|
# I like the "--edit --log --verbose" parameters, but they are
|
||||||
# not required.
|
# not required.
|
||||||
git merge upstream/release
|
git merge upstream/release
|
||||||
```
|
```
|
||||||
|
|
||||||
3. `BuildInfo.cpp` will have a conflict with the version number.
|
3. `BuildInfo.cpp` will have a conflict with the version number.
|
||||||
Resolve it with the version from `develop` - the higher version.
|
Resolve it with the version from `develop` - the higher version.
|
||||||
4. Push your branch to your repo (or `upstream` if you have permission),
|
4. Push your branch to your repo (or `upstream` if you have permission),
|
||||||
@@ -731,22 +752,27 @@ git merge upstream/release
|
|||||||
simply indicate that this is a merge of the RC. The "Context" should
|
simply indicate that this is a merge of the RC. The "Context" should
|
||||||
summarize the changes from the RC. Include the following text
|
summarize the changes from the RC. Include the following text
|
||||||
prominently:
|
prominently:
|
||||||
|
|
||||||
```
|
```
|
||||||
This PR must be merged manually using a push. Do not use the Github UI.
|
This PR must be merged manually using a push. Do not use the Github UI.
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Depending on the complexity of the changes, and/or merge conflicts,
|
5. Depending on the complexity of the changes, and/or merge conflicts,
|
||||||
the PR may need a thorough review, or just a sign-off that the
|
the PR may need a thorough review, or just a sign-off that the
|
||||||
merge was done correctly.
|
merge was done correctly.
|
||||||
6. If `develop` is updated before this PR is merged, do not merge
|
6. If `develop` is updated before this PR is merged, do not merge
|
||||||
`develop` back into your branch. Instead rebase preserving merges,
|
`develop` back into your branch. Instead rebase preserving merges,
|
||||||
or do the merge again. (See also the `rerere` git config setting.)
|
or do the merge again. (See also the `rerere` git config setting.)
|
||||||
|
|
||||||
```
|
```
|
||||||
git rebase --rebase-merges upstream/develop
|
git rebase --rebase-merges upstream/develop
|
||||||
# OR
|
# OR
|
||||||
git reset --hard upstream/develop
|
git reset --hard upstream/develop
|
||||||
git merge upstream/release
|
git merge upstream/release
|
||||||
```
|
```
|
||||||
|
|
||||||
7. When the PR is ready, push it to `develop`.
|
7. When the PR is ready, push it to `develop`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -757,8 +783,8 @@ git push upstream-push mergeABCrcD:develop
|
|||||||
|
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
```
|
```
|
||||||
Development on `develop` can proceed as normal.
|
|
||||||
|
|
||||||
|
Development on `develop` can proceed as normal.
|
||||||
|
|
||||||
#### Final releases
|
#### Final releases
|
||||||
|
|
||||||
@@ -773,7 +799,7 @@ internally as if they were RCs (at minimum, ensuring unit tests pass,
|
|||||||
and the app starts, syncs, and stops cleanly across all three
|
and the app starts, syncs, and stops cleanly across all three
|
||||||
platforms.)
|
platforms.)
|
||||||
|
|
||||||
*If in doubt, make an RC first.*
|
_If in doubt, make an RC first._
|
||||||
|
|
||||||
The process for building a final release is very similar to [the process
|
The process for building a final release is very similar to [the process
|
||||||
for building a beta](#making-the-release), except the code will be
|
for building a beta](#making-the-release), except the code will be
|
||||||
@@ -785,20 +811,23 @@ moving from `release` to `master` instead of from `develop` to
|
|||||||
number. As above, or using the
|
number. As above, or using the
|
||||||
[update-version] script.
|
[update-version] script.
|
||||||
2. Create a Pull Request for `master-next` with **`master`** as
|
2. Create a Pull Request for `master-next` with **`master`** as
|
||||||
the base branch. Instead of the default template, reuse and update
|
the base branch. Instead of the default template, reuse and update
|
||||||
the message from the previous final release. Include the following verbiage
|
the message from the previous final release. Include the following verbiage
|
||||||
somewhere in the description:
|
somewhere in the description:
|
||||||
|
|
||||||
```
|
```
|
||||||
The base branch is `master`. This PR branch will be pushed directly to
|
The base branch is `master`. This PR branch will be pushed directly to
|
||||||
`release` and `master` (not squashed or rebased, and not using the
|
`release` and `master` (not squashed or rebased, and not using the
|
||||||
GitHub UI).
|
GitHub UI).
|
||||||
```
|
```
|
||||||
|
|
||||||
7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur
|
7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur
|
||||||
offline, but at least one approval will be needed on the PR.
|
offline, but at least one approval will be needed on the PR.
|
||||||
* If issues are discovered during testing, close the PR, delete
|
- If issues are discovered during testing, close the PR, delete
|
||||||
`master-next`, and move development back to `release`, [issuing
|
`master-next`, and move development back to `release`, [issuing
|
||||||
more RCs as necessary](#release-candidates-after-the-first)
|
more RCs as necessary](#release-candidates-after-the-first)
|
||||||
8. Once everything is ready to go, push to `release` and `master`.
|
8. Once everything is ready to go, push to `release` and `master`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -821,15 +850,20 @@ git log -1 --oneline
|
|||||||
# Other branches, including some from upstream-push, may also be
|
# Other branches, including some from upstream-push, may also be
|
||||||
# present.
|
# present.
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Tag the release, too.
|
9. Tag the release, too.
|
||||||
|
|
||||||
```
|
```
|
||||||
git tag <version number>
|
git tag <version number>
|
||||||
git push upstream-push <version number>
|
git push upstream-push <version number>
|
||||||
```
|
```
|
||||||
|
|
||||||
10. Delete the `master-next` branch on the repo. Use the Github UI or:
|
10. Delete the `master-next` branch on the repo. Use the Github UI or:
|
||||||
|
|
||||||
```
|
```
|
||||||
git push --delete upstream-push master-next
|
git push --delete upstream-push master-next
|
||||||
```
|
```
|
||||||
|
|
||||||
11. [Create a new release on
|
11. [Create a new release on
|
||||||
Github](https://github.com/XRPLF/rippled/releases). Be sure that
|
Github](https://github.com/XRPLF/rippled/releases). Be sure that
|
||||||
"Set as the latest release" is checked.
|
"Set as the latest release" is checked.
|
||||||
@@ -856,11 +890,13 @@ any branch. When it's ready to merge, jump to step 3 using your branch
|
|||||||
instead of `master-next`.
|
instead of `master-next`.
|
||||||
|
|
||||||
1. Create a `master-next` branch from `master`.
|
1. Create a `master-next` branch from `master`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git checkout --no-track -b master-next upstream/master
|
git checkout --no-track -b master-next upstream/master
|
||||||
git push upstream-push
|
git push upstream-push
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Open any PRs for the pending hotfix using `master-next` as the base,
|
2. Open any PRs for the pending hotfix using `master-next` as the base,
|
||||||
so they can be merged directly in to it. Unlike `develop`, though,
|
so they can be merged directly in to it. Unlike `develop`, though,
|
||||||
`master-next` can be thrown away and recreated if necessary.
|
`master-next` can be thrown away and recreated if necessary.
|
||||||
@@ -868,19 +904,22 @@ git fetch upstreams
|
|||||||
steps as above, or use the
|
steps as above, or use the
|
||||||
[update-version] script.
|
[update-version] script.
|
||||||
4. Create a Pull Request for `master-next` with **`master`** as
|
4. Create a Pull Request for `master-next` with **`master`** as
|
||||||
the base branch. Instead of the default template, reuse and update
|
the base branch. Instead of the default template, reuse and update
|
||||||
the message from the previous final release. Include the following verbiage
|
the message from the previous final release. Include the following verbiage
|
||||||
somewhere in the description:
|
somewhere in the description:
|
||||||
|
|
||||||
```
|
```
|
||||||
The base branch is `master`. This PR branch will be pushed directly to
|
The base branch is `master`. This PR branch will be pushed directly to
|
||||||
`master` (not squashed or rebased, and not using the GitHub UI).
|
`master` (not squashed or rebased, and not using the GitHub UI).
|
||||||
```
|
```
|
||||||
|
|
||||||
7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur
|
7. Sign-offs for the three platforms (Linux, Mac, Windows) usually occur
|
||||||
offline, but at least one approval will be needed on the PR.
|
offline, but at least one approval will be needed on the PR.
|
||||||
* If issues are discovered during testing, update `master-next` as
|
- If issues are discovered during testing, update `master-next` as
|
||||||
needed, but ensure that the changes are properly squashed, and the
|
needed, but ensure that the changes are properly squashed, and the
|
||||||
version setting commit remains last
|
version setting commit remains last
|
||||||
8. Once everything is ready to go, push to `master` **only**.
|
8. Once everything is ready to go, push to `master` **only**.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -901,15 +940,20 @@ git log -1 --oneline
|
|||||||
# Other branches, including some from upstream-push, may also be
|
# Other branches, including some from upstream-push, may also be
|
||||||
# present.
|
# present.
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Tag the release, too.
|
9. Tag the release, too.
|
||||||
|
|
||||||
```
|
```
|
||||||
git tag <version number>
|
git tag <version number>
|
||||||
git push upstream-push <version number>
|
git push upstream-push <version number>
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Delete the `master-next` branch on the repo.
|
9. Delete the `master-next` branch on the repo.
|
||||||
|
|
||||||
```
|
```
|
||||||
git push --delete upstream-push master-next
|
git push --delete upstream-push master-next
|
||||||
```
|
```
|
||||||
|
|
||||||
10. [Create a new release on
|
10. [Create a new release on
|
||||||
Github](https://github.com/XRPLF/rippled/releases). Be sure that
|
Github](https://github.com/XRPLF/rippled/releases). Be sure that
|
||||||
"Set as the latest release" is checked.
|
"Set as the latest release" is checked.
|
||||||
@@ -921,17 +965,21 @@ Once the hotfix is released, it needs to be reverse merged into
|
|||||||
1. Create a branch in your own repo, based on `upstream/develop`.
|
1. Create a branch in your own repo, based on `upstream/develop`.
|
||||||
The branch name is not important, but could include "mergeNNN".
|
The branch name is not important, but could include "mergeNNN".
|
||||||
E.g. For release 2.2.3, use `merge223`.
|
E.g. For release 2.2.3, use `merge223`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
git checkout --no-track -b merge223 upstream/develop
|
git checkout --no-track -b merge223 upstream/develop
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Merge master into your branch.
|
2. Merge master into your branch.
|
||||||
|
|
||||||
```
|
```
|
||||||
# I like the "--edit --log --verbose" parameters, but they are
|
# I like the "--edit --log --verbose" parameters, but they are
|
||||||
# not required.
|
# not required.
|
||||||
git merge upstream/master
|
git merge upstream/master
|
||||||
```
|
```
|
||||||
|
|
||||||
3. `BuildInfo.cpp` will have a conflict with the version number.
|
3. `BuildInfo.cpp` will have a conflict with the version number.
|
||||||
Resolve it with the version from `develop` - the higher version.
|
Resolve it with the version from `develop` - the higher version.
|
||||||
4. Push your branch to your repo, and open a normal PR against
|
4. Push your branch to your repo, and open a normal PR against
|
||||||
@@ -939,22 +987,27 @@ git merge upstream/master
|
|||||||
is a merge of the hotfix version. The "Context" should summarize
|
is a merge of the hotfix version. The "Context" should summarize
|
||||||
the changes from the hotfix. Include the following text
|
the changes from the hotfix. Include the following text
|
||||||
prominently:
|
prominently:
|
||||||
|
|
||||||
```
|
```
|
||||||
This PR must be merged manually using a --ff-only merge. Do not use the Github UI.
|
This PR must be merged manually using a --ff-only merge. Do not use the Github UI.
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Depending on the complexity of the hotfix, and/or merge conflicts,
|
5. Depending on the complexity of the hotfix, and/or merge conflicts,
|
||||||
the PR may need a thorough review, or just a sign-off that the
|
the PR may need a thorough review, or just a sign-off that the
|
||||||
merge was done correctly.
|
merge was done correctly.
|
||||||
6. If `develop` is updated before this PR is merged, do not merge
|
6. If `develop` is updated before this PR is merged, do not merge
|
||||||
`develop` back into your branch. Instead rebase preserving merges,
|
`develop` back into your branch. Instead rebase preserving merges,
|
||||||
or do the merge again. (See also the `rerere` git config setting.)
|
or do the merge again. (See also the `rerere` git config setting.)
|
||||||
|
|
||||||
```
|
```
|
||||||
git rebase --rebase-merges upstream/develop
|
git rebase --rebase-merges upstream/develop
|
||||||
# OR
|
# OR
|
||||||
git reset --hard upstream/develop
|
git reset --hard upstream/develop
|
||||||
git merge upstream/master
|
git merge upstream/master
|
||||||
```
|
```
|
||||||
|
|
||||||
7. When the PR is ready, push it to `develop`.
|
7. When the PR is ready, push it to `develop`.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -963,6 +1016,7 @@ git log --show-signature "upstream/develop..HEAD"
|
|||||||
|
|
||||||
git push upstream-push HEAD:develop
|
git push upstream-push HEAD:develop
|
||||||
```
|
```
|
||||||
|
|
||||||
Development on `develop` can proceed as normal. It is recommended to
|
Development on `develop` can proceed as normal. It is recommended to
|
||||||
create a beta (or RC) immediately to ensure that everything worked as
|
create a beta (or RC) immediately to ensure that everything worked as
|
||||||
expected.
|
expected.
|
||||||
@@ -977,12 +1031,13 @@ a significant fraction of users, which would necessitate a hotfix / point
|
|||||||
release to that version as well as any later versions.
|
release to that version as well as any later versions.
|
||||||
|
|
||||||
This scenario would follow the same basic procedure as above,
|
This scenario would follow the same basic procedure as above,
|
||||||
except that *none* of `develop`, `release`, or `master`
|
except that _none_ of `develop`, `release`, or `master`
|
||||||
would be touched during the release process.
|
would be touched during the release process.
|
||||||
|
|
||||||
In this example, consider if version 2.1.1 needed to be patched.
|
In this example, consider if version 2.1.1 needed to be patched.
|
||||||
|
|
||||||
1. Create two branches in the main (`upstream`) repo.
|
1. Create two branches in the main (`upstream`) repo.
|
||||||
|
|
||||||
```
|
```
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
|
|
||||||
@@ -996,6 +1051,7 @@ git push upstream-push
|
|||||||
|
|
||||||
git fetch upstreams
|
git fetch upstreams
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Work continues as above, except using `master-2.1.2`as
|
2. Work continues as above, except using `master-2.1.2`as
|
||||||
the base branch for any merging, packaging, etc.
|
the base branch for any merging, packaging, etc.
|
||||||
3. After the release is tagged and packages are built, you could
|
3. After the release is tagged and packages are built, you could
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
ISC License
|
ISC License
|
||||||
|
|
||||||
Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant.
|
Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant.
|
||||||
Copyright (c) 2012-2020, the XRP Ledger developers.
|
Copyright (c) 2012-2020, the XRP Ledger developers.
|
||||||
@@ -14,4 +14,3 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
|
|||||||
24
README.md
24
README.md
@@ -1,19 +1,23 @@
|
|||||||
|
[](https://codecov.io/gh/XRPLF/rippled)
|
||||||
|
|
||||||
# The XRP Ledger
|
# The XRP Ledger
|
||||||
|
|
||||||
The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator.
|
The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator.
|
||||||
|
|
||||||
## XRP
|
## XRP
|
||||||
|
|
||||||
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
|
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
|
||||||
|
|
||||||
## rippled
|
## rippled
|
||||||
|
|
||||||
The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).
|
The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).
|
||||||
|
|
||||||
If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.)
|
If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.)
|
||||||
|
|
||||||
### Build from Source
|
### Build from Source
|
||||||
|
|
||||||
* [Read the build instructions in `BUILD.md`](BUILD.md)
|
- [Read the build instructions in `BUILD.md`](BUILD.md)
|
||||||
* If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues)
|
- If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues)
|
||||||
|
|
||||||
## Key Features of the XRP Ledger
|
## Key Features of the XRP Ledger
|
||||||
|
|
||||||
@@ -33,7 +37,6 @@ If you are interested in running an **API Server** (including a **Full History S
|
|||||||
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
|
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
|
||||||
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
|
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
|
||||||
|
|
||||||
|
|
||||||
## Source Code
|
## Source Code
|
||||||
|
|
||||||
Here are some good places to start learning the source code:
|
Here are some good places to start learning the source code:
|
||||||
@@ -45,7 +48,7 @@ Here are some good places to start learning the source code:
|
|||||||
### Repository Contents
|
### Repository Contents
|
||||||
|
|
||||||
| Folder | Contents |
|
| Folder | Contents |
|
||||||
|:-----------|:-------------------------------------------------|
|
| :--------- | :----------------------------------------------- |
|
||||||
| `./bin` | Scripts and data files for Ripple integrators. |
|
| `./bin` | Scripts and data files for Ripple integrators. |
|
||||||
| `./Builds` | Platform-specific guides for building `rippled`. |
|
| `./Builds` | Platform-specific guides for building `rippled`. |
|
||||||
| `./docs` | Source documentation files and doxygen config. |
|
| `./docs` | Source documentation files and doxygen config. |
|
||||||
@@ -55,15 +58,14 @@ Here are some good places to start learning the source code:
|
|||||||
Some of the directories under `src` are external repositories included using
|
Some of the directories under `src` are external repositories included using
|
||||||
git-subtree. See those directories' README files for more details.
|
git-subtree. See those directories' README files for more details.
|
||||||
|
|
||||||
|
|
||||||
## Additional Documentation
|
## Additional Documentation
|
||||||
|
|
||||||
* [XRP Ledger Dev Portal](https://xrpl.org/)
|
- [XRP Ledger Dev Portal](https://xrpl.org/)
|
||||||
* [Setup and Installation](https://xrpl.org/install-rippled.html)
|
- [Setup and Installation](https://xrpl.org/install-rippled.html)
|
||||||
* [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/)
|
- [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/)
|
||||||
|
|
||||||
## See Also
|
## See Also
|
||||||
|
|
||||||
* [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio)
|
- [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio)
|
||||||
* [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server)
|
- [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server)
|
||||||
* [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi)
|
- [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi)
|
||||||
|
|||||||
4817
RELEASENOTES.md
4817
RELEASENOTES.md
File diff suppressed because it is too large
Load Diff
14
SECURITY.md
14
SECURITY.md
@@ -2,7 +2,6 @@
|
|||||||
|
|
||||||
For more details on operating an XRP Ledger server securely, please visit https://xrpl.org/manage-the-rippled-server.html.
|
For more details on operating an XRP Ledger server securely, please visit https://xrpl.org/manage-the-rippled-server.html.
|
||||||
|
|
||||||
|
|
||||||
# Security Policy
|
# Security Policy
|
||||||
|
|
||||||
## Supported Versions
|
## Supported Versions
|
||||||
@@ -77,13 +76,14 @@ The amount paid varies dramatically. Vulnerabilities that are harmless on their
|
|||||||
|
|
||||||
To report a qualifying bug, please send a detailed report to:
|
To report a qualifying bug, please send a detailed report to:
|
||||||
|
|
||||||
|Email Address|bugs@ripple.com |
|
| Email Address | bugs@ripple.com |
|
||||||
|:-----------:|:----------------------------------------------------|
|
| :-----------: | :-------------------------------------------------- |
|
||||||
|Short Key ID | `0xC57929BE` |
|
| Short Key ID | `0xC57929BE` |
|
||||||
|Long Key ID | `0xCD49A0AFC57929BE` |
|
| Long Key ID | `0xCD49A0AFC57929BE` |
|
||||||
|Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` |
|
| Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` |
|
||||||
|
|
||||||
|
The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is:
|
||||||
|
|
||||||
The full PGP key for this address, which is also available on several key servers (e.g. on [keys.gnupg.net](https://keys.gnupg.net)), is:
|
|
||||||
```
|
```
|
||||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||||
mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt
|
mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt
|
||||||
|
|||||||
470
bin/browser.js
470
bin/browser.js
@@ -1,470 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
//
|
|
||||||
// ledger?l=L
|
|
||||||
// transaction?h=H
|
|
||||||
// ledger_entry?l=L&h=H
|
|
||||||
// account?l=L&a=A
|
|
||||||
// directory?l=L&dir_root=H&i=I
|
|
||||||
// directory?l=L&o=A&i=I // owner directory
|
|
||||||
// offer?l=L&offer=H
|
|
||||||
// offer?l=L&account=A&i=I
|
|
||||||
// ripple_state=l=L&a=A&b=A&c=C
|
|
||||||
// account_lines?l=L&a=A
|
|
||||||
//
|
|
||||||
// A=address
|
|
||||||
// C=currency 3 letter code
|
|
||||||
// H=hash
|
|
||||||
// I=index
|
|
||||||
// L=current | closed | validated | index | hash
|
|
||||||
//
|
|
||||||
|
|
||||||
var async = require("async");
|
|
||||||
var extend = require("extend");
|
|
||||||
var http = require("http");
|
|
||||||
var url = require("url");
|
|
||||||
|
|
||||||
var Remote = require("ripple-lib").Remote;
|
|
||||||
|
|
||||||
var program = process.argv[1];
|
|
||||||
|
|
||||||
var httpd_response = function (res, opts) {
|
|
||||||
var self=this;
|
|
||||||
|
|
||||||
res.statusCode = opts.statusCode;
|
|
||||||
res.end(
|
|
||||||
"<HTML>"
|
|
||||||
+ "<HEAD><TITLE>Title</TITLE></HEAD>"
|
|
||||||
+ "<BODY BACKGROUND=\"#FFFFFF\">"
|
|
||||||
+ "State:" + self.state
|
|
||||||
+ "<UL>"
|
|
||||||
+ "<LI><A HREF=\"/\">home</A>"
|
|
||||||
+ "<LI>" + html_link('r4EM4gBQfr1QgQLXSPF4r7h84qE9mb6iCC')
|
|
||||||
// + "<LI><A HREF=\""+test+"\">rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh</A>"
|
|
||||||
+ "<LI><A HREF=\"/ledger\">ledger</A>"
|
|
||||||
+ "</UL>"
|
|
||||||
+ (opts.body || '')
|
|
||||||
+ '<HR><PRE>'
|
|
||||||
+ (opts.url || '')
|
|
||||||
+ '</PRE>'
|
|
||||||
+ "</BODY>"
|
|
||||||
+ "</HTML>"
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
var html_link = function (generic) {
|
|
||||||
return '<A HREF="' + build_uri({ type: 'account', account: generic}) + '">' + generic + '</A>';
|
|
||||||
};
|
|
||||||
|
|
||||||
// Build a link to a type.
|
|
||||||
var build_uri = function (params, opts) {
|
|
||||||
var c;
|
|
||||||
|
|
||||||
if (params.type === 'account') {
|
|
||||||
c = {
|
|
||||||
pathname: 'account',
|
|
||||||
query: {
|
|
||||||
l: params.ledger,
|
|
||||||
a: params.account,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
} else if (params.type === 'ledger') {
|
|
||||||
c = {
|
|
||||||
pathname: 'ledger',
|
|
||||||
query: {
|
|
||||||
l: params.ledger,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
} else if (params.type === 'transaction') {
|
|
||||||
c = {
|
|
||||||
pathname: 'transaction',
|
|
||||||
query: {
|
|
||||||
h: params.hash,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
} else {
|
|
||||||
c = {};
|
|
||||||
}
|
|
||||||
|
|
||||||
opts = opts || {};
|
|
||||||
|
|
||||||
c.protocol = "http";
|
|
||||||
c.hostname = opts.hostname || self.base.hostname;
|
|
||||||
c.port = opts.port || self.base.port;
|
|
||||||
|
|
||||||
return url.format(c);
|
|
||||||
};
|
|
||||||
|
|
||||||
var build_link = function (item, link) {
|
|
||||||
console.log(link);
|
|
||||||
return "<A HREF=" + link + ">" + item + "</A>";
|
|
||||||
};
|
|
||||||
|
|
||||||
var rewrite_field = function (type, obj, field, opts) {
|
|
||||||
if (field in obj) {
|
|
||||||
obj[field] = rewrite_type(type, obj[field], opts);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
var rewrite_type = function (type, obj, opts) {
|
|
||||||
if ('amount' === type) {
|
|
||||||
if ('string' === typeof obj) {
|
|
||||||
// XRP.
|
|
||||||
return '<B>' + obj + '</B>';
|
|
||||||
|
|
||||||
} else {
|
|
||||||
rewrite_field('address', obj, 'issuer', opts);
|
|
||||||
|
|
||||||
return obj;
|
|
||||||
}
|
|
||||||
return build_link(
|
|
||||||
obj,
|
|
||||||
build_uri({
|
|
||||||
type: 'account',
|
|
||||||
account: obj
|
|
||||||
}, opts)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if ('address' === type) {
|
|
||||||
return build_link(
|
|
||||||
obj,
|
|
||||||
build_uri({
|
|
||||||
type: 'account',
|
|
||||||
account: obj
|
|
||||||
}, opts)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
else if ('ledger' === type) {
|
|
||||||
return build_link(
|
|
||||||
obj,
|
|
||||||
build_uri({
|
|
||||||
type: 'ledger',
|
|
||||||
ledger: obj,
|
|
||||||
}, opts)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
else if ('node' === type) {
|
|
||||||
// A node
|
|
||||||
if ('PreviousTxnID' in obj)
|
|
||||||
obj.PreviousTxnID = rewrite_type('transaction', obj.PreviousTxnID, opts);
|
|
||||||
|
|
||||||
if ('Offer' === obj.LedgerEntryType) {
|
|
||||||
if ('NewFields' in obj) {
|
|
||||||
if ('TakerGets' in obj.NewFields)
|
|
||||||
obj.NewFields.TakerGets = rewrite_type('amount', obj.NewFields.TakerGets, opts);
|
|
||||||
|
|
||||||
if ('TakerPays' in obj.NewFields)
|
|
||||||
obj.NewFields.TakerPays = rewrite_type('amount', obj.NewFields.TakerPays, opts);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
obj.LedgerEntryType = '<B>' + obj.LedgerEntryType + '</B>';
|
|
||||||
|
|
||||||
return obj;
|
|
||||||
}
|
|
||||||
else if ('transaction' === type) {
|
|
||||||
// Reference to a transaction.
|
|
||||||
return build_link(
|
|
||||||
obj,
|
|
||||||
build_uri({
|
|
||||||
type: 'transaction',
|
|
||||||
hash: obj
|
|
||||||
}, opts)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 'ERROR: ' + type;
|
|
||||||
};
|
|
||||||
|
|
||||||
var rewrite_object = function (obj, opts) {
|
|
||||||
var out = extend({}, obj);
|
|
||||||
|
|
||||||
rewrite_field('address', out, 'Account', opts);
|
|
||||||
|
|
||||||
rewrite_field('ledger', out, 'parent_hash', opts);
|
|
||||||
rewrite_field('ledger', out, 'ledger_index', opts);
|
|
||||||
rewrite_field('ledger', out, 'ledger_current_index', opts);
|
|
||||||
rewrite_field('ledger', out, 'ledger_hash', opts);
|
|
||||||
|
|
||||||
if ('ledger' in obj) {
|
|
||||||
// It's a ledger header.
|
|
||||||
out.ledger = rewrite_object(out.ledger, opts);
|
|
||||||
|
|
||||||
if ('ledger_hash' in out.ledger)
|
|
||||||
out.ledger.ledger_hash = '<B>' + out.ledger.ledger_hash + '</B>';
|
|
||||||
|
|
||||||
delete out.ledger.hash;
|
|
||||||
delete out.ledger.totalCoins;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ('TransactionType' in obj) {
|
|
||||||
// It's a transaction.
|
|
||||||
out.TransactionType = '<B>' + obj.TransactionType + '</B>';
|
|
||||||
|
|
||||||
rewrite_field('amount', out, 'TakerGets', opts);
|
|
||||||
rewrite_field('amount', out, 'TakerPays', opts);
|
|
||||||
rewrite_field('ledger', out, 'inLedger', opts);
|
|
||||||
|
|
||||||
out.meta.AffectedNodes = out.meta.AffectedNodes.map(function (node) {
|
|
||||||
var kind = 'CreatedNode' in node
|
|
||||||
? 'CreatedNode'
|
|
||||||
: 'ModifiedNode' in node
|
|
||||||
? 'ModifiedNode'
|
|
||||||
: 'DeletedNode' in node
|
|
||||||
? 'DeletedNode'
|
|
||||||
: undefined;
|
|
||||||
|
|
||||||
if (kind) {
|
|
||||||
node[kind] = rewrite_type('node', node[kind], opts);
|
|
||||||
}
|
|
||||||
return node;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
else if ('node' in obj && 'LedgerEntryType' in obj.node) {
|
|
||||||
// Its a ledger entry.
|
|
||||||
|
|
||||||
if (obj.node.LedgerEntryType === 'AccountRoot') {
|
|
||||||
rewrite_field('address', out.node, 'Account', opts);
|
|
||||||
rewrite_field('transaction', out.node, 'PreviousTxnID', opts);
|
|
||||||
rewrite_field('ledger', out.node, 'PreviousTxnLgrSeq', opts);
|
|
||||||
}
|
|
||||||
|
|
||||||
out.node.LedgerEntryType = '<B>' + out.node.LedgerEntryType + '</B>';
|
|
||||||
}
|
|
||||||
|
|
||||||
return out;
|
|
||||||
};
|
|
||||||
|
|
||||||
var augment_object = function (obj, opts, done) {
|
|
||||||
if (obj.node.LedgerEntryType == 'AccountRoot') {
|
|
||||||
var tx_hash = obj.node.PreviousTxnID;
|
|
||||||
var tx_ledger = obj.node.PreviousTxnLgrSeq;
|
|
||||||
|
|
||||||
obj.history = [];
|
|
||||||
|
|
||||||
async.whilst(
|
|
||||||
function () { return tx_hash; },
|
|
||||||
function (callback) {
|
|
||||||
// console.log("augment_object: request: %s %s", tx_hash, tx_ledger);
|
|
||||||
opts.remote.request_tx(tx_hash)
|
|
||||||
.on('success', function (m) {
|
|
||||||
tx_hash = undefined;
|
|
||||||
tx_ledger = undefined;
|
|
||||||
|
|
||||||
//console.log("augment_object: ", JSON.stringify(m));
|
|
||||||
m.meta.AffectedNodes.filter(function(n) {
|
|
||||||
// console.log("augment_object: ", JSON.stringify(n));
|
|
||||||
// if (n.ModifiedNode)
|
|
||||||
// console.log("augment_object: %s %s %s %s %s %s/%s", 'ModifiedNode' in n, n.ModifiedNode && (n.ModifiedNode.LedgerEntryType === 'AccountRoot'), n.ModifiedNode && n.ModifiedNode.FinalFields && (n.ModifiedNode.FinalFields.Account === obj.node.Account), Object.keys(n)[0], n.ModifiedNode && (n.ModifiedNode.LedgerEntryType), obj.node.Account, n.ModifiedNode && n.ModifiedNode.FinalFields && n.ModifiedNode.FinalFields.Account);
|
|
||||||
// if ('ModifiedNode' in n && n.ModifiedNode.LedgerEntryType === 'AccountRoot')
|
|
||||||
// {
|
|
||||||
// console.log("***: ", JSON.stringify(m));
|
|
||||||
// console.log("***: ", JSON.stringify(n));
|
|
||||||
// }
|
|
||||||
return 'ModifiedNode' in n
|
|
||||||
&& n.ModifiedNode.LedgerEntryType === 'AccountRoot'
|
|
||||||
&& n.ModifiedNode.FinalFields
|
|
||||||
&& n.ModifiedNode.FinalFields.Account === obj.node.Account;
|
|
||||||
})
|
|
||||||
.forEach(function (n) {
|
|
||||||
tx_hash = n.ModifiedNode.PreviousTxnID;
|
|
||||||
tx_ledger = n.ModifiedNode.PreviousTxnLgrSeq;
|
|
||||||
|
|
||||||
obj.history.push({
|
|
||||||
tx_hash: tx_hash,
|
|
||||||
tx_ledger: tx_ledger
|
|
||||||
});
|
|
||||||
console.log("augment_object: next: %s %s", tx_hash, tx_ledger);
|
|
||||||
});
|
|
||||||
|
|
||||||
callback();
|
|
||||||
})
|
|
||||||
.on('error', function (m) {
|
|
||||||
callback(m);
|
|
||||||
})
|
|
||||||
.request();
|
|
||||||
},
|
|
||||||
function (err) {
|
|
||||||
if (err) {
|
|
||||||
done();
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
async.forEach(obj.history, function (o, callback) {
|
|
||||||
opts.remote.request_account_info(obj.node.Account)
|
|
||||||
.ledger_index(o.tx_ledger)
|
|
||||||
.on('success', function (m) {
|
|
||||||
//console.log("augment_object: ", JSON.stringify(m));
|
|
||||||
o.Balance = m.account_data.Balance;
|
|
||||||
// o.account_data = m.account_data;
|
|
||||||
callback();
|
|
||||||
})
|
|
||||||
.on('error', function (m) {
|
|
||||||
o.error = m;
|
|
||||||
callback();
|
|
||||||
})
|
|
||||||
.request();
|
|
||||||
},
|
|
||||||
function (err) {
|
|
||||||
done(err);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
done();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if (process.argv.length < 4 || process.argv.length > 7) {
|
|
||||||
console.log("Usage: %s ws_ip ws_port [<ip> [<port> [<start>]]]", program);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
var ws_ip = process.argv[2];
|
|
||||||
var ws_port = process.argv[3];
|
|
||||||
var ip = process.argv.length > 4 ? process.argv[4] : "127.0.0.1";
|
|
||||||
var port = process.argv.length > 5 ? process.argv[5] : "8080";
|
|
||||||
|
|
||||||
// console.log("START");
|
|
||||||
var self = this;
|
|
||||||
|
|
||||||
var remote = (new Remote({
|
|
||||||
websocket_ip: ws_ip,
|
|
||||||
websocket_port: ws_port,
|
|
||||||
trace: false
|
|
||||||
}))
|
|
||||||
.on('state', function (m) {
|
|
||||||
console.log("STATE: %s", m);
|
|
||||||
|
|
||||||
self.state = m;
|
|
||||||
})
|
|
||||||
// .once('ledger_closed', callback)
|
|
||||||
.connect()
|
|
||||||
;
|
|
||||||
|
|
||||||
self.base = {
|
|
||||||
hostname: ip,
|
|
||||||
port: port,
|
|
||||||
remote: remote,
|
|
||||||
};
|
|
||||||
|
|
||||||
// console.log("SERVE");
|
|
||||||
var server = http.createServer(function (req, res) {
|
|
||||||
var input = "";
|
|
||||||
|
|
||||||
req.setEncoding();
|
|
||||||
|
|
||||||
req.on('data', function (buffer) {
|
|
||||||
// console.log("DATA: %s", buffer);
|
|
||||||
input = input + buffer;
|
|
||||||
});
|
|
||||||
|
|
||||||
req.on('end', function () {
|
|
||||||
// console.log("URL: %s", req.url);
|
|
||||||
// console.log("HEADERS: %s", JSON.stringify(req.headers, undefined, 2));
|
|
||||||
|
|
||||||
var _parsed = url.parse(req.url, true);
|
|
||||||
var _url = JSON.stringify(_parsed, undefined, 2);
|
|
||||||
|
|
||||||
// console.log("HEADERS: %s", JSON.stringify(_parsed, undefined, 2));
|
|
||||||
if (_parsed.pathname === "/account") {
|
|
||||||
var request = remote
|
|
||||||
.request_ledger_entry('account_root')
|
|
||||||
.ledger_index(-1)
|
|
||||||
.account_root(_parsed.query.a)
|
|
||||||
.on('success', function (m) {
|
|
||||||
// console.log("account_root: %s", JSON.stringify(m, undefined, 2));
|
|
||||||
|
|
||||||
augment_object(m, self.base, function() {
|
|
||||||
httpd_response(res,
|
|
||||||
{
|
|
||||||
statusCode: 200,
|
|
||||||
url: _url,
|
|
||||||
body: "<PRE>"
|
|
||||||
+ JSON.stringify(rewrite_object(m, self.base), undefined, 2)
|
|
||||||
+ "</PRE>"
|
|
||||||
});
|
|
||||||
});
|
|
||||||
})
|
|
||||||
.request();
|
|
||||||
|
|
||||||
} else if (_parsed.pathname === "/ledger") {
|
|
||||||
var request = remote
|
|
||||||
.request_ledger(undefined, { expand: true, transactions: true })
|
|
||||||
.on('success', function (m) {
|
|
||||||
// console.log("Ledger: %s", JSON.stringify(m, undefined, 2));
|
|
||||||
|
|
||||||
httpd_response(res,
|
|
||||||
{
|
|
||||||
statusCode: 200,
|
|
||||||
url: _url,
|
|
||||||
body: "<PRE>"
|
|
||||||
+ JSON.stringify(rewrite_object(m, self.base), undefined, 2)
|
|
||||||
+"</PRE>"
|
|
||||||
});
|
|
||||||
})
|
|
||||||
|
|
||||||
if (_parsed.query.l && _parsed.query.l.length === 64) {
|
|
||||||
request.ledger_hash(_parsed.query.l);
|
|
||||||
}
|
|
||||||
else if (_parsed.query.l) {
|
|
||||||
request.ledger_index(Number(_parsed.query.l));
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
request.ledger_index(-1);
|
|
||||||
}
|
|
||||||
|
|
||||||
request.request();
|
|
||||||
|
|
||||||
} else if (_parsed.pathname === "/transaction") {
|
|
||||||
var request = remote
|
|
||||||
.request_tx(_parsed.query.h)
|
|
||||||
// .request_transaction_entry(_parsed.query.h)
|
|
||||||
// .ledger_select(_parsed.query.l)
|
|
||||||
.on('success', function (m) {
|
|
||||||
// console.log("transaction: %s", JSON.stringify(m, undefined, 2));
|
|
||||||
|
|
||||||
httpd_response(res,
|
|
||||||
{
|
|
||||||
statusCode: 200,
|
|
||||||
url: _url,
|
|
||||||
body: "<PRE>"
|
|
||||||
+ JSON.stringify(rewrite_object(m, self.base), undefined, 2)
|
|
||||||
+"</PRE>"
|
|
||||||
});
|
|
||||||
})
|
|
||||||
.on('error', function (m) {
|
|
||||||
httpd_response(res,
|
|
||||||
{
|
|
||||||
statusCode: 200,
|
|
||||||
url: _url,
|
|
||||||
body: "<PRE>"
|
|
||||||
+ 'ERROR: ' + JSON.stringify(m, undefined, 2)
|
|
||||||
+"</PRE>"
|
|
||||||
});
|
|
||||||
})
|
|
||||||
.request();
|
|
||||||
|
|
||||||
} else {
|
|
||||||
var test = build_uri({
|
|
||||||
type: 'account',
|
|
||||||
ledger: 'closed',
|
|
||||||
account: 'rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh',
|
|
||||||
}, self.base);
|
|
||||||
|
|
||||||
httpd_response(res,
|
|
||||||
{
|
|
||||||
statusCode: req.url === "/" ? 200 : 404,
|
|
||||||
url: _url,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
server.listen(port, ip, undefined,
|
|
||||||
function () {
|
|
||||||
console.log("Listening at: http://%s:%s", ip, port);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// vim:sw=2:sts=2:ts=8:et
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
var ripple = require('ripple-lib');
|
|
||||||
|
|
||||||
var v = {
|
|
||||||
seed: "snoPBrXtMeMyMHUVTgbuqAfg1SUTb",
|
|
||||||
addr: "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"
|
|
||||||
};
|
|
||||||
|
|
||||||
var remote = ripple.Remote.from_config({
|
|
||||||
"trusted" : true,
|
|
||||||
"websocket_ip" : "127.0.0.1",
|
|
||||||
"websocket_port" : 5006,
|
|
||||||
"websocket_ssl" : false,
|
|
||||||
"local_signing" : true
|
|
||||||
});
|
|
||||||
|
|
||||||
var tx_json = {
|
|
||||||
"Account" : v.addr,
|
|
||||||
"Amount" : "10000000",
|
|
||||||
"Destination" : "rEu2ULPiEQm1BAL8pYzmXnNX1aFX9sCks",
|
|
||||||
"Fee" : "10",
|
|
||||||
"Flags" : 0,
|
|
||||||
"Sequence" : 3,
|
|
||||||
"TransactionType" : "Payment"
|
|
||||||
|
|
||||||
//"SigningPubKey": '0396941B22791A448E5877A44CE98434DB217D6FB97D63F0DAD23BE49ED45173C9'
|
|
||||||
};
|
|
||||||
|
|
||||||
remote.on('connected', function () {
|
|
||||||
var req = remote.request_sign(v.seed, tx_json);
|
|
||||||
req.message.debug_signing = true;
|
|
||||||
req.on('success', function (result) {
|
|
||||||
console.log("SERVER RESULT");
|
|
||||||
console.log(result);
|
|
||||||
|
|
||||||
var sim = {};
|
|
||||||
var tx = remote.transaction();
|
|
||||||
tx.tx_json = tx_json;
|
|
||||||
tx._secret = v.seed;
|
|
||||||
tx.complete();
|
|
||||||
var unsigned = tx.serialize().to_hex();
|
|
||||||
tx.sign();
|
|
||||||
|
|
||||||
sim.tx_blob = tx.serialize().to_hex();
|
|
||||||
sim.tx_json = tx.tx_json;
|
|
||||||
sim.tx_signing_hash = tx.signing_hash().to_hex();
|
|
||||||
sim.tx_unsigned = unsigned;
|
|
||||||
|
|
||||||
console.log("\nLOCAL RESULT");
|
|
||||||
console.log(sim);
|
|
||||||
|
|
||||||
remote.connect(false);
|
|
||||||
});
|
|
||||||
req.on('error', function (err) {
|
|
||||||
if (err.error === "remoteError" && err.remote.error === "srcActNotFound") {
|
|
||||||
console.log("Please fund account "+v.addr+" to run this test.");
|
|
||||||
} else {
|
|
||||||
console.log('error', err);
|
|
||||||
}
|
|
||||||
remote.connect(false);
|
|
||||||
});
|
|
||||||
req.request();
|
|
||||||
|
|
||||||
});
|
|
||||||
remote.connect();
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
//
|
|
||||||
// Returns a Gravatar style hash as per: http://en.gravatar.com/site/implement/hash/
|
|
||||||
//
|
|
||||||
|
|
||||||
if (3 != process.argv.length) {
|
|
||||||
process.stderr.write("Usage: " + process.argv[1] + " email_address\n\nReturns gravatar style hash.\n");
|
|
||||||
process.exit(1);
|
|
||||||
|
|
||||||
} else {
|
|
||||||
var md5 = require('crypto').createHash('md5');
|
|
||||||
|
|
||||||
md5.update(process.argv[2].trim().toLowerCase());
|
|
||||||
|
|
||||||
process.stdout.write(md5.digest('hex') + "\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
// vim:sw=2:sts=2:ts=8:et
|
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
//
|
|
||||||
// This program allows IE 9 ripple-clients to make websocket connections to
|
|
||||||
// rippled using flash. As IE 9 does not have websocket support, this required
|
|
||||||
// if you wish to support IE 9 ripple-clients.
|
|
||||||
//
|
|
||||||
// http://www.lightsphere.com/dev/articles/flash_socket_policy.html
|
|
||||||
//
|
|
||||||
// For better security, be sure to set the Port below to the port of your
|
|
||||||
// [websocket_public_port].
|
|
||||||
//
|
|
||||||
|
|
||||||
var net = require("net"),
|
|
||||||
port = "*",
|
|
||||||
domains = ["*:"+port]; // Domain:Port
|
|
||||||
|
|
||||||
net.createServer(
|
|
||||||
function(socket) {
|
|
||||||
socket.write("<?xml version='1.0' ?>\n");
|
|
||||||
socket.write("<!DOCTYPE cross-domain-policy SYSTEM 'http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd'>\n");
|
|
||||||
socket.write("<cross-domain-policy>\n");
|
|
||||||
domains.forEach(
|
|
||||||
function(domain) {
|
|
||||||
var parts = domain.split(':');
|
|
||||||
socket.write("\t<allow-access-from domain='" + parts[0] + "' to-ports='" + parts[1] + "' />\n");
|
|
||||||
}
|
|
||||||
);
|
|
||||||
socket.write("</cross-domain-policy>\n");
|
|
||||||
socket.end();
|
|
||||||
}
|
|
||||||
).listen(843);
|
|
||||||
@@ -1,150 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# This script generates information about your rippled installation
|
|
||||||
# and system. It can be used to help debug issues that you may face
|
|
||||||
# in your installation. While this script endeavors to not display any
|
|
||||||
# sensitive information, it is recommended that you read the output
|
|
||||||
# before sharing with any third parties.
|
|
||||||
|
|
||||||
|
|
||||||
rippled_exe=/opt/ripple/bin/rippled
|
|
||||||
conf_file=/etc/opt/ripple/rippled.cfg
|
|
||||||
|
|
||||||
while getopts ":e:c:" opt; do
|
|
||||||
case $opt in
|
|
||||||
e)
|
|
||||||
rippled_exe=${OPTARG}
|
|
||||||
;;
|
|
||||||
c)
|
|
||||||
conf_file=${OPTARG}
|
|
||||||
;;
|
|
||||||
\?)
|
|
||||||
echo "Invalid option: -$OPTARG"
|
|
||||||
exit -1
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
tmp_loc=$(mktemp -d --tmpdir ripple_info.XXXXX)
|
|
||||||
chmod 751 ${tmp_loc}
|
|
||||||
awk_prog=${tmp_loc}/cfg.awk
|
|
||||||
summary_out=${tmp_loc}/rippled_info.md
|
|
||||||
printf "# rippled report info\n\n> generated at %s\n" "$(date -R)" > ${summary_out}
|
|
||||||
|
|
||||||
function log_section {
|
|
||||||
printf "\n## %s\n" "$*" >> ${summary_out}
|
|
||||||
|
|
||||||
while read -r l; do
|
|
||||||
echo " $l" >> ${summary_out}
|
|
||||||
done </dev/stdin
|
|
||||||
}
|
|
||||||
|
|
||||||
function join_by {
|
|
||||||
local IFS="$1"; shift; echo "$*";
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ -f ${conf_file} ]] ; then
|
|
||||||
exclude=( ips ips_fixed node_seed validation_seed validator_token )
|
|
||||||
cleaned_conf=${tmp_loc}/cleaned_rippled_cfg.txt
|
|
||||||
cat << 'EOP' >> ${awk_prog}
|
|
||||||
BEGIN {FS="[[:space:]]*=[[:space:]]*"; skip=0; db_path=""; print > OUT_FILE; split(exl,exa,"|")}
|
|
||||||
/^#/ {next}
|
|
||||||
save==2 && /^[[:space:]]*$/ {next}
|
|
||||||
/^\[.+\]$/ {
|
|
||||||
section=tolower(gensub(/^\[[[:space:]]*([a-zA-Z_]+)[[:space:]]*\]$/, "\\1", "g"))
|
|
||||||
skip = 0
|
|
||||||
for (i in exa) {
|
|
||||||
if (section == exa[i])
|
|
||||||
skip = 1
|
|
||||||
}
|
|
||||||
if (section == "database_path")
|
|
||||||
save = 1
|
|
||||||
}
|
|
||||||
skip==1 {next}
|
|
||||||
save==2 {save=0; db_path=$0}
|
|
||||||
save==1 {save=2}
|
|
||||||
$1 ~ /password/ {$0=$1"=<redacted>"}
|
|
||||||
{print >> OUT_FILE}
|
|
||||||
END {print db_path}
|
|
||||||
EOP
|
|
||||||
|
|
||||||
db=$(\
|
|
||||||
sed -r -e 's/\<s[[:alnum:]]{28}\>/<redactedsecret>/g;s/^[[:space:]]*//;s/[[:space:]]*$//' ${conf_file} |\
|
|
||||||
awk -v OUT_FILE=${cleaned_conf} -v exl="$(join_by '|' "${exclude[@]}")" -f ${awk_prog})
|
|
||||||
rm ${awk_prog}
|
|
||||||
cat ${cleaned_conf} | log_section "cleaned config file"
|
|
||||||
rm ${cleaned_conf}
|
|
||||||
echo "${db}" | log_section "database path"
|
|
||||||
df ${db} | log_section "df: database"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Send output from this script to a log file
|
|
||||||
## this captures any messages
|
|
||||||
## or errors from the script itself
|
|
||||||
|
|
||||||
log_file=${tmp_loc}/get_info.log
|
|
||||||
exec 3>&1 1>>${log_file} 2>&1
|
|
||||||
|
|
||||||
## Send all stdout files to /tmp
|
|
||||||
|
|
||||||
if [[ -x ${rippled_exe} ]] ; then
|
|
||||||
pgrep rippled && \
|
|
||||||
${rippled_exe} --conf ${conf_file} \
|
|
||||||
-- server_info | log_section "server info"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat /proc/meminfo | log_section "meminfo"
|
|
||||||
cat /proc/swaps | log_section "swap space"
|
|
||||||
ulimit -a | log_section "ulimit"
|
|
||||||
|
|
||||||
if command -v lshw >/dev/null 2>&1 ; then
|
|
||||||
lshw 2>/dev/null | log_section "hardware info"
|
|
||||||
else
|
|
||||||
lscpu > ${tmp_loc}/hw_info.txt
|
|
||||||
hwinfo >> ${tmp_loc}/hw_info.txt
|
|
||||||
lspci >> ${tmp_loc}/hw_info.txt
|
|
||||||
lsblk >> ${tmp_loc}/hw_info.txt
|
|
||||||
cat ${tmp_loc}/hw_info.txt | log_section "hardware info"
|
|
||||||
rm ${tmp_loc}/hw_info.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
if command -v iostat >/dev/null 2>&1 ; then
|
|
||||||
iostat -t -d -x 2 6 | log_section "iostat"
|
|
||||||
fi
|
|
||||||
|
|
||||||
df -h | log_section "free disk space"
|
|
||||||
drives=($(df | awk '$1 ~ /^\/dev\// {print $1}' | xargs -n 1 basename))
|
|
||||||
block_devs=($(ls /sys/block/))
|
|
||||||
for d in "${drives[@]}"; do
|
|
||||||
for dev in "${block_devs[@]}"; do
|
|
||||||
#echo "D: [$d], DEV: [$dev]"
|
|
||||||
if [[ $d =~ $dev ]]; then
|
|
||||||
# this file (if exists) has 0 for SSD and 1 for HDD
|
|
||||||
if [[ "$(cat /sys/block/${dev}/queue/rotational 2>/dev/null)" == 0 ]] ; then
|
|
||||||
echo "${d} : SSD" >> ${tmp_loc}/is_ssd.txt
|
|
||||||
else
|
|
||||||
echo "${d} : NO SSD" >> ${tmp_loc}/is_ssd.txt
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ -f ${tmp_loc}/is_ssd.txt ]] ; then
|
|
||||||
cat ${tmp_loc}/is_ssd.txt | log_section "SSD"
|
|
||||||
rm ${tmp_loc}/is_ssd.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat ${log_file} | log_section "script log"
|
|
||||||
|
|
||||||
cat << MSG | tee /dev/fd/3
|
|
||||||
####################################################
|
|
||||||
rippled info has been gathered. Please copy the
|
|
||||||
contents of ${summary_out}
|
|
||||||
to a github gist at https://gist.github.com/
|
|
||||||
|
|
||||||
PLEASE REVIEW THIS FILE FOR ANY SENSITIVE DATA
|
|
||||||
BEFORE POSTING! We have tried our best to omit
|
|
||||||
any sensitive information from this file, but you
|
|
||||||
should verify before posting.
|
|
||||||
####################################################
|
|
||||||
MSG
|
|
||||||
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
//
|
|
||||||
// Returns hex of lowercasing a string.
|
|
||||||
//
|
|
||||||
|
|
||||||
var stringToHex = function (s) {
|
|
||||||
return Array.prototype.map.call(s, function (c) {
|
|
||||||
var b = c.charCodeAt(0);
|
|
||||||
|
|
||||||
return b < 16 ? "0" + b.toString(16) : b.toString(16);
|
|
||||||
}).join("");
|
|
||||||
};
|
|
||||||
|
|
||||||
if (3 != process.argv.length) {
|
|
||||||
process.stderr.write("Usage: " + process.argv[1] + " string\n\nReturns hex of lowercasing string.\n");
|
|
||||||
process.exit(1);
|
|
||||||
|
|
||||||
} else {
|
|
||||||
|
|
||||||
process.stdout.write(stringToHex(process.argv[2].toLowerCase()) + "\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
// vim:sw=2:sts=2:ts=8:et
|
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
//
|
|
||||||
// This is a tool to issue JSON-RPC requests from the command line.
|
|
||||||
//
|
|
||||||
// This can be used to test a JSON-RPC server.
|
|
||||||
//
|
|
||||||
// Requires: npm simple-jsonrpc
|
|
||||||
//
|
|
||||||
|
|
||||||
var jsonrpc = require('simple-jsonrpc');
|
|
||||||
|
|
||||||
var program = process.argv[1];
|
|
||||||
|
|
||||||
if (5 !== process.argv.length) {
|
|
||||||
console.log("Usage: %s <URL> <method> <json>", program);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
var url = process.argv[2];
|
|
||||||
var method = process.argv[3];
|
|
||||||
var json_raw = process.argv[4];
|
|
||||||
var json;
|
|
||||||
|
|
||||||
try {
|
|
||||||
json = JSON.parse(json_raw);
|
|
||||||
}
|
|
||||||
catch (e) {
|
|
||||||
console.log("JSON parse error: %s", e.message);
|
|
||||||
throw e;
|
|
||||||
}
|
|
||||||
|
|
||||||
var client = jsonrpc.client(url);
|
|
||||||
|
|
||||||
client.call(method, json,
|
|
||||||
function (result) {
|
|
||||||
console.log(JSON.stringify(result, undefined, 2));
|
|
||||||
},
|
|
||||||
function (error) {
|
|
||||||
console.log(JSON.stringify(error, undefined, 2));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// vim:sw=2:sts=2:ts=8:et
|
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
//
|
|
||||||
// This is a tool to listen for JSON-RPC requests at an IP and port.
|
|
||||||
//
|
|
||||||
// This will report the request to console and echo back the request as the response.
|
|
||||||
//
|
|
||||||
|
|
||||||
var http = require("http");
|
|
||||||
|
|
||||||
var program = process.argv[1];
|
|
||||||
|
|
||||||
if (4 !== process.argv.length) {
|
|
||||||
console.log("Usage: %s <ip> <port>", program);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
var ip = process.argv[2];
|
|
||||||
var port = process.argv[3];
|
|
||||||
|
|
||||||
var server = http.createServer(function (req, res) {
|
|
||||||
console.log("CONNECT");
|
|
||||||
var input = "";
|
|
||||||
|
|
||||||
req.setEncoding();
|
|
||||||
|
|
||||||
req.on('data', function (buffer) {
|
|
||||||
// console.log("DATA: %s", buffer);
|
|
||||||
input = input + buffer;
|
|
||||||
});
|
|
||||||
|
|
||||||
req.on('end', function () {
|
|
||||||
// console.log("END");
|
|
||||||
|
|
||||||
var json_req;
|
|
||||||
|
|
||||||
console.log("URL: %s", req.url);
|
|
||||||
console.log("HEADERS: %s", JSON.stringify(req.headers, undefined, 2));
|
|
||||||
|
|
||||||
try {
|
|
||||||
json_req = JSON.parse(input);
|
|
||||||
|
|
||||||
console.log("REQ: %s", JSON.stringify(json_req, undefined, 2));
|
|
||||||
}
|
|
||||||
catch (e) {
|
|
||||||
console.log("BAD JSON: %s", e.message);
|
|
||||||
|
|
||||||
json_req = { error : e.message }
|
|
||||||
}
|
|
||||||
|
|
||||||
res.statusCode = 200;
|
|
||||||
res.end(JSON.stringify({
|
|
||||||
jsonrpc: "2.0",
|
|
||||||
result: { request : json_req },
|
|
||||||
id: req.id
|
|
||||||
}));
|
|
||||||
});
|
|
||||||
|
|
||||||
req.on('close', function () {
|
|
||||||
console.log("CLOSE");
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
server.listen(port, ip, undefined,
|
|
||||||
function () {
|
|
||||||
console.log("Listening at: %s:%s", ip, port);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// vim:sw=2:sts=2:ts=8:et
|
|
||||||
218
bin/physical.sh
218
bin/physical.sh
@@ -1,218 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -o errexit
|
|
||||||
|
|
||||||
marker_base=985c80fbc6131f3a8cedd0da7e8af98dfceb13c7
|
|
||||||
marker_commit=${1:-${marker_base}}
|
|
||||||
|
|
||||||
if [ $(git merge-base ${marker_commit} ${marker_base}) != ${marker_base} ]; then
|
|
||||||
echo "first marker commit not an ancestor: ${marker_commit}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ $(git merge-base ${marker_commit} HEAD) != $(git rev-parse --verify ${marker_commit}) ]; then
|
|
||||||
echo "given marker commit not an ancestor: ${marker_commit}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -e Builds/CMake ]; then
|
|
||||||
echo move CMake
|
|
||||||
git mv Builds/CMake cmake
|
|
||||||
git add --update .
|
|
||||||
git commit -m 'Move CMake directory' --author 'Pretty Printer <cpp@ripple.com>'
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -e src/ripple ]; then
|
|
||||||
|
|
||||||
echo move protocol buffers
|
|
||||||
mkdir -p include/xrpl
|
|
||||||
if [ -e src/ripple/proto ]; then
|
|
||||||
git mv src/ripple/proto include/xrpl
|
|
||||||
fi
|
|
||||||
|
|
||||||
extract_list() {
|
|
||||||
git show ${marker_commit}:Builds/CMake/RippledCore.cmake | \
|
|
||||||
awk "/END ${1}/ { p = 0 } p && /src\/ripple/; /BEGIN ${1}/ { p = 1 }" | \
|
|
||||||
sed -e 's#src/ripple/##' -e 's#[^a-z]\+$##'
|
|
||||||
}
|
|
||||||
|
|
||||||
move_files() {
|
|
||||||
oldroot="$1"; shift
|
|
||||||
newroot="$1"; shift
|
|
||||||
detail="$1"; shift
|
|
||||||
files=("$@")
|
|
||||||
for file in ${files[@]}; do
|
|
||||||
if [ ! -e ${oldroot}/${file} ]; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
dir=$(dirname ${file})
|
|
||||||
if [ $(basename ${dir}) == 'details' ]; then
|
|
||||||
dir=$(dirname ${dir})
|
|
||||||
fi
|
|
||||||
if [ $(basename ${dir}) == 'impl' ]; then
|
|
||||||
dir="$(dirname ${dir})/${detail}"
|
|
||||||
fi
|
|
||||||
mkdir -p ${newroot}/${dir}
|
|
||||||
git mv ${oldroot}/${file} ${newroot}/${dir}
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
echo move libxrpl headers
|
|
||||||
files=$(extract_list 'LIBXRPL HEADERS')
|
|
||||||
files+=(
|
|
||||||
basics/SlabAllocator.h
|
|
||||||
|
|
||||||
beast/asio/io_latency_probe.h
|
|
||||||
beast/container/aged_container.h
|
|
||||||
beast/container/aged_container_utility.h
|
|
||||||
beast/container/aged_map.h
|
|
||||||
beast/container/aged_multimap.h
|
|
||||||
beast/container/aged_multiset.h
|
|
||||||
beast/container/aged_set.h
|
|
||||||
beast/container/aged_unordered_map.h
|
|
||||||
beast/container/aged_unordered_multimap.h
|
|
||||||
beast/container/aged_unordered_multiset.h
|
|
||||||
beast/container/aged_unordered_set.h
|
|
||||||
beast/container/detail/aged_associative_container.h
|
|
||||||
beast/container/detail/aged_container_iterator.h
|
|
||||||
beast/container/detail/aged_ordered_container.h
|
|
||||||
beast/container/detail/aged_unordered_container.h
|
|
||||||
beast/container/detail/empty_base_optimization.h
|
|
||||||
beast/core/LockFreeStack.h
|
|
||||||
beast/insight/Collector.h
|
|
||||||
beast/insight/Counter.h
|
|
||||||
beast/insight/CounterImpl.h
|
|
||||||
beast/insight/Event.h
|
|
||||||
beast/insight/EventImpl.h
|
|
||||||
beast/insight/Gauge.h
|
|
||||||
beast/insight/GaugeImpl.h
|
|
||||||
beast/insight/Group.h
|
|
||||||
beast/insight/Groups.h
|
|
||||||
beast/insight/Hook.h
|
|
||||||
beast/insight/HookImpl.h
|
|
||||||
beast/insight/Insight.h
|
|
||||||
beast/insight/Meter.h
|
|
||||||
beast/insight/MeterImpl.h
|
|
||||||
beast/insight/NullCollector.h
|
|
||||||
beast/insight/StatsDCollector.h
|
|
||||||
beast/test/fail_counter.h
|
|
||||||
beast/test/fail_stream.h
|
|
||||||
beast/test/pipe_stream.h
|
|
||||||
beast/test/sig_wait.h
|
|
||||||
beast/test/string_iostream.h
|
|
||||||
beast/test/string_istream.h
|
|
||||||
beast/test/string_ostream.h
|
|
||||||
beast/test/test_allocator.h
|
|
||||||
beast/test/yield_to.h
|
|
||||||
beast/utility/hash_pair.h
|
|
||||||
beast/utility/maybe_const.h
|
|
||||||
beast/utility/temp_dir.h
|
|
||||||
|
|
||||||
# included by only json/impl/json_assert.h
|
|
||||||
json/json_errors.h
|
|
||||||
|
|
||||||
protocol/PayChan.h
|
|
||||||
protocol/RippleLedgerHash.h
|
|
||||||
protocol/messages.h
|
|
||||||
protocol/st.h
|
|
||||||
)
|
|
||||||
files+=(
|
|
||||||
basics/README.md
|
|
||||||
crypto/README.md
|
|
||||||
json/README.md
|
|
||||||
protocol/README.md
|
|
||||||
resource/README.md
|
|
||||||
)
|
|
||||||
move_files src/ripple include/xrpl detail ${files[@]}
|
|
||||||
|
|
||||||
echo move libxrpl sources
|
|
||||||
files=$(extract_list 'LIBXRPL SOURCES')
|
|
||||||
move_files src/ripple src/libxrpl "" ${files[@]}
|
|
||||||
|
|
||||||
echo check leftovers
|
|
||||||
dirs=$(cd include/xrpl; ls -d */)
|
|
||||||
dirs=$(cd src/ripple; ls -d ${dirs} 2>/dev/null || true)
|
|
||||||
files="$(cd src/ripple; find ${dirs} -type f)"
|
|
||||||
if [ -n "${files}" ]; then
|
|
||||||
echo "leftover files:"
|
|
||||||
echo ${files}
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo remove empty directories
|
|
||||||
empty_dirs="$(cd src/ripple; find ${dirs} -depth -type d)"
|
|
||||||
for dir in ${empty_dirs[@]}; do
|
|
||||||
if [ -e ${dir} ]; then
|
|
||||||
rmdir ${dir}
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo move xrpld sources
|
|
||||||
files=$(
|
|
||||||
extract_list 'XRPLD SOURCES'
|
|
||||||
cd src/ripple
|
|
||||||
find * -regex '.*\.\(h\|ipp\|md\|pu\|uml\|png\)'
|
|
||||||
)
|
|
||||||
move_files src/ripple src/xrpld detail ${files[@]}
|
|
||||||
|
|
||||||
files="$(cd src/ripple; find . -type f)"
|
|
||||||
if [ -n "${files}" ]; then
|
|
||||||
echo "leftover files:"
|
|
||||||
echo ${files}
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -rf src/ripple
|
|
||||||
|
|
||||||
echo rename .hpp to .h
|
|
||||||
find include src -name '*.hpp' -exec bash -c 'f="{}"; git mv "${f}" "${f%hpp}h"' \;
|
|
||||||
|
|
||||||
echo move PerfLog.h
|
|
||||||
if [ -e include/xrpl/basics/PerfLog.h ]; then
|
|
||||||
git mv include/xrpl/basics/PerfLog.h src/xrpld/perflog
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Make sure all protobuf includes have the correct prefix.
|
|
||||||
protobuf_replace='s:^#include\s*["<].*org/xrpl\([^">]\+\)[">]:#include <xrpl/proto/org/xrpl\1>:'
|
|
||||||
# Make sure first-party includes use angle brackets and .h extension.
|
|
||||||
ripple_replace='s:include\s*["<]ripple/\(.*\)\.h\(pp\)\?[">]:include <ripple/\1.h>:'
|
|
||||||
beast_replace='s:include\s*<beast/:include <xrpl/beast/:'
|
|
||||||
# Rename impl directories to detail.
|
|
||||||
impl_rename='s:\(<xrpl.*\)/impl\(/details\)\?/:\1/detail/:'
|
|
||||||
|
|
||||||
echo rewrite includes in libxrpl
|
|
||||||
find include/xrpl src/libxrpl -type f -exec sed -i \
|
|
||||||
-e "${protobuf_replace}" \
|
|
||||||
-e "${ripple_replace}" \
|
|
||||||
-e "${beast_replace}" \
|
|
||||||
-e 's:^#include <ripple/:#include <xrpl/:' \
|
|
||||||
-e "${impl_rename}" \
|
|
||||||
{} +
|
|
||||||
|
|
||||||
echo rewrite includes in xrpld
|
|
||||||
# # https://www.baeldung.com/linux/join-multiple-lines
|
|
||||||
libxrpl_dirs="$(cd include/xrpl; ls -d1 */ | sed 's:/$::')"
|
|
||||||
# libxrpl_dirs='a\nb\nc\n'
|
|
||||||
readarray -t libxrpl_dirs <<< "${libxrpl_dirs}"
|
|
||||||
# libxrpl_dirs=(a b c)
|
|
||||||
libxrpl_dirs=$(printf -v txt '%s\\|' "${libxrpl_dirs[@]}"; echo "${txt%\\|}")
|
|
||||||
# libxrpl_dirs='a\|b\|c'
|
|
||||||
find src/xrpld src/test -type f -exec sed -i \
|
|
||||||
-e "${protobuf_replace}" \
|
|
||||||
-e "${ripple_replace}" \
|
|
||||||
-e "${beast_replace}" \
|
|
||||||
-e "s:^#include <ripple/basics/PerfLog.h>:#include <xrpld/perflog/PerfLog.h>:" \
|
|
||||||
-e "s:^#include <ripple/\(${libxrpl_dirs}\)/:#include <xrpl/\1/:" \
|
|
||||||
-e 's:^#include <ripple/:#include <xrpld/:' \
|
|
||||||
-e "${impl_rename}" \
|
|
||||||
{} +
|
|
||||||
|
|
||||||
git commit -m 'Rearrange sources' --author 'Pretty Printer <cpp@ripple.com>'
|
|
||||||
find include src -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format-10 -i {} +
|
|
||||||
git add --update .
|
|
||||||
git commit -m 'Rewrite includes' --author 'Pretty Printer <cpp@ripple.com>'
|
|
||||||
./Builds/levelization/levelization.sh
|
|
||||||
git add --update .
|
|
||||||
git commit -m 'Recompute loops' --author 'Pretty Printer <cpp@ripple.com>'
|
|
||||||
252
bin/rlint.js
252
bin/rlint.js
@@ -1,252 +0,0 @@
|
|||||||
#!/usr/bin/node
|
|
||||||
|
|
||||||
var async = require('async');
|
|
||||||
var Remote = require('ripple-lib').Remote;
|
|
||||||
var Transaction = require('ripple-lib').Transaction;
|
|
||||||
var UInt160 = require('ripple-lib').UInt160;
|
|
||||||
var Amount = require('ripple-lib').Amount;
|
|
||||||
|
|
||||||
var book_key = function (book) {
|
|
||||||
return book.taker_pays.currency
|
|
||||||
+ ":" + book.taker_pays.issuer
|
|
||||||
+ ":" + book.taker_gets.currency
|
|
||||||
+ ":" + book.taker_gets.issuer;
|
|
||||||
};
|
|
||||||
|
|
||||||
var book_key_cross = function (book) {
|
|
||||||
return book.taker_gets.currency
|
|
||||||
+ ":" + book.taker_gets.issuer
|
|
||||||
+ ":" + book.taker_pays.currency
|
|
||||||
+ ":" + book.taker_pays.issuer;
|
|
||||||
};
|
|
||||||
|
|
||||||
var ledger_verify = function (ledger) {
|
|
||||||
var dir_nodes = ledger.accountState.filter(function (entry) {
|
|
||||||
return entry.LedgerEntryType === 'DirectoryNode' // Only directories
|
|
||||||
&& entry.index === entry.RootIndex // Only root nodes
|
|
||||||
&& 'TakerGetsCurrency' in entry; // Only offer directories
|
|
||||||
});
|
|
||||||
|
|
||||||
var books = {};
|
|
||||||
|
|
||||||
dir_nodes.forEach(function (node) {
|
|
||||||
var book = {
|
|
||||||
taker_gets: {
|
|
||||||
currency: UInt160.from_generic(node.TakerGetsCurrency).to_json(),
|
|
||||||
issuer: UInt160.from_generic(node.TakerGetsIssuer).to_json()
|
|
||||||
},
|
|
||||||
taker_pays: {
|
|
||||||
currency: UInt160.from_generic(node.TakerPaysCurrency).to_json(),
|
|
||||||
issuer: UInt160.from_generic(node.TakerPaysIssuer).to_json()
|
|
||||||
},
|
|
||||||
quality: Amount.from_quality(node.RootIndex),
|
|
||||||
index: node.RootIndex
|
|
||||||
};
|
|
||||||
|
|
||||||
books[book_key(book)] = book;
|
|
||||||
|
|
||||||
// console.log(JSON.stringify(node, undefined, 2));
|
|
||||||
});
|
|
||||||
|
|
||||||
// console.log(JSON.stringify(dir_entry, undefined, 2));
|
|
||||||
console.log("#%s books: %s", ledger.ledger_index, Object.keys(books).length);
|
|
||||||
|
|
||||||
Object.keys(books).forEach(function (key) {
|
|
||||||
var book = books[key];
|
|
||||||
var key_cross = book_key_cross(book);
|
|
||||||
var book_cross = books[key_cross];
|
|
||||||
|
|
||||||
if (book && book_cross && !book_cross.done)
|
|
||||||
{
|
|
||||||
var book_cross_quality_inverted = Amount.from_json("1.0/1/1").divide(book_cross.quality);
|
|
||||||
|
|
||||||
if (book_cross_quality_inverted.compareTo(book.quality) >= 0)
|
|
||||||
{
|
|
||||||
// Crossing books
|
|
||||||
console.log("crossing: #%s :: %s :: %s :: %s :: %s :: %s :: %s", ledger.ledger_index, key, book.quality.to_text(), book_cross.quality.to_text(), book_cross_quality_inverted.to_text(),
|
|
||||||
book.index, book_cross.index);
|
|
||||||
}
|
|
||||||
|
|
||||||
book_cross.done = true;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
var ripple_selfs = {};
|
|
||||||
|
|
||||||
var accounts = {};
|
|
||||||
var counts = {};
|
|
||||||
|
|
||||||
ledger.accountState.forEach(function (entry) {
|
|
||||||
if (entry.LedgerEntryType === 'Offer')
|
|
||||||
{
|
|
||||||
counts[entry.Account] = (counts[entry.Account] || 0) + 1;
|
|
||||||
}
|
|
||||||
else if (entry.LedgerEntryType === 'RippleState')
|
|
||||||
{
|
|
||||||
if (entry.Flags & (0x10000 | 0x40000))
|
|
||||||
{
|
|
||||||
counts[entry.LowLimit.issuer] = (counts[entry.LowLimit.issuer] || 0) + 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (entry.Flags & (0x20000 | 0x80000))
|
|
||||||
{
|
|
||||||
counts[entry.HighLimit.issuer] = (counts[entry.HighLimit.issuer] || 0) + 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (entry.HighLimit.issuer === entry.LowLimit.issuer)
|
|
||||||
ripple_selfs[entry.Account] = entry;
|
|
||||||
}
|
|
||||||
else if (entry.LedgerEntryType == 'AccountRoot')
|
|
||||||
{
|
|
||||||
accounts[entry.Account] = entry;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
var low = 0; // Accounts with too low a count.
|
|
||||||
var high = 0;
|
|
||||||
var missing_accounts = 0; // Objects with no referencing account.
|
|
||||||
var missing_objects = 0; // Accounts specifying an object but having none.
|
|
||||||
|
|
||||||
Object.keys(counts).forEach(function (account) {
|
|
||||||
if (account in accounts)
|
|
||||||
{
|
|
||||||
if (counts[account] !== accounts[account].OwnerCount)
|
|
||||||
{
|
|
||||||
if (counts[account] < accounts[account].OwnerCount)
|
|
||||||
{
|
|
||||||
high += 1;
|
|
||||||
console.log("%s: high count %s/%s", account, counts[account], accounts[account].OwnerCount);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
low += 1;
|
|
||||||
console.log("%s: low count %s/%s", account, counts[account], accounts[account].OwnerCount);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
missing_accounts += 1;
|
|
||||||
|
|
||||||
console.log("%s: missing : count %s", account, counts[account]);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
Object.keys(accounts).forEach(function (account) {
|
|
||||||
if (!('OwnerCount' in accounts[account]))
|
|
||||||
{
|
|
||||||
console.log("%s: bad entry : %s", account, JSON.stringify(accounts[account], undefined, 2));
|
|
||||||
}
|
|
||||||
else if (!(account in counts) && accounts[account].OwnerCount)
|
|
||||||
{
|
|
||||||
missing_objects += 1;
|
|
||||||
|
|
||||||
console.log("%s: no objects : %s/%s", account, 0, accounts[account].OwnerCount);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
if (low)
|
|
||||||
console.log("counts too low = %s", low);
|
|
||||||
|
|
||||||
if (high)
|
|
||||||
console.log("counts too high = %s", high);
|
|
||||||
|
|
||||||
if (missing_objects)
|
|
||||||
console.log("missing_objects = %s", missing_objects);
|
|
||||||
|
|
||||||
if (missing_accounts)
|
|
||||||
console.log("missing_accounts = %s", missing_accounts);
|
|
||||||
|
|
||||||
if (Object.keys(ripple_selfs).length)
|
|
||||||
console.log("RippleState selfs = %s", Object.keys(ripple_selfs).length);
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
var ledger_request = function (remote, ledger_index, done) {
|
|
||||||
remote.request_ledger(undefined, {
|
|
||||||
accounts: true,
|
|
||||||
expand: true,
|
|
||||||
})
|
|
||||||
.ledger_index(ledger_index)
|
|
||||||
.on('success', function (m) {
|
|
||||||
// console.log("ledger: ", ledger_index);
|
|
||||||
// console.log("ledger: ", JSON.stringify(m, undefined, 2));
|
|
||||||
done(m.ledger);
|
|
||||||
})
|
|
||||||
.on('error', function (m) {
|
|
||||||
console.log("error");
|
|
||||||
done();
|
|
||||||
})
|
|
||||||
.request();
|
|
||||||
};
|
|
||||||
|
|
||||||
var usage = function () {
|
|
||||||
console.log("rlint.js _websocket_ip_ _websocket_port_ ");
|
|
||||||
};
|
|
||||||
|
|
||||||
var finish = function (remote) {
|
|
||||||
remote.disconnect();
|
|
||||||
|
|
||||||
// XXX Because remote.disconnect() doesn't work:
|
|
||||||
process.exit();
|
|
||||||
};
|
|
||||||
|
|
||||||
console.log("args: ", process.argv.length);
|
|
||||||
console.log("args: ", process.argv);
|
|
||||||
|
|
||||||
if (process.argv.length < 4) {
|
|
||||||
usage();
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
var remote = Remote.from_config({
|
|
||||||
websocket_ip: process.argv[2],
|
|
||||||
websocket_port: process.argv[3],
|
|
||||||
})
|
|
||||||
.once('ledger_closed', function (m) {
|
|
||||||
console.log("ledger_closed: ", JSON.stringify(m, undefined, 2));
|
|
||||||
|
|
||||||
if (process.argv.length === 5) {
|
|
||||||
var ledger_index = process.argv[4];
|
|
||||||
|
|
||||||
ledger_request(remote, ledger_index, function (l) {
|
|
||||||
if (l) {
|
|
||||||
ledger_verify(l);
|
|
||||||
}
|
|
||||||
|
|
||||||
finish(remote);
|
|
||||||
});
|
|
||||||
|
|
||||||
} else if (process.argv.length === 6) {
|
|
||||||
var ledger_start = Number(process.argv[4]);
|
|
||||||
var ledger_end = Number(process.argv[5]);
|
|
||||||
var ledger_cursor = ledger_end;
|
|
||||||
|
|
||||||
async.whilst(
|
|
||||||
function () {
|
|
||||||
return ledger_start <= ledger_cursor && ledger_cursor <=ledger_end;
|
|
||||||
},
|
|
||||||
function (callback) {
|
|
||||||
// console.log(ledger_cursor);
|
|
||||||
|
|
||||||
ledger_request(remote, ledger_cursor, function (l) {
|
|
||||||
if (l) {
|
|
||||||
ledger_verify(l);
|
|
||||||
}
|
|
||||||
|
|
||||||
--ledger_cursor;
|
|
||||||
|
|
||||||
callback();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
function (error) {
|
|
||||||
finish(remote);
|
|
||||||
});
|
|
||||||
|
|
||||||
} else {
|
|
||||||
finish(remote);
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.connect();
|
|
||||||
}
|
|
||||||
|
|
||||||
// vim:sw=2:sts=2:ts=8:et
|
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -exu
|
|
||||||
|
|
||||||
: ${TRAVIS_BUILD_DIR:=""}
|
|
||||||
: ${VCPKG_DIR:=".vcpkg"}
|
|
||||||
export VCPKG_ROOT=${VCPKG_DIR}
|
|
||||||
: ${VCPKG_DEFAULT_TRIPLET:="x64-windows-static"}
|
|
||||||
|
|
||||||
export VCPKG_DEFAULT_TRIPLET
|
|
||||||
|
|
||||||
EXE="vcpkg"
|
|
||||||
if [[ -z ${COMSPEC:-} ]]; then
|
|
||||||
EXE="${EXE}.exe"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -d "${VCPKG_DIR}" && -x "${VCPKG_DIR}/${EXE}" && -d "${VCPKG_DIR}/installed" ]] ; then
|
|
||||||
echo "Using cached vcpkg at ${VCPKG_DIR}"
|
|
||||||
${VCPKG_DIR}/${EXE} list
|
|
||||||
else
|
|
||||||
if [[ -d "${VCPKG_DIR}" ]] ; then
|
|
||||||
rm -rf "${VCPKG_DIR}"
|
|
||||||
fi
|
|
||||||
git clone --branch 2021.04.30 https://github.com/Microsoft/vcpkg.git ${VCPKG_DIR}
|
|
||||||
pushd ${VCPKG_DIR}
|
|
||||||
BSARGS=()
|
|
||||||
if [[ "$(uname)" == "Darwin" ]] ; then
|
|
||||||
BSARGS+=(--allowAppleClang)
|
|
||||||
fi
|
|
||||||
if [[ -z ${COMSPEC:-} ]]; then
|
|
||||||
chmod +x ./bootstrap-vcpkg.sh
|
|
||||||
time ./bootstrap-vcpkg.sh "${BSARGS[@]}"
|
|
||||||
else
|
|
||||||
time ./bootstrap-vcpkg.bat
|
|
||||||
fi
|
|
||||||
popd
|
|
||||||
fi
|
|
||||||
|
|
||||||
# TODO: bring boost in this way as well ?
|
|
||||||
# NOTE: can pin specific ports to a commit/version like this:
|
|
||||||
# git checkout <SOME COMMIT HASH> ports/boost
|
|
||||||
if [ $# -eq 0 ]; then
|
|
||||||
echo "No extra packages specified..."
|
|
||||||
PKGS=()
|
|
||||||
else
|
|
||||||
PKGS=( "$@" )
|
|
||||||
fi
|
|
||||||
for LIB in "${PKGS[@]}"; do
|
|
||||||
time ${VCPKG_DIR}/${EXE} --clean-after-build install ${LIB}
|
|
||||||
done
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
|
|
||||||
# NOTE: must be sourced from a shell so it can export vars
|
|
||||||
|
|
||||||
cat << BATCH > ./getenv.bat
|
|
||||||
CALL %*
|
|
||||||
ENV
|
|
||||||
BATCH
|
|
||||||
|
|
||||||
while read line ; do
|
|
||||||
IFS='"' read x path arg <<<"${line}"
|
|
||||||
if [ -f "${path}" ] ; then
|
|
||||||
echo "FOUND: $path"
|
|
||||||
export VCINSTALLDIR=$(./getenv.bat "${path}" ${arg} | grep "^VCINSTALLDIR=" | sed -E "s/^VCINSTALLDIR=//g")
|
|
||||||
if [ "${VCINSTALLDIR}" != "" ] ; then
|
|
||||||
echo "USING ${VCINSTALLDIR}"
|
|
||||||
export LIB=$(./getenv.bat "${path}" ${arg} | grep "^LIB=" | sed -E "s/^LIB=//g")
|
|
||||||
export LIBPATH=$(./getenv.bat "${path}" ${arg} | grep "^LIBPATH=" | sed -E "s/^LIBPATH=//g")
|
|
||||||
export INCLUDE=$(./getenv.bat "${path}" ${arg} | grep "^INCLUDE=" | sed -E "s/^INCLUDE=//g")
|
|
||||||
ADDPATH=$(./getenv.bat "${path}" ${arg} | grep "^PATH=" | sed -E "s/^PATH=//g")
|
|
||||||
export PATH="${ADDPATH}:${PATH}"
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done <<EOL
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Auxiliary/Build/vcvarsall.bat" x86_amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Auxiliary/Build/vcvarsall.bat" x86_amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvarsall.bat" x86_amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Auxiliary/Build/vcvarsall.bat" x86_amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio 15.0/VC/vcvarsall.bat" amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/vcvarsall.bat" amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio 13.0/VC/vcvarsall.bat" amd64
|
|
||||||
"C:/Program Files (x86)/Microsoft Visual Studio 12.0/VC/vcvarsall.bat" amd64
|
|
||||||
EOL
|
|
||||||
# TODO: update the list above as needed to support newer versions of msvc tools
|
|
||||||
|
|
||||||
rm -f getenv.bat
|
|
||||||
|
|
||||||
if [ "${VCINSTALLDIR}" = "" ] ; then
|
|
||||||
echo "No compatible visual studio found!"
|
|
||||||
fi
|
|
||||||
@@ -1,246 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
"""A script to test rippled in an infinite loop of start-sync-stop.
|
|
||||||
|
|
||||||
- Requires Python 3.7+.
|
|
||||||
- Can be stopped with SIGINT.
|
|
||||||
- Has no dependencies outside the standard library.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
assert sys.version_info.major == 3 and sys.version_info.minor >= 7
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import asyncio
|
|
||||||
import configparser
|
|
||||||
import contextlib
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
import platform
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
import urllib.error
|
|
||||||
import urllib.request
|
|
||||||
|
|
||||||
# Enable asynchronous subprocesses on Windows. The default changed in 3.8.
|
|
||||||
# https://docs.python.org/3.7/library/asyncio-platforms.html#subprocess-support-on-windows
|
|
||||||
if (platform.system() == 'Windows' and sys.version_info.major == 3
|
|
||||||
and sys.version_info.minor < 8):
|
|
||||||
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
|
|
||||||
|
|
||||||
DEFAULT_EXE = 'rippled'
|
|
||||||
DEFAULT_CONFIGURATION_FILE = 'rippled.cfg'
|
|
||||||
# Number of seconds to wait before forcefully terminating.
|
|
||||||
PATIENCE = 120
|
|
||||||
# Number of contiguous seconds in a sync state to be considered synced.
|
|
||||||
DEFAULT_SYNC_DURATION = 60
|
|
||||||
# Number of seconds between polls of state.
|
|
||||||
DEFAULT_POLL_INTERVAL = 5
|
|
||||||
SYNC_STATES = ('full', 'validating', 'proposing')
|
|
||||||
|
|
||||||
|
|
||||||
def read_config(config_file):
|
|
||||||
# strict = False: Allow duplicate keys, e.g. [rpc_startup].
|
|
||||||
# allow_no_value = True: Allow keys with no values. Generally, these
|
|
||||||
# instances use the "key" as the value, and the section name is the key,
|
|
||||||
# e.g. [debug_logfile].
|
|
||||||
# delimiters = ('='): Allow ':' as a character in Windows paths. Some of
|
|
||||||
# our "keys" are actually values, and we don't want to split them on ':'.
|
|
||||||
config = configparser.ConfigParser(
|
|
||||||
strict=False,
|
|
||||||
allow_no_value=True,
|
|
||||||
delimiters=('='),
|
|
||||||
)
|
|
||||||
config.read(config_file)
|
|
||||||
return config
|
|
||||||
|
|
||||||
|
|
||||||
def to_list(value, separator=','):
|
|
||||||
"""Parse a list from a delimited string value."""
|
|
||||||
return [s.strip() for s in value.split(separator) if s]
|
|
||||||
|
|
||||||
|
|
||||||
def find_log_file(config_file):
|
|
||||||
"""Try to figure out what log file the user has chosen. Raises all kinds
|
|
||||||
of exceptions if there is any possibility of ambiguity."""
|
|
||||||
config = read_config(config_file)
|
|
||||||
values = list(config['debug_logfile'].keys())
|
|
||||||
if len(values) < 1:
|
|
||||||
raise ValueError(
|
|
||||||
f'no [debug_logfile] in configuration file: {config_file}')
|
|
||||||
if len(values) > 1:
|
|
||||||
raise ValueError(
|
|
||||||
f'too many [debug_logfile] in configuration file: {config_file}')
|
|
||||||
return values[0]
|
|
||||||
|
|
||||||
|
|
||||||
def find_http_port(config_file):
|
|
||||||
config = read_config(config_file)
|
|
||||||
names = list(config['server'].keys())
|
|
||||||
for name in names:
|
|
||||||
server = config[name]
|
|
||||||
if 'http' in to_list(server.get('protocol', '')):
|
|
||||||
return int(server['port'])
|
|
||||||
raise ValueError(f'no server in [server] for "http" protocol')
|
|
||||||
|
|
||||||
|
|
||||||
@contextlib.asynccontextmanager
|
|
||||||
async def rippled(exe=DEFAULT_EXE, config_file=DEFAULT_CONFIGURATION_FILE):
|
|
||||||
"""A context manager for a rippled process."""
|
|
||||||
# Start the server.
|
|
||||||
process = await asyncio.create_subprocess_exec(
|
|
||||||
str(exe),
|
|
||||||
'--conf',
|
|
||||||
str(config_file),
|
|
||||||
stdout=subprocess.DEVNULL,
|
|
||||||
stderr=subprocess.DEVNULL,
|
|
||||||
)
|
|
||||||
logging.info(f'rippled started with pid {process.pid}')
|
|
||||||
try:
|
|
||||||
yield process
|
|
||||||
finally:
|
|
||||||
# Ask it to stop.
|
|
||||||
logging.info(f'asking rippled (pid: {process.pid}) to stop')
|
|
||||||
start = time.time()
|
|
||||||
process.terminate()
|
|
||||||
|
|
||||||
# Wait nicely.
|
|
||||||
try:
|
|
||||||
await asyncio.wait_for(process.wait(), PATIENCE)
|
|
||||||
except asyncio.TimeoutError:
|
|
||||||
# Ask the operating system to kill it.
|
|
||||||
logging.warning(f'killing rippled ({process.pid})')
|
|
||||||
try:
|
|
||||||
process.kill()
|
|
||||||
except ProcessLookupError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
code = await process.wait()
|
|
||||||
end = time.time()
|
|
||||||
logging.info(
|
|
||||||
f'rippled stopped after {end - start:.1f} seconds with code {code}'
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
async def sync(
|
|
||||||
port,
|
|
||||||
*,
|
|
||||||
duration=DEFAULT_SYNC_DURATION,
|
|
||||||
interval=DEFAULT_POLL_INTERVAL,
|
|
||||||
):
|
|
||||||
"""Poll rippled on an interval until it has been synced for a duration."""
|
|
||||||
start = time.perf_counter()
|
|
||||||
while (time.perf_counter() - start) < duration:
|
|
||||||
await asyncio.sleep(interval)
|
|
||||||
|
|
||||||
request = urllib.request.Request(
|
|
||||||
f'http://127.0.0.1:{port}',
|
|
||||||
data=json.dumps({
|
|
||||||
'method': 'server_state'
|
|
||||||
}).encode(),
|
|
||||||
headers={'Content-Type': 'application/json'},
|
|
||||||
)
|
|
||||||
with urllib.request.urlopen(request) as response:
|
|
||||||
try:
|
|
||||||
body = json.loads(response.read())
|
|
||||||
except urllib.error.HTTPError as cause:
|
|
||||||
logging.warning(f'server_state returned not JSON: {cause}')
|
|
||||||
start = time.perf_counter()
|
|
||||||
continue
|
|
||||||
|
|
||||||
try:
|
|
||||||
state = body['result']['state']['server_state']
|
|
||||||
except KeyError as cause:
|
|
||||||
logging.warning(f'server_state response missing key: {cause.key}')
|
|
||||||
start = time.perf_counter()
|
|
||||||
continue
|
|
||||||
logging.info(f'server_state: {state}')
|
|
||||||
if state not in SYNC_STATES:
|
|
||||||
# Require a contiguous sync state.
|
|
||||||
start = time.perf_counter()
|
|
||||||
|
|
||||||
|
|
||||||
async def loop(test,
|
|
||||||
*,
|
|
||||||
exe=DEFAULT_EXE,
|
|
||||||
config_file=DEFAULT_CONFIGURATION_FILE):
|
|
||||||
"""
|
|
||||||
Start-test-stop rippled in an infinite loop.
|
|
||||||
|
|
||||||
Moves log to a different file after each iteration.
|
|
||||||
"""
|
|
||||||
log_file = find_log_file(config_file)
|
|
||||||
id = 0
|
|
||||||
while True:
|
|
||||||
logging.info(f'iteration: {id}')
|
|
||||||
async with rippled(exe, config_file) as process:
|
|
||||||
start = time.perf_counter()
|
|
||||||
exited = asyncio.create_task(process.wait())
|
|
||||||
tested = asyncio.create_task(test())
|
|
||||||
# Try to sync as long as the process is running.
|
|
||||||
done, pending = await asyncio.wait(
|
|
||||||
{exited, tested},
|
|
||||||
return_when=asyncio.FIRST_COMPLETED,
|
|
||||||
)
|
|
||||||
if done == {exited}:
|
|
||||||
code = exited.result()
|
|
||||||
logging.warning(
|
|
||||||
f'server halted for unknown reason with code {code}')
|
|
||||||
else:
|
|
||||||
assert done == {tested}
|
|
||||||
assert tested.exception() is None
|
|
||||||
end = time.perf_counter()
|
|
||||||
logging.info(f'synced after {end - start:.0f} seconds')
|
|
||||||
os.replace(log_file, f'debug.{id}.log')
|
|
||||||
id += 1
|
|
||||||
|
|
||||||
|
|
||||||
logging.basicConfig(
|
|
||||||
format='%(asctime)s %(levelname)-8s %(message)s',
|
|
||||||
level=logging.INFO,
|
|
||||||
datefmt='%Y-%m-%d %H:%M:%S',
|
|
||||||
)
|
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
|
||||||
parser.add_argument(
|
|
||||||
'rippled',
|
|
||||||
type=Path,
|
|
||||||
nargs='?',
|
|
||||||
default=DEFAULT_EXE,
|
|
||||||
help='Path to rippled.',
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
'--conf',
|
|
||||||
type=Path,
|
|
||||||
default=DEFAULT_CONFIGURATION_FILE,
|
|
||||||
help='Path to configuration file.',
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
'--duration',
|
|
||||||
type=int,
|
|
||||||
default=DEFAULT_SYNC_DURATION,
|
|
||||||
help='Number of contiguous seconds required in a synchronized state.',
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
'--interval',
|
|
||||||
type=int,
|
|
||||||
default=DEFAULT_POLL_INTERVAL,
|
|
||||||
help='Number of seconds to wait between polls of state.',
|
|
||||||
)
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
port = find_http_port(args.conf)
|
|
||||||
|
|
||||||
|
|
||||||
def test():
|
|
||||||
return sync(port, duration=args.duration, interval=args.interval)
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
asyncio.run(loop(test, exe=args.rippled, config_file=args.conf))
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
# Squelch the message. This is a normal mode of exit.
|
|
||||||
pass
|
|
||||||
133
bin/stop-test.js
133
bin/stop-test.js
@@ -1,133 +0,0 @@
|
|||||||
/* -------------------------------- REQUIRES -------------------------------- */
|
|
||||||
|
|
||||||
var child = require("child_process");
|
|
||||||
var assert = require("assert");
|
|
||||||
|
|
||||||
/* --------------------------------- CONFIG --------------------------------- */
|
|
||||||
|
|
||||||
if (process.argv[2] == null) {
|
|
||||||
[
|
|
||||||
'Usage: ',
|
|
||||||
'',
|
|
||||||
' `node bin/stop-test.js i,j [rippled_path] [rippled_conf]`',
|
|
||||||
'',
|
|
||||||
' Launch rippled and stop it after n seconds for all n in [i, j}',
|
|
||||||
' For all even values of n launch rippled with `--fg`',
|
|
||||||
' For values of n where n % 3 == 0 launch rippled with `--fg`\n',
|
|
||||||
'Examples: ',
|
|
||||||
'',
|
|
||||||
' $ node bin/stop-test.js 5,10',
|
|
||||||
(' $ node bin/stop-test.js 1,4 ' +
|
|
||||||
'build/clang.debug/rippled $HOME/.confs/rippled.cfg')
|
|
||||||
]
|
|
||||||
.forEach(function(l){console.log(l)});
|
|
||||||
|
|
||||||
process.exit();
|
|
||||||
} else {
|
|
||||||
var testRange = process.argv[2].split(',').map(Number);
|
|
||||||
var rippledPath = process.argv[3] || 'build/rippled'
|
|
||||||
var rippledConf = process.argv[4] || 'rippled.cfg'
|
|
||||||
}
|
|
||||||
|
|
||||||
var options = {
|
|
||||||
env: process.env,
|
|
||||||
stdio: 'ignore' // we could dump the child io when it fails abnormally
|
|
||||||
};
|
|
||||||
|
|
||||||
// default args
|
|
||||||
var conf_args = ['--conf='+rippledConf];
|
|
||||||
var start_args = conf_args.concat([/*'--net'*/])
|
|
||||||
var stop_args = conf_args.concat(['stop']);
|
|
||||||
|
|
||||||
/* --------------------------------- HELPERS -------------------------------- */
|
|
||||||
|
|
||||||
function start(args) {
|
|
||||||
return child.spawn(rippledPath, args, options);
|
|
||||||
}
|
|
||||||
function stop(rippled) { child.execFile(rippledPath, stop_args, options)}
|
|
||||||
function secs_l8r(ms, f) {setTimeout(f, ms * 1000); }
|
|
||||||
|
|
||||||
function show_results_and_exit(results) {
|
|
||||||
console.log(JSON.stringify(results, undefined, 2));
|
|
||||||
process.exit();
|
|
||||||
}
|
|
||||||
|
|
||||||
var timeTakes = function (range) {
|
|
||||||
function sumRange(n) {return (n+1) * n /2}
|
|
||||||
var ret = sumRange(range[1]);
|
|
||||||
if (range[0] > 1) {
|
|
||||||
ret = ret - sumRange(range[0] - 1)
|
|
||||||
}
|
|
||||||
var stopping = (range[1] - range[0]) * 0.5;
|
|
||||||
return ret + stopping;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* ---------------------------------- TEST ---------------------------------- */
|
|
||||||
|
|
||||||
console.log("Test will take ~%s seconds", timeTakes(testRange));
|
|
||||||
|
|
||||||
(function oneTest(n /* seconds */, results) {
|
|
||||||
if (n >= testRange[1]) {
|
|
||||||
// show_results_and_exit(results);
|
|
||||||
console.log(JSON.stringify(results, undefined, 2));
|
|
||||||
oneTest(testRange[0], []);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
var args = start_args;
|
|
||||||
if (n % 2 == 0) {args = args.concat(['--fg'])}
|
|
||||||
if (n % 3 == 0) {args = args.concat(['--net'])}
|
|
||||||
|
|
||||||
var result = {args: args, alive_for: n};
|
|
||||||
results.push(result);
|
|
||||||
|
|
||||||
console.log("\nLaunching `%s` with `%s` for %d seconds",
|
|
||||||
rippledPath, JSON.stringify(args), n);
|
|
||||||
|
|
||||||
rippled = start(args);
|
|
||||||
console.log("Rippled pid: %d", rippled.pid);
|
|
||||||
|
|
||||||
// defaults
|
|
||||||
var b4StopSent = false;
|
|
||||||
var stopSent = false;
|
|
||||||
var stop_took = null;
|
|
||||||
|
|
||||||
rippled.once('exit', function(){
|
|
||||||
if (!stopSent && !b4StopSent) {
|
|
||||||
console.warn('\nRippled exited itself b4 stop issued');
|
|
||||||
process.exit();
|
|
||||||
};
|
|
||||||
|
|
||||||
// The io handles close AFTER exit, may have implications for
|
|
||||||
// `stdio:'inherit'` option to `child.spawn`.
|
|
||||||
rippled.once('close', function() {
|
|
||||||
result.stop_took = (+new Date() - stop_took) / 1000; // seconds
|
|
||||||
console.log("Stopping after %d seconds took %s seconds",
|
|
||||||
n, result.stop_took);
|
|
||||||
oneTest(n+1, results);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
secs_l8r(n, function(){
|
|
||||||
console.log("Stopping rippled after %d seconds", n);
|
|
||||||
|
|
||||||
// possible race here ?
|
|
||||||
// seems highly unlikely, but I was having issues at one point
|
|
||||||
b4StopSent=true;
|
|
||||||
stop_took = (+new Date());
|
|
||||||
// when does `exit` actually get sent?
|
|
||||||
stop();
|
|
||||||
stopSent=true;
|
|
||||||
|
|
||||||
// Sometimes we want to attach with a debugger.
|
|
||||||
if (process.env.ABORT_TESTS_ON_STALL != null) {
|
|
||||||
// We wait 30 seconds, and if it hasn't stopped, we abort the process
|
|
||||||
secs_l8r(30, function() {
|
|
||||||
if (result.stop_took == null) {
|
|
||||||
console.log("rippled has stalled");
|
|
||||||
process.exit();
|
|
||||||
};
|
|
||||||
});
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}(testRange[0], []));
|
|
||||||
@@ -1,119 +0,0 @@
|
|||||||
/**
|
|
||||||
* bin/update_bintypes.js
|
|
||||||
*
|
|
||||||
* This unholy abomination of a script generates the JavaScript file
|
|
||||||
* src/js/bintypes.js from various parts of the C++ source code.
|
|
||||||
*
|
|
||||||
* This should *NOT* be part of any automatic build process unless the C++
|
|
||||||
* source data are brought into a more easily parseable format. Until then,
|
|
||||||
* simply run this script manually and fix as needed.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// XXX: Process LedgerFormats.(h|cpp) as well.
|
|
||||||
|
|
||||||
var filenameProto = __dirname + '/../src/cpp/ripple/SerializeProto.h',
|
|
||||||
filenameTxFormatsH = __dirname + '/../src/cpp/ripple/TransactionFormats.h',
|
|
||||||
filenameTxFormats = __dirname + '/../src/cpp/ripple/TransactionFormats.cpp';
|
|
||||||
|
|
||||||
var fs = require('fs');
|
|
||||||
|
|
||||||
var output = [];
|
|
||||||
|
|
||||||
// Stage 1: Get the field types and codes from SerializeProto.h
|
|
||||||
var types = {},
|
|
||||||
fields = {};
|
|
||||||
String(fs.readFileSync(filenameProto)).split('\n').forEach(function (line) {
|
|
||||||
line = line.replace(/^\s+|\s+$/g, '').replace(/\s+/g, '');
|
|
||||||
if (!line.length || line.slice(0, 2) === '//' || line.slice(-1) !== ')') return;
|
|
||||||
|
|
||||||
var tmp = line.slice(0, -1).split('('),
|
|
||||||
type = tmp[0],
|
|
||||||
opts = tmp[1].split(',');
|
|
||||||
|
|
||||||
if (type === 'TYPE') types[opts[1]] = [opts[0], +opts[2]];
|
|
||||||
else if (type === 'FIELD') fields[opts[0]] = [types[opts[1]][0], +opts[2]];
|
|
||||||
});
|
|
||||||
|
|
||||||
output.push('var ST = require("./serializedtypes");');
|
|
||||||
output.push('');
|
|
||||||
output.push('var REQUIRED = exports.REQUIRED = 0,');
|
|
||||||
output.push(' OPTIONAL = exports.OPTIONAL = 1,');
|
|
||||||
output.push(' DEFAULT = exports.DEFAULT = 2;');
|
|
||||||
output.push('');
|
|
||||||
|
|
||||||
function pad(s, n) { while (s.length < n) s += ' '; return s; }
|
|
||||||
function padl(s, n) { while (s.length < n) s = ' '+s; return s; }
|
|
||||||
|
|
||||||
Object.keys(types).forEach(function (type) {
|
|
||||||
output.push(pad('ST.'+types[type][0]+'.id', 25) + ' = '+types[type][1]+';');
|
|
||||||
});
|
|
||||||
output.push('');
|
|
||||||
|
|
||||||
// Stage 2: Get the transaction type IDs from TransactionFormats.h
|
|
||||||
var ttConsts = {};
|
|
||||||
String(fs.readFileSync(filenameTxFormatsH)).split('\n').forEach(function (line) {
|
|
||||||
var regex = /tt([A-Z_]+)\s+=\s+([0-9-]+)/;
|
|
||||||
var match = line.match(regex);
|
|
||||||
if (match) ttConsts[match[1]] = +match[2];
|
|
||||||
});
|
|
||||||
|
|
||||||
// Stage 3: Get the transaction formats from TransactionFormats.cpp
|
|
||||||
var base = [],
|
|
||||||
sections = [],
|
|
||||||
current = base;
|
|
||||||
String(fs.readFileSync(filenameTxFormats)).split('\n').forEach(function (line) {
|
|
||||||
line = line.replace(/^\s+|\s+$/g, '').replace(/\s+/g, '');
|
|
||||||
|
|
||||||
var d_regex = /DECLARE_TF\(([A-Za-z]+),tt([A-Z_]+)/;
|
|
||||||
var d_match = line.match(d_regex);
|
|
||||||
|
|
||||||
var s_regex = /SOElement\(sf([a-z]+),SOE_(REQUIRED|OPTIONAL|DEFAULT)/i;
|
|
||||||
var s_match = line.match(s_regex);
|
|
||||||
|
|
||||||
if (d_match) sections.push(current = [d_match[1], ttConsts[d_match[2]]]);
|
|
||||||
else if (s_match) current.push([s_match[1], s_match[2]]);
|
|
||||||
});
|
|
||||||
|
|
||||||
function removeFinalComma(arr) {
|
|
||||||
arr[arr.length-1] = arr[arr.length-1].slice(0, -1);
|
|
||||||
}
|
|
||||||
|
|
||||||
output.push('var base = [');
|
|
||||||
base.forEach(function (field) {
|
|
||||||
var spec = fields[field[0]];
|
|
||||||
output.push(' [ '+
|
|
||||||
pad("'"+field[0]+"'", 21)+', '+
|
|
||||||
pad(field[1], 8)+', '+
|
|
||||||
padl(""+spec[1], 2)+', '+
|
|
||||||
'ST.'+pad(spec[0], 3)+
|
|
||||||
' ],');
|
|
||||||
});
|
|
||||||
removeFinalComma(output);
|
|
||||||
output.push('];');
|
|
||||||
output.push('');
|
|
||||||
|
|
||||||
|
|
||||||
output.push('exports.tx = {');
|
|
||||||
sections.forEach(function (section) {
|
|
||||||
var name = section.shift(),
|
|
||||||
ttid = section.shift();
|
|
||||||
|
|
||||||
output.push(' '+name+': ['+ttid+'].concat(base, [');
|
|
||||||
section.forEach(function (field) {
|
|
||||||
var spec = fields[field[0]];
|
|
||||||
output.push(' [ '+
|
|
||||||
pad("'"+field[0]+"'", 21)+', '+
|
|
||||||
pad(field[1], 8)+', '+
|
|
||||||
padl(""+spec[1], 2)+', '+
|
|
||||||
'ST.'+pad(spec[0], 3)+
|
|
||||||
' ],');
|
|
||||||
});
|
|
||||||
removeFinalComma(output);
|
|
||||||
output.push(' ]),');
|
|
||||||
});
|
|
||||||
removeFinalComma(output);
|
|
||||||
output.push('};');
|
|
||||||
output.push('');
|
|
||||||
|
|
||||||
console.log(output.join('\n'));
|
|
||||||
|
|
||||||
@@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF)
|
|||||||
target_compile_definitions (common
|
target_compile_definitions (common
|
||||||
INTERFACE
|
INTERFACE
|
||||||
$<$<CONFIG:Debug>:DEBUG _DEBUG>
|
$<$<CONFIG:Debug>:DEBUG _DEBUG>
|
||||||
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
|
#[===[
|
||||||
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
|
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
|
||||||
# defined, so no need to add it explicitly except for
|
explicitly except for the special case of (profile ON) and (assert OFF).
|
||||||
# this special case of (profile ON) and (assert OFF)
|
Presumably this is because we don't want profile builds asserting unless
|
||||||
# -- presumably this is because we don't want profile
|
asserts were specifically requested.
|
||||||
# builds asserting unless asserts were specifically
|
]===]
|
||||||
# requested
|
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
|
||||||
|
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
|
||||||
|
OPENSSL_SUPPRESS_DEPRECATED
|
||||||
|
)
|
||||||
|
|
||||||
if (MSVC)
|
if (MSVC)
|
||||||
# remove existing exception flag since we set it to -EHa
|
# remove existing exception flag since we set it to -EHa
|
||||||
@@ -90,28 +93,16 @@ if (MSVC)
|
|||||||
-errorreport:none
|
-errorreport:none
|
||||||
-machine:X64)
|
-machine:X64)
|
||||||
else ()
|
else ()
|
||||||
# HACK : because these need to come first, before any warning demotion
|
|
||||||
string (APPEND CMAKE_CXX_FLAGS " -Wall -Wdeprecated")
|
|
||||||
if (wextra)
|
|
||||||
string (APPEND CMAKE_CXX_FLAGS " -Wextra -Wno-unused-parameter")
|
|
||||||
endif ()
|
|
||||||
# not MSVC
|
|
||||||
target_compile_options (common
|
target_compile_options (common
|
||||||
INTERFACE
|
INTERFACE
|
||||||
|
-Wall
|
||||||
|
-Wdeprecated
|
||||||
|
$<$<BOOL:${is_clang}>:-Wno-deprecated-declarations>
|
||||||
|
$<$<BOOL:${wextra}>:-Wextra -Wno-unused-parameter>
|
||||||
$<$<BOOL:${werr}>:-Werror>
|
$<$<BOOL:${werr}>:-Werror>
|
||||||
$<$<COMPILE_LANGUAGE:CXX>:
|
|
||||||
-frtti
|
|
||||||
-Wnon-virtual-dtor
|
|
||||||
>
|
|
||||||
-Wno-sign-compare
|
|
||||||
-Wno-char-subscripts
|
|
||||||
-Wno-format
|
|
||||||
-Wno-unused-local-typedefs
|
|
||||||
-fstack-protector
|
-fstack-protector
|
||||||
$<$<BOOL:${is_gcc}>:
|
-Wno-sign-compare
|
||||||
-Wno-unused-but-set-variable
|
-Wno-unused-but-set-variable
|
||||||
-Wno-deprecated
|
|
||||||
>
|
|
||||||
$<$<NOT:$<CONFIG:Debug>>:-fno-strict-aliasing>
|
$<$<NOT:$<CONFIG:Debug>>:-fno-strict-aliasing>
|
||||||
# tweak gcc optimization for debug
|
# tweak gcc optimization for debug
|
||||||
$<$<AND:$<BOOL:${is_gcc}>,$<CONFIG:Debug>>:-O0>
|
$<$<AND:$<BOOL:${is_gcc}>,$<CONFIG:Debug>>:-O0>
|
||||||
|
|||||||
@@ -51,6 +51,8 @@ target_link_libraries(xrpl.libpb
|
|||||||
# TODO: Clean up the number of library targets later.
|
# TODO: Clean up the number of library targets later.
|
||||||
add_library(xrpl.imports.main INTERFACE)
|
add_library(xrpl.imports.main INTERFACE)
|
||||||
|
|
||||||
|
find_package(RapidJSON)
|
||||||
|
|
||||||
target_link_libraries(xrpl.imports.main
|
target_link_libraries(xrpl.imports.main
|
||||||
INTERFACE
|
INTERFACE
|
||||||
LibArchive::LibArchive
|
LibArchive::LibArchive
|
||||||
@@ -75,6 +77,7 @@ add_module(xrpl beast)
|
|||||||
target_link_libraries(xrpl.libxrpl.beast PUBLIC
|
target_link_libraries(xrpl.libxrpl.beast PUBLIC
|
||||||
xrpl.imports.main
|
xrpl.imports.main
|
||||||
xrpl.libpb
|
xrpl.libpb
|
||||||
|
rapidjson
|
||||||
)
|
)
|
||||||
|
|
||||||
# Level 02
|
# Level 02
|
||||||
@@ -85,6 +88,7 @@ target_link_libraries(xrpl.libxrpl.basics PUBLIC xrpl.libxrpl.beast)
|
|||||||
add_module(xrpl json)
|
add_module(xrpl json)
|
||||||
target_link_libraries(xrpl.libxrpl.json PUBLIC xrpl.libxrpl.basics)
|
target_link_libraries(xrpl.libxrpl.json PUBLIC xrpl.libxrpl.basics)
|
||||||
|
|
||||||
|
|
||||||
add_module(xrpl crypto)
|
add_module(xrpl crypto)
|
||||||
target_link_libraries(xrpl.libxrpl.crypto PUBLIC xrpl.libxrpl.basics)
|
target_link_libraries(xrpl.libxrpl.crypto PUBLIC xrpl.libxrpl.basics)
|
||||||
|
|
||||||
@@ -99,6 +103,15 @@ target_link_libraries(xrpl.libxrpl.protocol PUBLIC
|
|||||||
add_module(xrpl resource)
|
add_module(xrpl resource)
|
||||||
target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
|
target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
|
||||||
|
|
||||||
|
# Level 06
|
||||||
|
add_module(xrpl net)
|
||||||
|
target_link_libraries(xrpl.libxrpl.net PUBLIC
|
||||||
|
xrpl.libxrpl.basics
|
||||||
|
xrpl.libxrpl.json
|
||||||
|
xrpl.libxrpl.protocol
|
||||||
|
xrpl.libxrpl.resource
|
||||||
|
)
|
||||||
|
|
||||||
add_module(xrpl server)
|
add_module(xrpl server)
|
||||||
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
|
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
|
||||||
|
|
||||||
@@ -121,6 +134,7 @@ target_link_modules(xrpl PUBLIC
|
|||||||
protocol
|
protocol
|
||||||
resource
|
resource
|
||||||
server
|
server
|
||||||
|
net
|
||||||
)
|
)
|
||||||
|
|
||||||
# All headers in libxrpl are in modules.
|
# All headers in libxrpl are in modules.
|
||||||
|
|||||||
@@ -53,9 +53,9 @@ set(download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake")
|
|||||||
file(WRITE
|
file(WRITE
|
||||||
"${download_script}"
|
"${download_script}"
|
||||||
"file(DOWNLOAD \
|
"file(DOWNLOAD \
|
||||||
http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \
|
https://github.com/PeterFeicht/cppreference-doc/releases/download/v20250209/html-book-20250209.zip \
|
||||||
${CMAKE_BINARY_DIR}/docs/cppreference.zip \
|
${CMAKE_BINARY_DIR}/docs/cppreference.zip \
|
||||||
EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \
|
EXPECTED_HASH MD5=bda585f72fbca4b817b29a3d5746567b \
|
||||||
)\n \
|
)\n \
|
||||||
execute_process( \
|
execute_process( \
|
||||||
COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \
|
COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ install (
|
|||||||
xrpl.libxrpl.protocol
|
xrpl.libxrpl.protocol
|
||||||
xrpl.libxrpl.resource
|
xrpl.libxrpl.resource
|
||||||
xrpl.libxrpl.server
|
xrpl.libxrpl.server
|
||||||
|
xrpl.libxrpl.net
|
||||||
xrpl.libxrpl
|
xrpl.libxrpl
|
||||||
antithesis-sdk-cpp
|
antithesis-sdk-cpp
|
||||||
EXPORT RippleExports
|
EXPORT RippleExports
|
||||||
|
|||||||
@@ -2,16 +2,6 @@
|
|||||||
convenience variables and sanity checks
|
convenience variables and sanity checks
|
||||||
#]===================================================================]
|
#]===================================================================]
|
||||||
|
|
||||||
include(ProcessorCount)
|
|
||||||
|
|
||||||
if (NOT ep_procs)
|
|
||||||
ProcessorCount(ep_procs)
|
|
||||||
if (ep_procs GREATER 1)
|
|
||||||
# never use more than half of cores for EP builds
|
|
||||||
math (EXPR ep_procs "${ep_procs} / 2")
|
|
||||||
message (STATUS "Using ${ep_procs} cores for ExternalProject builds.")
|
|
||||||
endif ()
|
|
||||||
endif ()
|
|
||||||
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||||
|
|
||||||
set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE)
|
set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE)
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ if(tests)
|
|||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
option(unity "Creates a build using UNITY support in cmake. This is the default" ON)
|
option(unity "Creates a build using UNITY support in cmake." OFF)
|
||||||
if(unity)
|
if(unity)
|
||||||
if(NOT is_ci)
|
if(NOT is_ci)
|
||||||
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ find_package(Boost 1.82 REQUIRED
|
|||||||
COMPONENTS
|
COMPONENTS
|
||||||
chrono
|
chrono
|
||||||
container
|
container
|
||||||
context
|
|
||||||
coroutine
|
coroutine
|
||||||
date_time
|
date_time
|
||||||
filesystem
|
filesystem
|
||||||
@@ -24,7 +23,7 @@ endif()
|
|||||||
|
|
||||||
target_link_libraries(ripple_boost
|
target_link_libraries(ripple_boost
|
||||||
INTERFACE
|
INTERFACE
|
||||||
Boost::boost
|
Boost::headers
|
||||||
Boost::chrono
|
Boost::chrono
|
||||||
Boost::container
|
Boost::container
|
||||||
Boost::coroutine
|
Boost::coroutine
|
||||||
|
|||||||
41
cmake/xrpl_add_test.cmake
Normal file
41
cmake/xrpl_add_test.cmake
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
include(isolate_headers)
|
||||||
|
|
||||||
|
function(xrpl_add_test name)
|
||||||
|
set(target ${PROJECT_NAME}.test.${name})
|
||||||
|
|
||||||
|
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
|
||||||
|
)
|
||||||
|
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
|
||||||
|
|
||||||
|
isolate_headers(
|
||||||
|
${target}
|
||||||
|
"${CMAKE_SOURCE_DIR}"
|
||||||
|
"${CMAKE_SOURCE_DIR}/tests/${name}"
|
||||||
|
PRIVATE
|
||||||
|
)
|
||||||
|
|
||||||
|
# Make sure the test isn't optimized away in unity builds
|
||||||
|
set_target_properties(${target} PROPERTIES
|
||||||
|
UNITY_BUILD_MODE GROUP
|
||||||
|
UNITY_BUILD_BATCH_SIZE 0) # Adjust as needed
|
||||||
|
|
||||||
|
add_test(NAME ${target} COMMAND ${target})
|
||||||
|
set_tests_properties(
|
||||||
|
${target} PROPERTIES
|
||||||
|
FIXTURES_REQUIRED ${target}_fixture
|
||||||
|
)
|
||||||
|
|
||||||
|
add_test(
|
||||||
|
NAME ${target}.build
|
||||||
|
COMMAND
|
||||||
|
${CMAKE_COMMAND}
|
||||||
|
--build ${CMAKE_BINARY_DIR}
|
||||||
|
--config $<CONFIG>
|
||||||
|
--target ${target}
|
||||||
|
)
|
||||||
|
set_tests_properties(${target}.build PROPERTIES
|
||||||
|
FIXTURES_SETUP ${target}_fixture
|
||||||
|
)
|
||||||
|
endfunction()
|
||||||
34
conan/profiles/default
Normal file
34
conan/profiles/default
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{% set os = detect_api.detect_os() %}
|
||||||
|
{% set arch = detect_api.detect_arch() %}
|
||||||
|
{% set compiler, version, compiler_exe = detect_api.detect_default_compiler() %}
|
||||||
|
{% set compiler_version = version %}
|
||||||
|
{% if os == "Linux" %}
|
||||||
|
{% set compiler_version = detect_api.default_compiler_version(compiler, version) %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
[settings]
|
||||||
|
os={{ os }}
|
||||||
|
arch={{ arch }}
|
||||||
|
build_type=Debug
|
||||||
|
compiler={{compiler}}
|
||||||
|
compiler.version={{ compiler_version }}
|
||||||
|
compiler.cppstd=20
|
||||||
|
{% if os == "Windows" %}
|
||||||
|
compiler.runtime=static
|
||||||
|
{% else %}
|
||||||
|
compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
[conf]
|
||||||
|
{% if compiler == "clang" and compiler_version >= 19 %}
|
||||||
|
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
|
||||||
|
{% endif %}
|
||||||
|
{% if compiler == "apple-clang" and compiler_version >= 17 %}
|
||||||
|
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
|
||||||
|
{% endif %}
|
||||||
|
{% if compiler == "gcc" and compiler_version < 13 %}
|
||||||
|
tools.build:cxxflags=['-Wno-restrict']
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
[tool_requires]
|
||||||
|
!cmake/*: cmake/[>=3 <4]
|
||||||
55
conanfile.py
55
conanfile.py
@@ -1,4 +1,4 @@
|
|||||||
from conan import ConanFile
|
from conan import ConanFile, __version__ as conan_version
|
||||||
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
|
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
|
||||||
import re
|
import re
|
||||||
|
|
||||||
@@ -24,18 +24,21 @@ class Xrpl(ConanFile):
|
|||||||
}
|
}
|
||||||
|
|
||||||
requires = [
|
requires = [
|
||||||
'date/3.0.3',
|
|
||||||
'grpc/1.50.1',
|
'grpc/1.50.1',
|
||||||
'libarchive/3.7.6',
|
'libarchive/3.8.1',
|
||||||
'nudb/2.0.8',
|
'nudb/2.0.9',
|
||||||
'openssl/1.1.1v',
|
'openssl/3.5.2',
|
||||||
'soci/4.0.3',
|
'soci/4.0.3',
|
||||||
'xxhash/0.8.2',
|
|
||||||
'zlib/1.3.1',
|
'zlib/1.3.1',
|
||||||
|
"rapidjson/1.1.0"
|
||||||
|
]
|
||||||
|
|
||||||
|
test_requires = [
|
||||||
|
'doctest/2.4.11',
|
||||||
]
|
]
|
||||||
|
|
||||||
tool_requires = [
|
tool_requires = [
|
||||||
'protobuf/3.21.9',
|
'protobuf/3.21.12',
|
||||||
]
|
]
|
||||||
|
|
||||||
default_options = {
|
default_options = {
|
||||||
@@ -87,30 +90,34 @@ class Xrpl(ConanFile):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def set_version(self):
|
def set_version(self):
|
||||||
path = f'{self.recipe_folder}/src/libxrpl/protocol/BuildInfo.cpp'
|
if self.version is None:
|
||||||
regex = r'versionString\s?=\s?\"(.*)\"'
|
path = f'{self.recipe_folder}/src/libxrpl/protocol/BuildInfo.cpp'
|
||||||
with open(path, 'r') as file:
|
regex = r'versionString\s?=\s?\"(.*)\"'
|
||||||
matches = (re.search(regex, line) for line in file)
|
with open(path, encoding='utf-8') as file:
|
||||||
match = next(m for m in matches if m)
|
matches = (re.search(regex, line) for line in file)
|
||||||
self.version = match.group(1)
|
match = next(m for m in matches if m)
|
||||||
|
self.version = match.group(1)
|
||||||
|
|
||||||
def configure(self):
|
def configure(self):
|
||||||
if self.settings.compiler == 'apple-clang':
|
if self.settings.compiler == 'apple-clang':
|
||||||
self.options['boost'].visibility = 'global'
|
self.options['boost'].visibility = 'global'
|
||||||
|
|
||||||
def requirements(self):
|
def requirements(self):
|
||||||
self.requires('boost/1.83.0', force=True)
|
# Conan 2 requires transitive headers to be specified
|
||||||
|
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
|
||||||
|
self.requires('boost/1.86.0', force=True, **transitive_headers_opt)
|
||||||
|
self.requires('date/3.0.4', **transitive_headers_opt)
|
||||||
self.requires('lz4/1.10.0', force=True)
|
self.requires('lz4/1.10.0', force=True)
|
||||||
self.requires('protobuf/3.21.9', force=True)
|
self.requires('protobuf/3.21.12', force=True)
|
||||||
self.requires('sqlite3/3.47.0', force=True)
|
self.requires('sqlite3/3.49.1', force=True)
|
||||||
if self.options.jemalloc:
|
if self.options.jemalloc:
|
||||||
self.requires('jemalloc/5.3.0')
|
self.requires('jemalloc/5.3.0')
|
||||||
if self.options.rocksdb:
|
if self.options.rocksdb:
|
||||||
self.requires('rocksdb/9.7.3')
|
self.requires('rocksdb/10.0.1')
|
||||||
|
self.requires('xxhash/0.8.3', **transitive_headers_opt)
|
||||||
|
|
||||||
exports_sources = (
|
exports_sources = (
|
||||||
'CMakeLists.txt',
|
'CMakeLists.txt',
|
||||||
'bin/getRippledInfo',
|
|
||||||
'cfg/*',
|
'cfg/*',
|
||||||
'cmake/*',
|
'cmake/*',
|
||||||
'external/*',
|
'external/*',
|
||||||
@@ -161,7 +168,17 @@ class Xrpl(ConanFile):
|
|||||||
# `include/`, not `include/ripple/proto/`.
|
# `include/`, not `include/ripple/proto/`.
|
||||||
libxrpl.includedirs = ['include', 'include/ripple/proto']
|
libxrpl.includedirs = ['include', 'include/ripple/proto']
|
||||||
libxrpl.requires = [
|
libxrpl.requires = [
|
||||||
'boost::boost',
|
'boost::headers',
|
||||||
|
'boost::chrono',
|
||||||
|
'boost::container',
|
||||||
|
'boost::coroutine',
|
||||||
|
'boost::date_time',
|
||||||
|
'boost::filesystem',
|
||||||
|
'boost::json',
|
||||||
|
'boost::program_options',
|
||||||
|
'boost::regex',
|
||||||
|
'boost::system',
|
||||||
|
'boost::thread',
|
||||||
'date::date',
|
'date::date',
|
||||||
'grpc::grpc++',
|
'grpc::grpc++',
|
||||||
'libarchive::libarchive',
|
'libarchive::libarchive',
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ the ledger (so the entire network has the same view). This will help the network
|
|||||||
see which validators are **currently** unreliable, and adjust their quorum
|
see which validators are **currently** unreliable, and adjust their quorum
|
||||||
calculation accordingly.
|
calculation accordingly.
|
||||||
|
|
||||||
*Improving the liveness of the network is the main motivation for the negative UNL.*
|
_Improving the liveness of the network is the main motivation for the negative UNL._
|
||||||
|
|
||||||
### Targeted Faults
|
### Targeted Faults
|
||||||
|
|
||||||
@@ -53,16 +53,17 @@ even if the number of remaining validators gets to 60%. Say we have a network
|
|||||||
with 10 validators on the UNL and everything is operating correctly. The quorum
|
with 10 validators on the UNL and everything is operating correctly. The quorum
|
||||||
required for this network would be 8 (80% of 10). When validators fail, the
|
required for this network would be 8 (80% of 10). When validators fail, the
|
||||||
quorum required would be as low as 6 (60% of 10), which is the absolute
|
quorum required would be as low as 6 (60% of 10), which is the absolute
|
||||||
***minimum quorum***. We need the absolute minimum quorum to be strictly greater
|
**_minimum quorum_**. We need the absolute minimum quorum to be strictly greater
|
||||||
than 50% of the original UNL so that there cannot be two partitions of
|
than 50% of the original UNL so that there cannot be two partitions of
|
||||||
well-behaved nodes headed in different directions. We arbitrarily choose 60% as
|
well-behaved nodes headed in different directions. We arbitrarily choose 60% as
|
||||||
the minimum quorum to give a margin of safety.
|
the minimum quorum to give a margin of safety.
|
||||||
|
|
||||||
Consider these events in the absence of negative UNL:
|
Consider these events in the absence of negative UNL:
|
||||||
|
|
||||||
1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum
|
1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum
|
||||||
1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum
|
1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum
|
||||||
1. 5:00pm - validator3 fails, votes vs. quorum: 7 < 8, we don’t have quorum
|
1. 5:00pm - validator3 fails, votes vs. quorum: 7 < 8, we don’t have quorum
|
||||||
* **network cannot validate new ledgers with 3 failed validators**
|
- **network cannot validate new ledgers with 3 failed validators**
|
||||||
|
|
||||||
We're below 80% agreement, so new ledgers cannot be validated. This is how the
|
We're below 80% agreement, so new ledgers cannot be validated. This is how the
|
||||||
XRP Ledger operates today, but if the negative UNL was enabled, the events would
|
XRP Ledger operates today, but if the negative UNL was enabled, the events would
|
||||||
@@ -70,18 +71,20 @@ happen as follows. (Please note that the events below are from a simplified
|
|||||||
version of our protocol.)
|
version of our protocol.)
|
||||||
|
|
||||||
1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum
|
1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum
|
||||||
1. 1:40pm - network adds validator1 to negative UNL, quorum changes to ceil(9 * 0.8), or 8
|
1. 1:40pm - network adds validator1 to negative UNL, quorum changes to ceil(9 \* 0.8), or 8
|
||||||
1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum
|
1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum
|
||||||
1. 3:40pm - network adds validator2 to negative UNL, quorum changes to ceil(8 * 0.8), or 7
|
1. 3:40pm - network adds validator2 to negative UNL, quorum changes to ceil(8 \* 0.8), or 7
|
||||||
1. 5:00pm - validator3 fails, votes vs. quorum: 7 >= 7, we have quorum
|
1. 5:00pm - validator3 fails, votes vs. quorum: 7 >= 7, we have quorum
|
||||||
1. 5:40pm - network adds validator3 to negative UNL, quorum changes to ceil(7 * 0.8), or 6
|
1. 5:40pm - network adds validator3 to negative UNL, quorum changes to ceil(7 \* 0.8), or 6
|
||||||
1. 7:00pm - validator4 fails, votes vs. quorum: 6 >= 6, we have quorum
|
1. 7:00pm - validator4 fails, votes vs. quorum: 6 >= 6, we have quorum
|
||||||
* **network can still validate new ledgers with 4 failed validators**
|
- **network can still validate new ledgers with 4 failed validators**
|
||||||
|
|
||||||
## External Interactions
|
## External Interactions
|
||||||
|
|
||||||
### Message Format Changes
|
### Message Format Changes
|
||||||
|
|
||||||
This proposal will:
|
This proposal will:
|
||||||
|
|
||||||
1. add a new pseudo-transaction type
|
1. add a new pseudo-transaction type
|
||||||
1. add the negative UNL to the ledger data structure.
|
1. add the negative UNL to the ledger data structure.
|
||||||
|
|
||||||
@@ -89,19 +92,20 @@ Any tools or systems that rely on the format of this data will have to be
|
|||||||
updated.
|
updated.
|
||||||
|
|
||||||
### Amendment
|
### Amendment
|
||||||
|
|
||||||
This feature **will** need an amendment to activate.
|
This feature **will** need an amendment to activate.
|
||||||
|
|
||||||
## Design
|
## Design
|
||||||
|
|
||||||
This section discusses the following topics about the Negative UNL design:
|
This section discusses the following topics about the Negative UNL design:
|
||||||
|
|
||||||
* [Negative UNL protocol overview](#Negative-UNL-Protocol-Overview)
|
- [Negative UNL protocol overview](#Negative-UNL-Protocol-Overview)
|
||||||
* [Validator reliability measurement](#Validator-Reliability-Measurement)
|
- [Validator reliability measurement](#Validator-Reliability-Measurement)
|
||||||
* [Format Changes](#Format-Changes)
|
- [Format Changes](#Format-Changes)
|
||||||
* [Negative UNL maintenance](#Negative-UNL-Maintenance)
|
- [Negative UNL maintenance](#Negative-UNL-Maintenance)
|
||||||
* [Quorum size calculation](#Quorum-Size-Calculation)
|
- [Quorum size calculation](#Quorum-Size-Calculation)
|
||||||
* [Filter validation messages](#Filter-Validation-Messages)
|
- [Filter validation messages](#Filter-Validation-Messages)
|
||||||
* [High level sequence diagram of code
|
- [High level sequence diagram of code
|
||||||
changes](#High-Level-Sequence-Diagram-of-Code-Changes)
|
changes](#High-Level-Sequence-Diagram-of-Code-Changes)
|
||||||
|
|
||||||
### Negative UNL Protocol Overview
|
### Negative UNL Protocol Overview
|
||||||
@@ -114,9 +118,9 @@ with V in their UNL adjust the quorum and V’s validation message is not counte
|
|||||||
when verifying if a ledger is fully validated. V’s flow of messages and network
|
when verifying if a ledger is fully validated. V’s flow of messages and network
|
||||||
interactions, however, will remain the same.
|
interactions, however, will remain the same.
|
||||||
|
|
||||||
We define the ***effective UNL** = original UNL - negative UNL*, and the
|
We define the **\*effective UNL** = original UNL - negative UNL\*, and the
|
||||||
***effective quorum*** as the quorum of the *effective UNL*. And we set
|
**_effective quorum_** as the quorum of the _effective UNL_. And we set
|
||||||
*effective quorum = Ceiling(80% * effective UNL)*.
|
_effective quorum = Ceiling(80% _ effective UNL)\*.
|
||||||
|
|
||||||
### Validator Reliability Measurement
|
### Validator Reliability Measurement
|
||||||
|
|
||||||
@@ -126,16 +130,16 @@ measure about its validators, but we have chosen ledger validation messages.
|
|||||||
This is because every validator shall send one and only one signed validation
|
This is because every validator shall send one and only one signed validation
|
||||||
message per ledger. This keeps the measurement simple and removes
|
message per ledger. This keeps the measurement simple and removes
|
||||||
timing/clock-sync issues. A node will measure the percentage of agreeing
|
timing/clock-sync issues. A node will measure the percentage of agreeing
|
||||||
validation messages (*PAV*) received from each validator on the node's UNL. Note
|
validation messages (_PAV_) received from each validator on the node's UNL. Note
|
||||||
that the node will only count the validation messages that agree with its own
|
that the node will only count the validation messages that agree with its own
|
||||||
validations.
|
validations.
|
||||||
|
|
||||||
We define the **PAV** as the **P**ercentage of **A**greed **V**alidation
|
We define the **PAV** as the **P**ercentage of **A**greed **V**alidation
|
||||||
messages received for the last N ledgers, where N = 256 by default.
|
messages received for the last N ledgers, where N = 256 by default.
|
||||||
|
|
||||||
When the PAV drops below the ***low-water mark***, the validator is considered
|
When the PAV drops below the **_low-water mark_**, the validator is considered
|
||||||
unreliable, and is a candidate to be disabled by being added to the negative
|
unreliable, and is a candidate to be disabled by being added to the negative
|
||||||
UNL. A validator must have a PAV higher than the ***high-water mark*** to be
|
UNL. A validator must have a PAV higher than the **_high-water mark_** to be
|
||||||
re-enabled. The validator is re-enabled by removing it from the negative UNL. In
|
re-enabled. The validator is re-enabled by removing it from the negative UNL. In
|
||||||
the implementation, we plan to set the low-water mark as 50% and the high-water
|
the implementation, we plan to set the low-water mark as 50% and the high-water
|
||||||
mark as 80%.
|
mark as 80%.
|
||||||
@@ -143,22 +147,24 @@ mark as 80%.
|
|||||||
### Format Changes
|
### Format Changes
|
||||||
|
|
||||||
The negative UNL component in a ledger contains three fields.
|
The negative UNL component in a ledger contains three fields.
|
||||||
* ***NegativeUNL***: The current negative UNL, a list of unreliable validators.
|
|
||||||
* ***ToDisable***: The validator to be added to the negative UNL on the next
|
- **_NegativeUNL_**: The current negative UNL, a list of unreliable validators.
|
||||||
|
- **_ToDisable_**: The validator to be added to the negative UNL on the next
|
||||||
flag ledger.
|
flag ledger.
|
||||||
* ***ToReEnable***: The validator to be removed from the negative UNL on the
|
- **_ToReEnable_**: The validator to be removed from the negative UNL on the
|
||||||
next flag ledger.
|
next flag ledger.
|
||||||
|
|
||||||
All three fields are optional. When the *ToReEnable* field exists, the
|
All three fields are optional. When the _ToReEnable_ field exists, the
|
||||||
*NegativeUNL* field cannot be empty.
|
_NegativeUNL_ field cannot be empty.
|
||||||
|
|
||||||
A new pseudo-transaction, ***UNLModify***, is added. It has three fields
|
A new pseudo-transaction, **_UNLModify_**, is added. It has three fields
|
||||||
* ***Disabling***: A flag indicating whether the modification is to disable or
|
|
||||||
|
- **_Disabling_**: A flag indicating whether the modification is to disable or
|
||||||
to re-enable a validator.
|
to re-enable a validator.
|
||||||
* ***Seq***: The ledger sequence number.
|
- **_Seq_**: The ledger sequence number.
|
||||||
* ***Validator***: The validator to be disabled or re-enabled.
|
- **_Validator_**: The validator to be disabled or re-enabled.
|
||||||
|
|
||||||
There would be at most one *disable* `UNLModify` and one *re-enable* `UNLModify`
|
There would be at most one _disable_ `UNLModify` and one _re-enable_ `UNLModify`
|
||||||
transaction per flag ledger. The full machinery is described further on.
|
transaction per flag ledger. The full machinery is described further on.
|
||||||
|
|
||||||
### Negative UNL Maintenance
|
### Negative UNL Maintenance
|
||||||
@@ -167,19 +173,19 @@ The negative UNL can only be modified on the flag ledgers. If a validator's
|
|||||||
reliability status changes, it takes two flag ledgers to modify the negative
|
reliability status changes, it takes two flag ledgers to modify the negative
|
||||||
UNL. Let's see an example of the algorithm:
|
UNL. Let's see an example of the algorithm:
|
||||||
|
|
||||||
* Ledger seq = 100: A validator V goes offline.
|
- Ledger seq = 100: A validator V goes offline.
|
||||||
* Ledger seq = 256: This is a flag ledger, and V's reliability measurement *PAV*
|
- Ledger seq = 256: This is a flag ledger, and V's reliability measurement _PAV_
|
||||||
is lower than the low-water mark. Other validators add `UNLModify`
|
is lower than the low-water mark. Other validators add `UNLModify`
|
||||||
pseudo-transactions `{true, 256, V}` to the transaction set which goes through
|
pseudo-transactions `{true, 256, V}` to the transaction set which goes through
|
||||||
the consensus. Then the pseudo-transaction is applied to the negative UNL
|
the consensus. Then the pseudo-transaction is applied to the negative UNL
|
||||||
ledger component by setting `ToDisable = V`.
|
ledger component by setting `ToDisable = V`.
|
||||||
* Ledger seq = 257 ~ 511: The negative UNL ledger component is copied from the
|
- Ledger seq = 257 ~ 511: The negative UNL ledger component is copied from the
|
||||||
parent ledger.
|
parent ledger.
|
||||||
* Ledger seq=512: This is a flag ledger, and the negative UNL is updated
|
- Ledger seq=512: This is a flag ledger, and the negative UNL is updated
|
||||||
`NegativeUNL = NegativeUNL + ToDisable`.
|
`NegativeUNL = NegativeUNL + ToDisable`.
|
||||||
|
|
||||||
The negative UNL may have up to `MaxNegativeListed = floor(original UNL * 25%)`
|
The negative UNL may have up to `MaxNegativeListed = floor(original UNL * 25%)`
|
||||||
validators. The 25% is because of 75% * 80% = 60%, where 75% = 100% - 25%, 80%
|
validators. The 25% is because of 75% \* 80% = 60%, where 75% = 100% - 25%, 80%
|
||||||
is the quorum of the effective UNL, and 60% is the absolute minimum quorum of
|
is the quorum of the effective UNL, and 60% is the absolute minimum quorum of
|
||||||
the original UNL. Adding more than 25% validators to the negative UNL does not
|
the original UNL. Adding more than 25% validators to the negative UNL does not
|
||||||
improve the liveness of the network, because adding more validators to the
|
improve the liveness of the network, because adding more validators to the
|
||||||
@@ -187,52 +193,43 @@ negative UNL cannot lower the effective quorum.
|
|||||||
|
|
||||||
The following is the detailed algorithm:
|
The following is the detailed algorithm:
|
||||||
|
|
||||||
* **If** the ledger seq = x is a flag ledger
|
- **If** the ledger seq = x is a flag ledger
|
||||||
|
1. Compute `NegativeUNL = NegativeUNL + ToDisable - ToReEnable` if they
|
||||||
|
exist in the parent ledger
|
||||||
|
|
||||||
1. Compute `NegativeUNL = NegativeUNL + ToDisable - ToReEnable` if they
|
1. Try to find a candidate to disable if `sizeof NegativeUNL < MaxNegativeListed`
|
||||||
exist in the parent ledger
|
|
||||||
|
|
||||||
1. Try to find a candidate to disable if `sizeof NegativeUNL < MaxNegativeListed`
|
1. Find a validator V that has a _PAV_ lower than the low-water
|
||||||
|
mark, but is not in `NegativeUNL`.
|
||||||
|
|
||||||
1. Find a validator V that has a *PAV* lower than the low-water
|
1. If two or more are found, their public keys are XORed with the hash
|
||||||
mark, but is not in `NegativeUNL`.
|
of the parent ledger and the one with the lowest XOR result is chosen.
|
||||||
|
1. If V is found, create a `UNLModify` pseudo-transaction
|
||||||
|
`TxDisableValidator = {true, x, V}`
|
||||||
|
1. Try to find a candidate to re-enable if `sizeof NegativeUNL > 0`:
|
||||||
|
1. Find a validator U that is in `NegativeUNL` and has a _PAV_ higher
|
||||||
|
than the high-water mark.
|
||||||
|
1. If U is not found, try to find one in `NegativeUNL` but not in the
|
||||||
|
local _UNL_.
|
||||||
|
1. If two or more are found, their public keys are XORed with the hash
|
||||||
|
of the parent ledger and the one with the lowest XOR result is chosen.
|
||||||
|
1. If U is found, create a `UNLModify` pseudo-transaction
|
||||||
|
`TxReEnableValidator = {false, x, U}`
|
||||||
|
|
||||||
1. If two or more are found, their public keys are XORed with the hash
|
1. If any `UNLModify` pseudo-transactions are created, add them to the
|
||||||
of the parent ledger and the one with the lowest XOR result is chosen.
|
transaction set. The transaction set goes through the consensus algorithm.
|
||||||
|
1. If have enough support, the `UNLModify` pseudo-transactions remain in the
|
||||||
1. If V is found, create a `UNLModify` pseudo-transaction
|
transaction set agreed by the validators. Then the pseudo-transactions are
|
||||||
`TxDisableValidator = {true, x, V}`
|
applied to the ledger:
|
||||||
|
|
||||||
1. Try to find a candidate to re-enable if `sizeof NegativeUNL > 0`:
|
|
||||||
|
|
||||||
1. Find a validator U that is in `NegativeUNL` and has a *PAV* higher
|
|
||||||
than the high-water mark.
|
|
||||||
|
|
||||||
1. If U is not found, try to find one in `NegativeUNL` but not in the
|
|
||||||
local *UNL*.
|
|
||||||
|
|
||||||
1. If two or more are found, their public keys are XORed with the hash
|
|
||||||
of the parent ledger and the one with the lowest XOR result is chosen.
|
|
||||||
|
|
||||||
1. If U is found, create a `UNLModify` pseudo-transaction
|
|
||||||
`TxReEnableValidator = {false, x, U}`
|
|
||||||
|
|
||||||
1. If any `UNLModify` pseudo-transactions are created, add them to the
|
|
||||||
transaction set. The transaction set goes through the consensus algorithm.
|
|
||||||
|
|
||||||
1. If have enough support, the `UNLModify` pseudo-transactions remain in the
|
|
||||||
transaction set agreed by the validators. Then the pseudo-transactions are
|
|
||||||
applied to the ledger:
|
|
||||||
|
|
||||||
1. If have `TxDisableValidator`, set `ToDisable=TxDisableValidator.V`.
|
|
||||||
Else clear `ToDisable`.
|
|
||||||
|
|
||||||
1. If have `TxReEnableValidator`, set
|
|
||||||
`ToReEnable=TxReEnableValidator.U`. Else clear `ToReEnable`.
|
|
||||||
|
|
||||||
* **Else** (not a flag ledger)
|
|
||||||
|
|
||||||
1. Copy the negative UNL ledger component from the parent ledger
|
1. If have `TxDisableValidator`, set `ToDisable=TxDisableValidator.V`.
|
||||||
|
Else clear `ToDisable`.
|
||||||
|
|
||||||
|
1. If have `TxReEnableValidator`, set
|
||||||
|
`ToReEnable=TxReEnableValidator.U`. Else clear `ToReEnable`.
|
||||||
|
|
||||||
|
- **Else** (not a flag ledger)
|
||||||
|
1. Copy the negative UNL ledger component from the parent ledger
|
||||||
|
|
||||||
The negative UNL is stored on each ledger because we don't know when a validator
|
The negative UNL is stored on each ledger because we don't know when a validator
|
||||||
may reconnect to the network. If the negative UNL was stored only on every flag
|
may reconnect to the network. If the negative UNL was stored only on every flag
|
||||||
@@ -273,31 +270,26 @@ not counted when checking if the ledger is fully validated.
|
|||||||
The diagram below is the sequence of one round of consensus. Classes and
|
The diagram below is the sequence of one round of consensus. Classes and
|
||||||
components with non-trivial changes are colored green.
|
components with non-trivial changes are colored green.
|
||||||
|
|
||||||
* The `ValidatorList` class is modified to compute the quorum of the effective
|
- The `ValidatorList` class is modified to compute the quorum of the effective
|
||||||
UNL.
|
UNL.
|
||||||
|
|
||||||
* The `Validations` class provides an interface for querying the validation
|
- The `Validations` class provides an interface for querying the validation
|
||||||
messages from trusted validators.
|
messages from trusted validators.
|
||||||
|
|
||||||
* The `ConsensusAdaptor` component:
|
- The `ConsensusAdaptor` component:
|
||||||
|
- The `RCLConsensus::Adaptor` class is modified for creating `UNLModify`
|
||||||
* The `RCLConsensus::Adaptor` class is modified for creating `UNLModify`
|
Pseudo-Transactions.
|
||||||
Pseudo-Transactions.
|
- The `Change` class is modified for applying `UNLModify`
|
||||||
|
Pseudo-Transactions.
|
||||||
* The `Change` class is modified for applying `UNLModify`
|
- The `Ledger` class is modified for creating and adjusting the negative UNL
|
||||||
Pseudo-Transactions.
|
ledger component.
|
||||||
|
- The `LedgerMaster` class is modified for filtering out validation messages
|
||||||
* The `Ledger` class is modified for creating and adjusting the negative UNL
|
from negative UNL validators when verifying if a ledger is fully
|
||||||
ledger component.
|
validated.
|
||||||
|
|
||||||
* The `LedgerMaster` class is modified for filtering out validation messages
|
|
||||||
from negative UNL validators when verifying if a ledger is fully
|
|
||||||
validated.
|
|
||||||
|
|
||||||

|
Changes")
|
||||||
|
|
||||||
|
|
||||||
## Roads Not Taken
|
## Roads Not Taken
|
||||||
|
|
||||||
### Use a Mechanism Like Fee Voting to Process UNLModify Pseudo-Transactions
|
### Use a Mechanism Like Fee Voting to Process UNLModify Pseudo-Transactions
|
||||||
@@ -311,7 +303,7 @@ and different quorums for the same ledger. As a result, the network's safety is
|
|||||||
impacted.
|
impacted.
|
||||||
|
|
||||||
This updated version does not impact safety though operates a bit more slowly.
|
This updated version does not impact safety though operates a bit more slowly.
|
||||||
The negative UNL modifications in the *UNLModify* pseudo-transaction approved by
|
The negative UNL modifications in the _UNLModify_ pseudo-transaction approved by
|
||||||
the consensus will take effect at the next flag ledger. The extra time of the
|
the consensus will take effect at the next flag ledger. The extra time of the
|
||||||
256 ledgers should be enough for nodes to be in sync of the negative UNL
|
256 ledgers should be enough for nodes to be in sync of the negative UNL
|
||||||
modifications.
|
modifications.
|
||||||
@@ -334,29 +326,28 @@ expiration approach cannot be simply applied.
|
|||||||
### Validator Reliability Measurement and Flag Ledger Frequency
|
### Validator Reliability Measurement and Flag Ledger Frequency
|
||||||
|
|
||||||
If the ledger time is about 4.5 seconds and the low-water mark is 50%, then in
|
If the ledger time is about 4.5 seconds and the low-water mark is 50%, then in
|
||||||
the worst case, it takes 48 minutes *((0.5 * 256 + 256 + 256) * 4.5 / 60 = 48)*
|
the worst case, it takes 48 minutes _((0.5 _ 256 + 256 + 256) _ 4.5 / 60 = 48)_
|
||||||
to put an offline validator on the negative UNL. We considered lowering the flag
|
to put an offline validator on the negative UNL. We considered lowering the flag
|
||||||
ledger frequency so that the negative UNL can be more responsive. We also
|
ledger frequency so that the negative UNL can be more responsive. We also
|
||||||
considered decoupling the reliability measurement and flag ledger frequency to
|
considered decoupling the reliability measurement and flag ledger frequency to
|
||||||
be more flexible. In practice, however, their benefits are not clear.
|
be more flexible. In practice, however, their benefits are not clear.
|
||||||
|
|
||||||
|
|
||||||
## New Attack Vectors
|
## New Attack Vectors
|
||||||
|
|
||||||
A group of malicious validators may try to frame a reliable validator and put it
|
A group of malicious validators may try to frame a reliable validator and put it
|
||||||
on the negative UNL. But they cannot succeed. Because:
|
on the negative UNL. But they cannot succeed. Because:
|
||||||
|
|
||||||
1. A reliable validator sends a signed validation message every ledger. A
|
1. A reliable validator sends a signed validation message every ledger. A
|
||||||
sufficient peer-to-peer network will propagate the validation messages to other
|
sufficient peer-to-peer network will propagate the validation messages to other
|
||||||
validators. The validators will decide if another validator is reliable or not
|
validators. The validators will decide if another validator is reliable or not
|
||||||
only by its local observation of the validation messages received. So an honest
|
only by its local observation of the validation messages received. So an honest
|
||||||
validator’s vote on another validator’s reliability is accurate.
|
validator’s vote on another validator’s reliability is accurate.
|
||||||
|
|
||||||
1. Given the votes are accurate, and one vote per validator, an honest validator
|
1. Given the votes are accurate, and one vote per validator, an honest validator
|
||||||
will not create a UNLModify transaction of a reliable validator.
|
will not create a UNLModify transaction of a reliable validator.
|
||||||
|
|
||||||
1. A validator can be added to a negative UNL only through a UNLModify
|
1. A validator can be added to a negative UNL only through a UNLModify
|
||||||
transaction.
|
transaction.
|
||||||
|
|
||||||
Assuming the group of malicious validators is less than the quorum, they cannot
|
Assuming the group of malicious validators is less than the quorum, they cannot
|
||||||
frame a reliable validator.
|
frame a reliable validator.
|
||||||
@@ -365,32 +356,32 @@ frame a reliable validator.
|
|||||||
|
|
||||||
The bullet points below briefly summarize the current proposal:
|
The bullet points below briefly summarize the current proposal:
|
||||||
|
|
||||||
* The motivation of the negative UNL is to improve the liveness of the network.
|
- The motivation of the negative UNL is to improve the liveness of the network.
|
||||||
|
|
||||||
* The targeted faults are the ones frequently observed in the production
|
- The targeted faults are the ones frequently observed in the production
|
||||||
network.
|
network.
|
||||||
|
|
||||||
* Validators propose negative UNL candidates based on their local measurements.
|
- Validators propose negative UNL candidates based on their local measurements.
|
||||||
|
|
||||||
* The absolute minimum quorum is 60% of the original UNL.
|
- The absolute minimum quorum is 60% of the original UNL.
|
||||||
|
|
||||||
* The format of the ledger is changed, and a new *UNLModify* pseudo-transaction
|
- The format of the ledger is changed, and a new _UNLModify_ pseudo-transaction
|
||||||
is added. Any tools or systems that rely on the format of these data will have
|
is added. Any tools or systems that rely on the format of these data will have
|
||||||
to be updated.
|
to be updated.
|
||||||
|
|
||||||
* The negative UNL can only be modified on the flag ledgers.
|
- The negative UNL can only be modified on the flag ledgers.
|
||||||
|
|
||||||
* At most one validator can be added to the negative UNL at a flag ledger.
|
- At most one validator can be added to the negative UNL at a flag ledger.
|
||||||
|
|
||||||
* At most one validator can be removed from the negative UNL at a flag ledger.
|
- At most one validator can be removed from the negative UNL at a flag ledger.
|
||||||
|
|
||||||
* If a validator's reliability status changes, it takes two flag ledgers to
|
- If a validator's reliability status changes, it takes two flag ledgers to
|
||||||
modify the negative UNL.
|
modify the negative UNL.
|
||||||
|
|
||||||
* The quorum is the larger of 80% of the effective UNL and 60% of the original
|
- The quorum is the larger of 80% of the effective UNL and 60% of the original
|
||||||
UNL.
|
UNL.
|
||||||
|
|
||||||
* If a validator is on the negative UNL, its validation messages are ignored
|
- If a validator is on the negative UNL, its validation messages are ignored
|
||||||
when the local node verifies if a ledger is fully validated.
|
when the local node verifies if a ledger is fully validated.
|
||||||
|
|
||||||
## FAQ
|
## FAQ
|
||||||
@@ -415,7 +406,7 @@ lower quorum size while keeping the network safe.
|
|||||||
validator removed from the negative UNL? </h3>
|
validator removed from the negative UNL? </h3>
|
||||||
|
|
||||||
A validator’s reliability is measured by other validators. If a validator
|
A validator’s reliability is measured by other validators. If a validator
|
||||||
becomes unreliable, at a flag ledger, other validators propose *UNLModify*
|
becomes unreliable, at a flag ledger, other validators propose _UNLModify_
|
||||||
pseudo-transactions which vote the validator to add to the negative UNL during
|
pseudo-transactions which vote the validator to add to the negative UNL during
|
||||||
the consensus session. If agreed, the validator is added to the negative UNL at
|
the consensus session. If agreed, the validator is added to the negative UNL at
|
||||||
the next flag ledger. The mechanism of removing a validator from the negative
|
the next flag ledger. The mechanism of removing a validator from the negative
|
||||||
@@ -423,32 +414,32 @@ UNL is the same.
|
|||||||
|
|
||||||
### Question: Given a negative UNL, what happens if the UNL changes?
|
### Question: Given a negative UNL, what happens if the UNL changes?
|
||||||
|
|
||||||
Answer: Let’s consider the cases:
|
Answer: Let’s consider the cases:
|
||||||
|
|
||||||
1. A validator is added to the UNL, and it is already in the negative UNL. This
|
1. A validator is added to the UNL, and it is already in the negative UNL. This
|
||||||
case could happen when not all the nodes have the same UNL. Note that the
|
case could happen when not all the nodes have the same UNL. Note that the
|
||||||
negative UNL on the ledger lists unreliable nodes that are not necessarily the
|
negative UNL on the ledger lists unreliable nodes that are not necessarily the
|
||||||
validators for everyone.
|
validators for everyone.
|
||||||
|
|
||||||
In this case, the liveness is affected negatively. Because the minimum
|
In this case, the liveness is affected negatively. Because the minimum
|
||||||
quorum could be larger but the usable validators are not increased.
|
quorum could be larger but the usable validators are not increased.
|
||||||
|
|
||||||
1. A validator is removed from the UNL, and it is in the negative UNL.
|
1. A validator is removed from the UNL, and it is in the negative UNL.
|
||||||
|
|
||||||
In this case, the liveness is affected positively. Because the quorum could
|
In this case, the liveness is affected positively. Because the quorum could
|
||||||
be smaller but the usable validators are not reduced.
|
be smaller but the usable validators are not reduced.
|
||||||
|
|
||||||
1. A validator is added to the UNL, and it is not in the negative UNL.
|
1. A validator is added to the UNL, and it is not in the negative UNL.
|
||||||
1. A validator is removed from the UNL, and it is not in the negative UNL.
|
1. A validator is removed from the UNL, and it is not in the negative UNL.
|
||||||
|
|
||||||
Case 3 and 4 are not affected by the negative UNL protocol.
|
Case 3 and 4 are not affected by the negative UNL protocol.
|
||||||
|
|
||||||
### Question: Can we simply lower the quorum to 60% without the negative UNL?
|
### Question: Can we simply lower the quorum to 60% without the negative UNL?
|
||||||
|
|
||||||
Answer: No, because the negative UNL approach is safer.
|
Answer: No, because the negative UNL approach is safer.
|
||||||
|
|
||||||
First let’s compare the two approaches intuitively, (1) the *negative UNL*
|
First let’s compare the two approaches intuitively, (1) the _negative UNL_
|
||||||
approach, and (2) *lower quorum*: simply lowering the quorum from 80% to 60%
|
approach, and (2) _lower quorum_: simply lowering the quorum from 80% to 60%
|
||||||
without the negative UNL. The negative UNL approach uses consensus to come up
|
without the negative UNL. The negative UNL approach uses consensus to come up
|
||||||
with a list of unreliable validators, which are then removed from the effective
|
with a list of unreliable validators, which are then removed from the effective
|
||||||
UNL temporarily. With this approach, the list of unreliable validators is agreed
|
UNL temporarily. With this approach, the list of unreliable validators is agreed
|
||||||
@@ -462,75 +453,75 @@ Next we compare the two approaches quantitatively with examples, and apply
|
|||||||
Theorem 8 of [Analysis of the XRP Ledger Consensus
|
Theorem 8 of [Analysis of the XRP Ledger Consensus
|
||||||
Protocol](https://arxiv.org/abs/1802.07242) paper:
|
Protocol](https://arxiv.org/abs/1802.07242) paper:
|
||||||
|
|
||||||
*XRP LCP guarantees fork safety if **O<sub>i,j</sub> > n<sub>j</sub> / 2 +
|
_XRP LCP guarantees fork safety if **O<sub>i,j</sub> > n<sub>j</sub> / 2 +
|
||||||
n<sub>i</sub> − q<sub>i</sub> + t<sub>i,j</sub>** for every pair of nodes
|
n<sub>i</sub> − q<sub>i</sub> + t<sub>i,j</sub>** for every pair of nodes
|
||||||
P<sub>i</sub>, P<sub>j</sub>,*
|
P<sub>i</sub>, P<sub>j</sub>,_
|
||||||
|
|
||||||
where *O<sub>i,j</sub>* is the overlapping requirement, n<sub>j</sub> and
|
where _O<sub>i,j</sub>_ is the overlapping requirement, n<sub>j</sub> and
|
||||||
n<sub>i</sub> are UNL sizes, q<sub>i</sub> is the quorum size of P<sub>i</sub>,
|
n<sub>i</sub> are UNL sizes, q<sub>i</sub> is the quorum size of P<sub>i</sub>,
|
||||||
*t<sub>i,j</sub> = min(t<sub>i</sub>, t<sub>j</sub>, O<sub>i,j</sub>)*, and
|
_t<sub>i,j</sub> = min(t<sub>i</sub>, t<sub>j</sub>, O<sub>i,j</sub>)_, and
|
||||||
t<sub>i</sub> and t<sub>j</sub> are the number of faults can be tolerated by
|
t<sub>i</sub> and t<sub>j</sub> are the number of faults can be tolerated by
|
||||||
P<sub>i</sub> and P<sub>j</sub>.
|
P<sub>i</sub> and P<sub>j</sub>.
|
||||||
|
|
||||||
We denote *UNL<sub>i</sub>* as *P<sub>i</sub>'s UNL*, and *|UNL<sub>i</sub>|* as
|
We denote _UNL<sub>i</sub>_ as _P<sub>i</sub>'s UNL_, and _|UNL<sub>i</sub>|_ as
|
||||||
the size of *P<sub>i</sub>'s UNL*.
|
the size of _P<sub>i</sub>'s UNL_.
|
||||||
|
|
||||||
Assuming *|UNL<sub>i</sub>| = |UNL<sub>j</sub>|*, let's consider the following
|
Assuming _|UNL<sub>i</sub>| = |UNL<sub>j</sub>|_, let's consider the following
|
||||||
three cases:
|
three cases:
|
||||||
|
|
||||||
1. With 80% quorum and 20% faults, *O<sub>i,j</sub> > 100% / 2 + 100% - 80% +
|
1. With 80% quorum and 20% faults, _O<sub>i,j</sub> > 100% / 2 + 100% - 80% +
|
||||||
20% = 90%*. I.e. fork safety requires > 90% UNL overlaps. This is one of the
|
20% = 90%_. I.e. fork safety requires > 90% UNL overlaps. This is one of the
|
||||||
results in the analysis paper.
|
results in the analysis paper.
|
||||||
|
|
||||||
1. If the quorum is 60%, the relationship between the overlapping requirement
|
1. If the quorum is 60%, the relationship between the overlapping requirement
|
||||||
and the faults that can be tolerated is *O<sub>i,j</sub> > 90% +
|
and the faults that can be tolerated is _O<sub>i,j</sub> > 90% +
|
||||||
t<sub>i,j</sub>*. Under the same overlapping condition (i.e. 90%), to guarantee
|
t<sub>i,j</sub>_. Under the same overlapping condition (i.e. 90%), to guarantee
|
||||||
the fork safety, the network cannot tolerate any faults. So under the same
|
the fork safety, the network cannot tolerate any faults. So under the same
|
||||||
overlapping condition, if the quorum is simply lowered, the network can tolerate
|
overlapping condition, if the quorum is simply lowered, the network can tolerate
|
||||||
fewer faults.
|
fewer faults.
|
||||||
|
|
||||||
1. With the negative UNL approach, we want to argue that the inequation
|
1. With the negative UNL approach, we want to argue that the inequation
|
||||||
*O<sub>i,j</sub> > n<sub>j</sub> / 2 + n<sub>i</sub> − q<sub>i</sub> +
|
_O<sub>i,j</sub> > n<sub>j</sub> / 2 + n<sub>i</sub> − q<sub>i</sub> +
|
||||||
t<sub>i,j</sub>* is always true to guarantee fork safety, while the negative UNL
|
t<sub>i,j</sub>_ is always true to guarantee fork safety, while the negative UNL
|
||||||
protocol runs, i.e. the effective quorum is lowered without weakening the
|
protocol runs, i.e. the effective quorum is lowered without weakening the
|
||||||
network's fault tolerance. To make the discussion easier, we rewrite the
|
network's fault tolerance. To make the discussion easier, we rewrite the
|
||||||
inequation as *O<sub>i,j</sub> > n<sub>j</sub> / 2 + (n<sub>i</sub> −
|
inequation as _O<sub>i,j</sub> > n<sub>j</sub> / 2 + (n<sub>i</sub> −
|
||||||
q<sub>i</sub>) + min(t<sub>i</sub>, t<sub>j</sub>)*, where O<sub>i,j</sub> is
|
q<sub>i</sub>) + min(t<sub>i</sub>, t<sub>j</sub>)_, where O<sub>i,j</sub> is
|
||||||
dropped from the definition of t<sub>i,j</sub> because *O<sub>i,j</sub> >
|
dropped from the definition of t<sub>i,j</sub> because _O<sub>i,j</sub> >
|
||||||
min(t<sub>i</sub>, t<sub>j</sub>)* always holds under the parameters we will
|
min(t<sub>i</sub>, t<sub>j</sub>)_ always holds under the parameters we will
|
||||||
use. Assuming a validator V is added to the negative UNL, now let's consider the
|
use. Assuming a validator V is added to the negative UNL, now let's consider the
|
||||||
4 cases:
|
4 cases:
|
||||||
|
|
||||||
1. V is not on UNL<sub>i</sub> nor UNL<sub>j</sub>
|
1. V is not on UNL<sub>i</sub> nor UNL<sub>j</sub>
|
||||||
|
|
||||||
The inequation holds because none of the variables change.
|
The inequation holds because none of the variables change.
|
||||||
|
|
||||||
1. V is on UNL<sub>i</sub> but not on UNL<sub>j</sub>
|
1. V is on UNL<sub>i</sub> but not on UNL<sub>j</sub>
|
||||||
|
|
||||||
The value of *(n<sub>i</sub> − q<sub>i</sub>)* is smaller. The value of
|
The value of *(n<sub>i</sub> − q<sub>i</sub>)* is smaller. The value of
|
||||||
*min(t<sub>i</sub>, t<sub>j</sub>)* could be smaller too. Other
|
*min(t<sub>i</sub>, t<sub>j</sub>)* could be smaller too. Other
|
||||||
variables do not change. Overall, the left side of the inequation does
|
variables do not change. Overall, the left side of the inequation does
|
||||||
not change, but the right side is smaller. So the inequation holds.
|
not change, but the right side is smaller. So the inequation holds.
|
||||||
|
|
||||||
1. V is not on UNL<sub>i</sub> but on UNL<sub>j</sub>
|
|
||||||
|
|
||||||
The value of *n<sub>j</sub> / 2* is smaller. The value of
|
1. V is not on UNL<sub>i</sub> but on UNL<sub>j</sub>
|
||||||
*min(t<sub>i</sub>, t<sub>j</sub>)* could be smaller too. Other
|
|
||||||
variables do not change. Overall, the left side of the inequation does
|
|
||||||
not change, but the right side is smaller. So the inequation holds.
|
|
||||||
|
|
||||||
1. V is on both UNL<sub>i</sub> and UNL<sub>j</sub>
|
|
||||||
|
|
||||||
The value of *O<sub>i,j</sub>* is reduced by 1. The values of
|
The value of *n<sub>j</sub> / 2* is smaller. The value of
|
||||||
*n<sub>j</sub> / 2*, *(n<sub>i</sub> − q<sub>i</sub>)*, and
|
*min(t<sub>i</sub>, t<sub>j</sub>)* could be smaller too. Other
|
||||||
*min(t<sub>i</sub>, t<sub>j</sub>)* are reduced by 0.5, 0.2, and 1
|
variables do not change. Overall, the left side of the inequation does
|
||||||
respectively. The right side is reduced by 1.7. Overall, the left side
|
not change, but the right side is smaller. So the inequation holds.
|
||||||
of the inequation is reduced by 1, and the right side is reduced by 1.7.
|
|
||||||
So the inequation holds.
|
|
||||||
|
|
||||||
The inequation holds for all the cases. So with the negative UNL approach,
|
1. V is on both UNL<sub>i</sub> and UNL<sub>j</sub>
|
||||||
the network's fork safety is preserved, while the quorum is lowered that
|
|
||||||
increases the network's liveness.
|
The value of *O<sub>i,j</sub>* is reduced by 1. The values of
|
||||||
|
*n<sub>j</sub> / 2*, *(n<sub>i</sub> − q<sub>i</sub>)*, and
|
||||||
|
*min(t<sub>i</sub>, t<sub>j</sub>)* are reduced by 0.5, 0.2, and 1
|
||||||
|
respectively. The right side is reduced by 1.7. Overall, the left side
|
||||||
|
of the inequation is reduced by 1, and the right side is reduced by 1.7.
|
||||||
|
So the inequation holds.
|
||||||
|
|
||||||
|
The inequation holds for all the cases. So with the negative UNL approach,
|
||||||
|
the network's fork safety is preserved, while the quorum is lowered that
|
||||||
|
increases the network's liveness.
|
||||||
|
|
||||||
<h3> Question: We have observed that occasionally a validator wanders off on its
|
<h3> Question: We have observed that occasionally a validator wanders off on its
|
||||||
own chain. How is this case handled by the negative UNL algorithm? </h3>
|
own chain. How is this case handled by the negative UNL algorithm? </h3>
|
||||||
@@ -565,11 +556,11 @@ will be used after that. We want to see the test cases still pass with real
|
|||||||
network delay. A test case specifies:
|
network delay. A test case specifies:
|
||||||
|
|
||||||
1. a UNL with different number of validators for different test cases,
|
1. a UNL with different number of validators for different test cases,
|
||||||
1. a network with zero or more non-validator nodes,
|
1. a network with zero or more non-validator nodes,
|
||||||
1. a sequence of validator reliability change events (by killing/restarting
|
1. a sequence of validator reliability change events (by killing/restarting
|
||||||
nodes, or by running modified rippled that does not send all validation
|
nodes, or by running modified rippled that does not send all validation
|
||||||
messages),
|
messages),
|
||||||
1. the correct outcomes.
|
1. the correct outcomes.
|
||||||
|
|
||||||
For all the test cases, the correct outcomes are verified by examining logs. We
|
For all the test cases, the correct outcomes are verified by examining logs. We
|
||||||
will grep the log to see if the correct negative UNLs are generated, and whether
|
will grep the log to see if the correct negative UNLs are generated, and whether
|
||||||
@@ -579,6 +570,7 @@ timing parameters of rippled will be changed to have faster ledger time. Most if
|
|||||||
not all test cases do not need client transactions.
|
not all test cases do not need client transactions.
|
||||||
|
|
||||||
For example, the test cases for the prototype:
|
For example, the test cases for the prototype:
|
||||||
|
|
||||||
1. A 10-validator UNL.
|
1. A 10-validator UNL.
|
||||||
1. The network does not have other nodes.
|
1. The network does not have other nodes.
|
||||||
1. The validators will be started from the genesis. Once they start to produce
|
1. The validators will be started from the genesis. Once they start to produce
|
||||||
@@ -587,11 +579,11 @@ For example, the test cases for the prototype:
|
|||||||
1. A sequence of events (or the lack of events) such as a killed validator is
|
1. A sequence of events (or the lack of events) such as a killed validator is
|
||||||
added to the negative UNL.
|
added to the negative UNL.
|
||||||
|
|
||||||
#### Roads Not Taken: Test with Extended CSF
|
#### Roads Not Taken: Test with Extended CSF
|
||||||
|
|
||||||
We considered testing with the current unit test framework, specifically the
|
We considered testing with the current unit test framework, specifically the
|
||||||
[Consensus Simulation
|
[Consensus Simulation
|
||||||
Framework](https://github.com/ripple/rippled/blob/develop/src/test/csf/README.md)
|
Framework](https://github.com/ripple/rippled/blob/develop/src/test/csf/README.md)
|
||||||
(CSF). However, the CSF currently can only test the generic consensus algorithm
|
(CSF). However, the CSF currently can only test the generic consensus algorithm
|
||||||
as in the paper: [Analysis of the XRP Ledger Consensus
|
as in the paper: [Analysis of the XRP Ledger Consensus
|
||||||
Protocol](https://arxiv.org/abs/1802.07242).
|
Protocol](https://arxiv.org/abs/1802.07242).
|
||||||
|
|||||||
@@ -82,7 +82,9 @@ pattern and the way coroutines are implemented, where every yield saves the spot
|
|||||||
in the code where it left off and every resume jumps back to that spot.
|
in the code where it left off and every resume jumps back to that spot.
|
||||||
|
|
||||||
### Sequence Diagram
|
### Sequence Diagram
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### Class Diagram
|
### Class Diagram
|
||||||
|
|
||||||

|

|
||||||
|
|||||||
@@ -16,5 +16,5 @@
|
|||||||
## Function
|
## Function
|
||||||
|
|
||||||
- Minimize external dependencies
|
- Minimize external dependencies
|
||||||
* Pass options in the ctor instead of using theConfig
|
- Pass options in the ctor instead of using theConfig
|
||||||
* Use as few other classes as possible
|
- Use as few other classes as possible
|
||||||
|
|||||||
@@ -1,18 +1,18 @@
|
|||||||
# Coding Standards
|
# Coding Standards
|
||||||
|
|
||||||
Coding standards used here gradually evolve and propagate through
|
Coding standards used here gradually evolve and propagate through
|
||||||
code reviews. Some aspects are enforced more strictly than others.
|
code reviews. Some aspects are enforced more strictly than others.
|
||||||
|
|
||||||
## Rules
|
## Rules
|
||||||
|
|
||||||
These rules only apply to our own code. We can't enforce any sort of
|
These rules only apply to our own code. We can't enforce any sort of
|
||||||
style on the external repositories and libraries we include. The best
|
style on the external repositories and libraries we include. The best
|
||||||
guideline is to maintain the standards that are used in those libraries.
|
guideline is to maintain the standards that are used in those libraries.
|
||||||
|
|
||||||
* Tab inserts 4 spaces. No tab characters.
|
- Tab inserts 4 spaces. No tab characters.
|
||||||
* Braces are indented in the [Allman style][1].
|
- Braces are indented in the [Allman style][1].
|
||||||
* Modern C++ principles. No naked ```new``` or ```delete```.
|
- Modern C++ principles. No naked `new` or `delete`.
|
||||||
* Line lengths limited to 80 characters. Exceptions limited to data and tables.
|
- Line lengths limited to 80 characters. Exceptions limited to data and tables.
|
||||||
|
|
||||||
## Guidelines
|
## Guidelines
|
||||||
|
|
||||||
@@ -21,17 +21,17 @@ why you're doing it. Think, use common sense, and consider that this
|
|||||||
your changes will probably need to be maintained long after you've
|
your changes will probably need to be maintained long after you've
|
||||||
moved on to other projects.
|
moved on to other projects.
|
||||||
|
|
||||||
* Use white space and blank lines to guide the eye and keep your intent clear.
|
- Use white space and blank lines to guide the eye and keep your intent clear.
|
||||||
* Put private data members at the top of a class, and the 6 public special
|
- Put private data members at the top of a class, and the 6 public special
|
||||||
members immediately after, in the following order:
|
members immediately after, in the following order:
|
||||||
* Destructor
|
- Destructor
|
||||||
* Default constructor
|
- Default constructor
|
||||||
* Copy constructor
|
- Copy constructor
|
||||||
* Copy assignment
|
- Copy assignment
|
||||||
* Move constructor
|
- Move constructor
|
||||||
* Move assignment
|
- Move assignment
|
||||||
* Don't over-inline by defining large functions within the class
|
- Don't over-inline by defining large functions within the class
|
||||||
declaration, not even for template classes.
|
declaration, not even for template classes.
|
||||||
|
|
||||||
## Formatting
|
## Formatting
|
||||||
|
|
||||||
@@ -39,44 +39,44 @@ The goal of source code formatting should always be to make things as easy to
|
|||||||
read as possible. White space is used to guide the eye so that details are not
|
read as possible. White space is used to guide the eye so that details are not
|
||||||
overlooked. Blank lines are used to separate code into "paragraphs."
|
overlooked. Blank lines are used to separate code into "paragraphs."
|
||||||
|
|
||||||
* Always place a space before and after all binary operators,
|
- Always place a space before and after all binary operators,
|
||||||
especially assignments (`operator=`).
|
especially assignments (`operator=`).
|
||||||
* The `!` operator should be preceded by a space, but not followed by one.
|
- The `!` operator should be preceded by a space, but not followed by one.
|
||||||
* The `~` operator should be preceded by a space, but not followed by one.
|
- The `~` operator should be preceded by a space, but not followed by one.
|
||||||
* The `++` and `--` operators should have no spaces between the operator and
|
- The `++` and `--` operators should have no spaces between the operator and
|
||||||
the operand.
|
the operand.
|
||||||
* A space never appears before a comma, and always appears after a comma.
|
- A space never appears before a comma, and always appears after a comma.
|
||||||
* Don't put spaces after a parenthesis. A typical member function call might
|
- Don't put spaces after a parenthesis. A typical member function call might
|
||||||
look like this: `foobar (1, 2, 3);`
|
look like this: `foobar (1, 2, 3);`
|
||||||
* In general, leave a blank line before an `if` statement.
|
- In general, leave a blank line before an `if` statement.
|
||||||
* In general, leave a blank line after a closing brace `}`.
|
- In general, leave a blank line after a closing brace `}`.
|
||||||
* Do not place code on the same line as any opening or
|
- Do not place code on the same line as any opening or
|
||||||
closing brace.
|
closing brace.
|
||||||
* Do not write `if` statements all-on-one-line. The exception to this is when
|
- Do not write `if` statements all-on-one-line. The exception to this is when
|
||||||
you've got a sequence of similar `if` statements, and are aligning them all
|
you've got a sequence of similar `if` statements, and are aligning them all
|
||||||
vertically to highlight their similarities.
|
vertically to highlight their similarities.
|
||||||
* In an `if-else` statement, if you surround one half of the statement with
|
- In an `if-else` statement, if you surround one half of the statement with
|
||||||
braces, you also need to put braces around the other half, to match.
|
braces, you also need to put braces around the other half, to match.
|
||||||
* When writing a pointer type, use this spacing: `SomeObject* myObject`.
|
- When writing a pointer type, use this spacing: `SomeObject* myObject`.
|
||||||
Technically, a more correct spacing would be `SomeObject *myObject`, but
|
Technically, a more correct spacing would be `SomeObject *myObject`, but
|
||||||
it makes more sense for the asterisk to be grouped with the type name,
|
it makes more sense for the asterisk to be grouped with the type name,
|
||||||
since being a pointer is part of the type, not the variable name. The only
|
since being a pointer is part of the type, not the variable name. The only
|
||||||
time that this can lead to any problems is when you're declaring multiple
|
time that this can lead to any problems is when you're declaring multiple
|
||||||
pointers of the same type in the same statement - which leads on to the next
|
pointers of the same type in the same statement - which leads on to the next
|
||||||
rule:
|
rule:
|
||||||
* When declaring multiple pointers, never do so in a single statement, e.g.
|
- When declaring multiple pointers, never do so in a single statement, e.g.
|
||||||
`SomeObject* p1, *p2;` - instead, always split them out onto separate lines
|
`SomeObject* p1, *p2;` - instead, always split them out onto separate lines
|
||||||
and write the type name again, to make it quite clear what's going on, and
|
and write the type name again, to make it quite clear what's going on, and
|
||||||
avoid the danger of missing out any vital asterisks.
|
avoid the danger of missing out any vital asterisks.
|
||||||
* The previous point also applies to references, so always put the `&` next to
|
- The previous point also applies to references, so always put the `&` next to
|
||||||
the type rather than the variable, e.g. `void foo (Thing const& thing)`. And
|
the type rather than the variable, e.g. `void foo (Thing const& thing)`. And
|
||||||
don't put a space on both sides of the `*` or `&` - always put a space after
|
don't put a space on both sides of the `*` or `&` - always put a space after
|
||||||
it, but never before it.
|
it, but never before it.
|
||||||
* The word `const` should be placed to the right of the thing that it modifies,
|
- The word `const` should be placed to the right of the thing that it modifies,
|
||||||
for consistency. For example `int const` refers to an int which is const.
|
for consistency. For example `int const` refers to an int which is const.
|
||||||
`int const*` is a pointer to an int which is const. `int *const` is a const
|
`int const*` is a pointer to an int which is const. `int *const` is a const
|
||||||
pointer to an int.
|
pointer to an int.
|
||||||
* Always place a space in between the template angle brackets and the type
|
- Always place a space in between the template angle brackets and the type
|
||||||
name. Template code is already hard enough to read!
|
name. Template code is already hard enough to read!
|
||||||
|
|
||||||
[1]: http://en.wikipedia.org/wiki/Indent_style#Allman_style
|
[1]: http://en.wikipedia.org/wiki/Indent_style#Allman_style
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ and header under /opt/local/include:
|
|||||||
|
|
||||||
$ scons clang profile-jemalloc=/opt/local
|
$ scons clang profile-jemalloc=/opt/local
|
||||||
|
|
||||||
----------------------
|
---
|
||||||
|
|
||||||
## Using the jemalloc library from within the code
|
## Using the jemalloc library from within the code
|
||||||
|
|
||||||
@@ -60,4 +60,3 @@ Linking against the jemalloc library will override
|
|||||||
the system's default `malloc()` and related functions with jemalloc's
|
the system's default `malloc()` and related functions with jemalloc's
|
||||||
implementation. This is the case even if the code is not instrumented
|
implementation. This is the case even if the code is not instrumented
|
||||||
to use jemalloc's specific API.
|
to use jemalloc's specific API.
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ Install these dependencies:
|
|||||||
- [Doxygen](http://www.doxygen.nl): All major platforms have [official binary
|
- [Doxygen](http://www.doxygen.nl): All major platforms have [official binary
|
||||||
distributions](http://www.doxygen.nl/download.html#srcbin), or you can
|
distributions](http://www.doxygen.nl/download.html#srcbin), or you can
|
||||||
build from [source](http://www.doxygen.nl/download.html#srcbin).
|
build from [source](http://www.doxygen.nl/download.html#srcbin).
|
||||||
|
|
||||||
- MacOS: We recommend installing via Homebrew: `brew install doxygen`.
|
- MacOS: We recommend installing via Homebrew: `brew install doxygen`.
|
||||||
The executable will be installed in `/usr/local/bin` which is already
|
The executable will be installed in `/usr/local/bin` which is already
|
||||||
in the default `PATH`.
|
in the default `PATH`.
|
||||||
@@ -21,18 +20,15 @@ Install these dependencies:
|
|||||||
$ ln -s /Applications/Doxygen.app/Contents/Resources/doxygen /usr/local/bin/doxygen
|
$ ln -s /Applications/Doxygen.app/Contents/Resources/doxygen /usr/local/bin/doxygen
|
||||||
```
|
```
|
||||||
|
|
||||||
- [PlantUML](http://plantuml.com):
|
- [PlantUML](http://plantuml.com):
|
||||||
|
|
||||||
1. Install a functioning Java runtime, if you don't already have one.
|
1. Install a functioning Java runtime, if you don't already have one.
|
||||||
2. Download [`plantuml.jar`](http://sourceforge.net/projects/plantuml/files/plantuml.jar/download).
|
2. Download [`plantuml.jar`](http://sourceforge.net/projects/plantuml/files/plantuml.jar/download).
|
||||||
|
|
||||||
- [Graphviz](https://www.graphviz.org):
|
- [Graphviz](https://www.graphviz.org):
|
||||||
|
|
||||||
- Linux: Install from your package manager.
|
- Linux: Install from your package manager.
|
||||||
- Windows: Use an [official installer](https://graphviz.gitlab.io/_pages/Download/Download_windows.html).
|
- Windows: Use an [official installer](https://graphviz.gitlab.io/_pages/Download/Download_windows.html).
|
||||||
- MacOS: Install via Homebrew: `brew install graphviz`.
|
- MacOS: Install via Homebrew: `brew install graphviz`.
|
||||||
|
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
Instead of installing the above dependencies locally, you can use the official
|
Instead of installing the above dependencies locally, you can use the official
|
||||||
@@ -40,14 +36,16 @@ build environment Docker image, which has all of them installed already.
|
|||||||
|
|
||||||
1. Install [Docker](https://docs.docker.com/engine/installation/)
|
1. Install [Docker](https://docs.docker.com/engine/installation/)
|
||||||
2. Pull the image:
|
2. Pull the image:
|
||||||
```
|
|
||||||
sudo docker pull rippleci/rippled-ci-builder:2944b78d22db
|
|
||||||
```
|
|
||||||
3. Run the image from the project folder:
|
|
||||||
```
|
|
||||||
sudo docker run -v $PWD:/opt/rippled --rm rippleci/rippled-ci-builder:2944b78d22db
|
|
||||||
```
|
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo docker pull rippleci/rippled-ci-builder:2944b78d22db
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Run the image from the project folder:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo docker run -v $PWD:/opt/rippled --rm rippleci/rippled-ci-builder:2944b78d22db
|
||||||
|
```
|
||||||
|
|
||||||
## Build
|
## Build
|
||||||
|
|
||||||
|
|||||||
14
docs/build/conan.md
vendored
14
docs/build/conan.md
vendored
@@ -5,7 +5,6 @@ we should first understand _why_ we use Conan,
|
|||||||
and to understand that,
|
and to understand that,
|
||||||
we need to understand how we use CMake.
|
we need to understand how we use CMake.
|
||||||
|
|
||||||
|
|
||||||
### CMake
|
### CMake
|
||||||
|
|
||||||
Technically, you don't need CMake to build this project.
|
Technically, you don't need CMake to build this project.
|
||||||
@@ -33,9 +32,9 @@ Parameters include:
|
|||||||
- where to find the compiler and linker
|
- where to find the compiler and linker
|
||||||
- where to find dependencies, e.g. libraries and headers
|
- where to find dependencies, e.g. libraries and headers
|
||||||
- how to link dependencies, e.g. any special compiler or linker flags that
|
- how to link dependencies, e.g. any special compiler or linker flags that
|
||||||
need to be used with them, including preprocessor definitions
|
need to be used with them, including preprocessor definitions
|
||||||
- how to compile translation units, e.g. with optimizations, debug symbols,
|
- how to compile translation units, e.g. with optimizations, debug symbols,
|
||||||
position-independent code, etc.
|
position-independent code, etc.
|
||||||
- on Windows, which runtime library to link with
|
- on Windows, which runtime library to link with
|
||||||
|
|
||||||
For some of these parameters, like the build system and compiler,
|
For some of these parameters, like the build system and compiler,
|
||||||
@@ -54,7 +53,6 @@ Most humans prefer to put them into a configuration file, once, that
|
|||||||
CMake can read every time it is configured.
|
CMake can read every time it is configured.
|
||||||
For CMake, that file is a [toolchain file][toolchain].
|
For CMake, that file is a [toolchain file][toolchain].
|
||||||
|
|
||||||
|
|
||||||
### Conan
|
### Conan
|
||||||
|
|
||||||
These next few paragraphs on Conan are going to read much like the ones above
|
These next few paragraphs on Conan are going to read much like the ones above
|
||||||
@@ -79,10 +77,10 @@ Those files include:
|
|||||||
|
|
||||||
- A single toolchain file.
|
- A single toolchain file.
|
||||||
- For every dependency, a CMake [package configuration file][pcf],
|
- For every dependency, a CMake [package configuration file][pcf],
|
||||||
[package version file][pvf], and for every build type, a package
|
[package version file][pvf], and for every build type, a package
|
||||||
targets file.
|
targets file.
|
||||||
Together, these files implement version checking and define `IMPORTED`
|
Together, these files implement version checking and define `IMPORTED`
|
||||||
targets for the dependencies.
|
targets for the dependencies.
|
||||||
|
|
||||||
The toolchain file itself amends the search path
|
The toolchain file itself amends the search path
|
||||||
([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package]
|
([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package]
|
||||||
|
|||||||
5
docs/build/depend.md
vendored
5
docs/build/depend.md
vendored
@@ -2,8 +2,7 @@ We recommend two different methods to depend on libxrpl in your own [CMake][]
|
|||||||
project.
|
project.
|
||||||
Both methods add a CMake library target named `xrpl::libxrpl`.
|
Both methods add a CMake library target named `xrpl::libxrpl`.
|
||||||
|
|
||||||
|
## Conan requirement
|
||||||
## Conan requirement
|
|
||||||
|
|
||||||
The first method adds libxrpl as a [Conan][] requirement.
|
The first method adds libxrpl as a [Conan][] requirement.
|
||||||
With this method, there is no need for a Git [submodule][].
|
With this method, there is no need for a Git [submodule][].
|
||||||
@@ -48,7 +47,6 @@ cmake \
|
|||||||
cmake --build . --parallel
|
cmake --build . --parallel
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## CMake subdirectory
|
## CMake subdirectory
|
||||||
|
|
||||||
The second method adds the [rippled][] project as a CMake
|
The second method adds the [rippled][] project as a CMake
|
||||||
@@ -90,7 +88,6 @@ cmake \
|
|||||||
cmake --build . --parallel
|
cmake --build . --parallel
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
[add_subdirectory]: https://cmake.org/cmake/help/latest/command/add_subdirectory.html
|
[add_subdirectory]: https://cmake.org/cmake/help/latest/command/add_subdirectory.html
|
||||||
[submodule]: https://git-scm.com/book/en/v2/Git-Tools-Submodules
|
[submodule]: https://git-scm.com/book/en/v2/Git-Tools-Submodules
|
||||||
[rippled]: https://github.com/ripple/rippled
|
[rippled]: https://github.com/ripple/rippled
|
||||||
|
|||||||
83
docs/build/environment.md
vendored
83
docs/build/environment.md
vendored
@@ -5,41 +5,39 @@ platforms: Linux, macOS, or Windows.
|
|||||||
|
|
||||||
[BUILD.md]: ../../BUILD.md
|
[BUILD.md]: ../../BUILD.md
|
||||||
|
|
||||||
|
|
||||||
## Linux
|
## Linux
|
||||||
|
|
||||||
Package ecosystems vary across Linux distributions,
|
Package ecosystems vary across Linux distributions,
|
||||||
so there is no one set of instructions that will work for every Linux user.
|
so there is no one set of instructions that will work for every Linux user.
|
||||||
These instructions are written for Ubuntu 22.04.
|
The instructions below are written for Debian 12 (Bookworm).
|
||||||
They are largely copied from the [script][1] used to configure our Docker
|
|
||||||
container for continuous integration.
|
|
||||||
That script handles many more responsibilities.
|
|
||||||
These instructions are just the bare minimum to build one configuration of
|
|
||||||
rippled.
|
|
||||||
You can check that codebase for other Linux distributions and versions.
|
|
||||||
If you cannot find yours there,
|
|
||||||
then we hope that these instructions can at least guide you in the right
|
|
||||||
direction.
|
|
||||||
|
|
||||||
```
|
```
|
||||||
apt update
|
export GCC_RELEASE=12
|
||||||
apt install --yes curl git libssl-dev python3.10-dev python3-pip make g++-11 libprotobuf-dev protobuf-compiler
|
sudo apt update
|
||||||
|
sudo apt install --yes gcc-${GCC_RELEASE} g++-${GCC_RELEASE} python3-pip \
|
||||||
|
python-is-python3 python3-venv python3-dev curl wget ca-certificates \
|
||||||
|
git build-essential cmake ninja-build libc6-dev
|
||||||
|
sudo pip install --break-system-packages conan
|
||||||
|
|
||||||
curl --location --remote-name \
|
sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_RELEASE} 999
|
||||||
"https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1.tar.gz"
|
sudo update-alternatives --install \
|
||||||
tar -xzf cmake-3.25.1.tar.gz
|
/usr/bin/gcc gcc /usr/bin/gcc-${GCC_RELEASE} 100 \
|
||||||
rm cmake-3.25.1.tar.gz
|
--slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_RELEASE} \
|
||||||
cd cmake-3.25.1
|
--slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-${GCC_RELEASE} \
|
||||||
./bootstrap --parallel=$(nproc)
|
--slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-${GCC_RELEASE} \
|
||||||
make --jobs $(nproc)
|
--slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-${GCC_RELEASE} \
|
||||||
make install
|
--slave /usr/bin/gcov gcov /usr/bin/gcov-${GCC_RELEASE} \
|
||||||
cd ..
|
--slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_RELEASE} \
|
||||||
|
--slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_RELEASE} \
|
||||||
pip3 install 'conan<2'
|
--slave /usr/bin/lto-dump lto-dump /usr/bin/lto-dump-${GCC_RELEASE}
|
||||||
|
sudo update-alternatives --auto cc
|
||||||
|
sudo update-alternatives --auto gcc
|
||||||
```
|
```
|
||||||
|
|
||||||
[1]: https://github.com/thejohnfreeman/rippled-docker/blob/master/ubuntu-22.04/install.sh
|
If you use different Linux distribution, hope the instruction above can guide
|
||||||
|
you in the right direction. We try to maintain compatibility with all recent
|
||||||
|
compiler releases, so if you use a rolling distribution like e.g. Arch or CentOS
|
||||||
|
then there is a chance that everything will "just work".
|
||||||
|
|
||||||
## macOS
|
## macOS
|
||||||
|
|
||||||
@@ -52,6 +50,33 @@ minimum required (see [BUILD.md][]).
|
|||||||
clang --version
|
clang --version
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Install Xcode Specific Version (Optional)
|
||||||
|
|
||||||
|
If you develop other applications using XCode you might be consistently updating to the newest version of Apple Clang.
|
||||||
|
This will likely cause issues building rippled. You may want to install a specific version of Xcode:
|
||||||
|
|
||||||
|
1. **Download Xcode**
|
||||||
|
- Visit [Apple Developer Downloads](https://developer.apple.com/download/more/)
|
||||||
|
- Sign in with your Apple Developer account
|
||||||
|
- Search for an Xcode version that includes **Apple Clang (Expected Version)**
|
||||||
|
- Download the `.xip` file
|
||||||
|
|
||||||
|
2. **Install and Configure Xcode**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Extract the .xip file and rename for version management
|
||||||
|
# Example: Xcode_16.2.app
|
||||||
|
|
||||||
|
# Move to Applications directory
|
||||||
|
sudo mv Xcode_16.2.app /Applications/
|
||||||
|
|
||||||
|
# Set as default toolchain (persistent)
|
||||||
|
sudo xcode-select -s /Applications/Xcode_16.2.app/Contents/Developer
|
||||||
|
|
||||||
|
# Set as environment variable (temporary)
|
||||||
|
export DEVELOPER_DIR=/Applications/Xcode_16.2.app/Contents/Developer
|
||||||
|
```
|
||||||
|
|
||||||
The command line developer tools should include Git too:
|
The command line developer tools should include Git too:
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -71,10 +96,10 @@ and use it to install Conan:
|
|||||||
brew update
|
brew update
|
||||||
brew install xz
|
brew install xz
|
||||||
brew install pyenv
|
brew install pyenv
|
||||||
pyenv install 3.10-dev
|
pyenv install 3.11
|
||||||
pyenv global 3.10-dev
|
pyenv global 3.11
|
||||||
eval "$(pyenv init -)"
|
eval "$(pyenv init -)"
|
||||||
pip install 'conan<2'
|
pip install 'conan'
|
||||||
```
|
```
|
||||||
|
|
||||||
Install CMake with Homebrew too:
|
Install CMake with Homebrew too:
|
||||||
|
|||||||
42
docs/build/install.md
vendored
42
docs/build/install.md
vendored
@@ -6,7 +6,6 @@ like CentOS.
|
|||||||
Installing from source is an option for all platforms,
|
Installing from source is an option for all platforms,
|
||||||
and the only supported option for installing custom builds.
|
and the only supported option for installing custom builds.
|
||||||
|
|
||||||
|
|
||||||
## From source
|
## From source
|
||||||
|
|
||||||
From a source build, you can install rippled and libxrpl using CMake's
|
From a source build, you can install rippled and libxrpl using CMake's
|
||||||
@@ -21,25 +20,23 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
|||||||
|
|
||||||
[1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html
|
[1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html
|
||||||
|
|
||||||
|
|
||||||
## With the APT package manager
|
## With the APT package manager
|
||||||
|
|
||||||
1. Update repositories:
|
1. Update repositories:
|
||||||
|
|
||||||
sudo apt update -y
|
sudo apt update -y
|
||||||
|
|
||||||
2. Install utilities:
|
2. Install utilities:
|
||||||
|
|
||||||
sudo apt install -y apt-transport-https ca-certificates wget gnupg
|
sudo apt install -y apt-transport-https ca-certificates wget gnupg
|
||||||
|
|
||||||
3. Add Ripple's package-signing GPG key to your list of trusted keys:
|
3. Add Ripple's package-signing GPG key to your list of trusted keys:
|
||||||
|
|
||||||
sudo mkdir /usr/local/share/keyrings/
|
sudo mkdir /usr/local/share/keyrings/
|
||||||
wget -q -O - "https://repos.ripple.com/repos/api/gpg/key/public" | gpg --dearmor > ripple-key.gpg
|
wget -q -O - "https://repos.ripple.com/repos/api/gpg/key/public" | gpg --dearmor > ripple-key.gpg
|
||||||
sudo mv ripple-key.gpg /usr/local/share/keyrings
|
sudo mv ripple-key.gpg /usr/local/share/keyrings
|
||||||
|
|
||||||
|
4. Check the fingerprint of the newly-added key:
|
||||||
4. Check the fingerprint of the newly-added key:
|
|
||||||
|
|
||||||
gpg /usr/local/share/keyrings/ripple-key.gpg
|
gpg /usr/local/share/keyrings/ripple-key.gpg
|
||||||
|
|
||||||
@@ -51,37 +48,34 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
|||||||
uid TechOps Team at Ripple <techops+rippled@ripple.com>
|
uid TechOps Team at Ripple <techops+rippled@ripple.com>
|
||||||
sub rsa3072 2019-02-14 [E] [expires: 2026-02-17]
|
sub rsa3072 2019-02-14 [E] [expires: 2026-02-17]
|
||||||
|
|
||||||
|
|
||||||
In particular, make sure that the fingerprint matches. (In the above example, the fingerprint is on the third line, starting with `C001`.)
|
In particular, make sure that the fingerprint matches. (In the above example, the fingerprint is on the third line, starting with `C001`.)
|
||||||
|
|
||||||
4. Add the appropriate Ripple repository for your operating system version:
|
5. Add the appropriate Ripple repository for your operating system version:
|
||||||
|
|
||||||
echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/rippled-deb focal stable" | \
|
echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/rippled-deb focal stable" | \
|
||||||
sudo tee -a /etc/apt/sources.list.d/ripple.list
|
sudo tee -a /etc/apt/sources.list.d/ripple.list
|
||||||
|
|
||||||
The above example is appropriate for **Ubuntu 20.04 Focal Fossa**. For other operating systems, replace the word `focal` with one of the following:
|
The above example is appropriate for **Ubuntu 20.04 Focal Fossa**. For other operating systems, replace the word `focal` with one of the following:
|
||||||
|
|
||||||
- `jammy` for **Ubuntu 22.04 Jammy Jellyfish**
|
- `jammy` for **Ubuntu 22.04 Jammy Jellyfish**
|
||||||
- `bionic` for **Ubuntu 18.04 Bionic Beaver**
|
- `bionic` for **Ubuntu 18.04 Bionic Beaver**
|
||||||
- `bullseye` for **Debian 11 Bullseye**
|
- `bullseye` for **Debian 11 Bullseye**
|
||||||
- `buster` for **Debian 10 Buster**
|
- `buster` for **Debian 10 Buster**
|
||||||
|
|
||||||
If you want access to development or pre-release versions of `rippled`, use one of the following instead of `stable`:
|
If you want access to development or pre-release versions of `rippled`, use one of the following instead of `stable`:
|
||||||
|
|
||||||
- `unstable` - Pre-release builds ([`release` branch](https://github.com/ripple/rippled/tree/release))
|
- `unstable` - Pre-release builds ([`release` branch](https://github.com/ripple/rippled/tree/release))
|
||||||
- `nightly` - Experimental/development builds ([`develop` branch](https://github.com/ripple/rippled/tree/develop))
|
- `nightly` - Experimental/development builds ([`develop` branch](https://github.com/ripple/rippled/tree/develop))
|
||||||
|
|
||||||
**Warning:** Unstable and nightly builds may be broken at any time. Do not use these builds for production servers.
|
**Warning:** Unstable and nightly builds may be broken at any time. Do not use these builds for production servers.
|
||||||
|
|
||||||
5. Fetch the Ripple repository.
|
6. Fetch the Ripple repository.
|
||||||
|
|
||||||
sudo apt -y update
|
sudo apt -y update
|
||||||
|
|
||||||
6. Install the `rippled` software package:
|
7. Install the `rippled` software package:
|
||||||
|
|
||||||
sudo apt -y install rippled
|
sudo apt -y install rippled
|
||||||
|
|
||||||
7. Check the status of the `rippled` service:
|
8. Check the status of the `rippled` service:
|
||||||
|
|
||||||
systemctl status rippled.service
|
systemctl status rippled.service
|
||||||
|
|
||||||
@@ -89,24 +83,22 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
|||||||
|
|
||||||
sudo systemctl start rippled.service
|
sudo systemctl start rippled.service
|
||||||
|
|
||||||
8. Optional: allow `rippled` to bind to privileged ports.
|
9. Optional: allow `rippled` to bind to privileged ports.
|
||||||
|
|
||||||
This allows you to serve incoming API requests on port 80 or 443. (If you want to do so, you must also update the config file's port settings.)
|
This allows you to serve incoming API requests on port 80 or 443. (If you want to do so, you must also update the config file's port settings.)
|
||||||
|
|
||||||
sudo setcap 'cap_net_bind_service=+ep' /opt/ripple/bin/rippled
|
sudo setcap 'cap_net_bind_service=+ep' /opt/ripple/bin/rippled
|
||||||
|
|
||||||
|
|
||||||
## With the YUM package manager
|
## With the YUM package manager
|
||||||
|
|
||||||
1. Install the Ripple RPM repository:
|
1. Install the Ripple RPM repository:
|
||||||
|
|
||||||
Choose the appropriate RPM repository for the stability of releases you want:
|
Choose the appropriate RPM repository for the stability of releases you want:
|
||||||
|
|
||||||
- `stable` for the latest production release (`master` branch)
|
- `stable` for the latest production release (`master` branch)
|
||||||
- `unstable` for pre-release builds (`release` branch)
|
- `unstable` for pre-release builds (`release` branch)
|
||||||
- `nightly` for experimental/development builds (`develop` branch)
|
- `nightly` for experimental/development builds (`develop` branch)
|
||||||
|
|
||||||
*Stable*
|
_Stable_
|
||||||
|
|
||||||
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
||||||
[ripple-stable]
|
[ripple-stable]
|
||||||
@@ -118,7 +110,7 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
|||||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/stable/repodata/repomd.xml.key
|
gpgkey=https://repos.ripple.com/repos/rippled-rpm/stable/repodata/repomd.xml.key
|
||||||
REPOFILE
|
REPOFILE
|
||||||
|
|
||||||
*Unstable*
|
_Unstable_
|
||||||
|
|
||||||
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
||||||
[ripple-unstable]
|
[ripple-unstable]
|
||||||
@@ -130,7 +122,7 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
|||||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/unstable/repodata/repomd.xml.key
|
gpgkey=https://repos.ripple.com/repos/rippled-rpm/unstable/repodata/repomd.xml.key
|
||||||
REPOFILE
|
REPOFILE
|
||||||
|
|
||||||
*Nightly*
|
_Nightly_
|
||||||
|
|
||||||
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
||||||
[ripple-nightly]
|
[ripple-nightly]
|
||||||
@@ -142,18 +134,18 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
|||||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key
|
gpgkey=https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key
|
||||||
REPOFILE
|
REPOFILE
|
||||||
|
|
||||||
2. Fetch the latest repo updates:
|
2. Fetch the latest repo updates:
|
||||||
|
|
||||||
sudo yum -y update
|
sudo yum -y update
|
||||||
|
|
||||||
3. Install the new `rippled` package:
|
3. Install the new `rippled` package:
|
||||||
|
|
||||||
sudo yum install -y rippled
|
sudo yum install -y rippled
|
||||||
|
|
||||||
4. Configure the `rippled` service to start on boot:
|
4. Configure the `rippled` service to start on boot:
|
||||||
|
|
||||||
sudo systemctl enable rippled.service
|
sudo systemctl enable rippled.service
|
||||||
|
|
||||||
5. Start the `rippled` service:
|
5. Start the `rippled` service:
|
||||||
|
|
||||||
sudo systemctl start rippled.service
|
sudo systemctl start rippled.service
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
**This section is a work in progress!!**
|
**This section is a work in progress!!**
|
||||||
|
|
||||||
Consensus is the task of reaching agreement within a distributed system in the
|
Consensus is the task of reaching agreement within a distributed system in the
|
||||||
presence of faulty or even malicious participants. This document outlines the
|
presence of faulty or even malicious participants. This document outlines the
|
||||||
[XRP Ledger Consensus Algorithm](https://arxiv.org/abs/1802.07242)
|
[XRP Ledger Consensus Algorithm](https://arxiv.org/abs/1802.07242)
|
||||||
as implemented in [rippled](https://github.com/ripple/rippled), but
|
as implemented in [rippled](https://github.com/ripple/rippled), but
|
||||||
focuses on its utility as a generic consensus algorithm independent of the
|
focuses on its utility as a generic consensus algorithm independent of the
|
||||||
@@ -15,38 +15,38 @@ collectively trusted subnetworks.
|
|||||||
## Distributed Agreement
|
## Distributed Agreement
|
||||||
|
|
||||||
A challenge for distributed systems is reaching agreement on changes in shared
|
A challenge for distributed systems is reaching agreement on changes in shared
|
||||||
state. For the Ripple network, the shared state is the current ledger--account
|
state. For the Ripple network, the shared state is the current ledger--account
|
||||||
information, account balances, order books and other financial data. We will
|
information, account balances, order books and other financial data. We will
|
||||||
refer to shared distributed state as a /ledger/ throughout the remainder of this
|
refer to shared distributed state as a /ledger/ throughout the remainder of this
|
||||||
document.
|
document.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
As shown above, new ledgers are made by applying a set of transactions to the
|
As shown above, new ledgers are made by applying a set of transactions to the
|
||||||
prior ledger. For the Ripple network, transactions include payments,
|
prior ledger. For the Ripple network, transactions include payments,
|
||||||
modification of account settings, updates to offers and more.
|
modification of account settings, updates to offers and more.
|
||||||
|
|
||||||
In a centralized system, generating the next ledger is trivial since there is a
|
In a centralized system, generating the next ledger is trivial since there is a
|
||||||
single unique arbiter of which transactions to include and how to apply them to
|
single unique arbiter of which transactions to include and how to apply them to
|
||||||
a ledger. For decentralized systems, participants must resolve disagreements on
|
a ledger. For decentralized systems, participants must resolve disagreements on
|
||||||
the set of transactions to include, the order to apply those transactions, and
|
the set of transactions to include, the order to apply those transactions, and
|
||||||
even the resulting ledger after applying the transactions. This is even more
|
even the resulting ledger after applying the transactions. This is even more
|
||||||
difficult when some participants are faulty or malicious.
|
difficult when some participants are faulty or malicious.
|
||||||
|
|
||||||
The Ripple network is a decentralized and **trust-full** network. Anyone is free
|
The Ripple network is a decentralized and **trust-full** network. Anyone is free
|
||||||
to join and participants are free to choose a subset of peers that are
|
to join and participants are free to choose a subset of peers that are
|
||||||
collectively trusted to not collude in an attempt to defraud the participant.
|
collectively trusted to not collude in an attempt to defraud the participant.
|
||||||
Leveraging this network of trust, the Ripple algorithm has two main components.
|
Leveraging this network of trust, the Ripple algorithm has two main components.
|
||||||
|
|
||||||
* *Consensus* in which network participants agree on the transactions to apply
|
- _Consensus_ in which network participants agree on the transactions to apply
|
||||||
to a prior ledger, based on the positions of their chosen peers.
|
to a prior ledger, based on the positions of their chosen peers.
|
||||||
* *Validation* in which network participants agree on what ledger was
|
- _Validation_ in which network participants agree on what ledger was
|
||||||
generated, based on the ledgers generated by chosen peers.
|
generated, based on the ledgers generated by chosen peers.
|
||||||
|
|
||||||
These phases are continually repeated to process transactions submitted to the
|
These phases are continually repeated to process transactions submitted to the
|
||||||
network, generating successive ledgers and giving rise to the blockchain ledger
|
network, generating successive ledgers and giving rise to the blockchain ledger
|
||||||
history depicted below. In this diagram, time is flowing to the right, but
|
history depicted below. In this diagram, time is flowing to the right, but
|
||||||
links between ledgers point backward to the parent. Also note the alternate
|
links between ledgers point backward to the parent. Also note the alternate
|
||||||
Ledger 2 that was generated by some participants, but which failed validation
|
Ledger 2 that was generated by some participants, but which failed validation
|
||||||
and was abandoned.
|
and was abandoned.
|
||||||
|
|
||||||
@@ -54,7 +54,7 @@ and was abandoned.
|
|||||||
|
|
||||||
The remainder of this section describes the Consensus and Validation algorithms
|
The remainder of this section describes the Consensus and Validation algorithms
|
||||||
in more detail and is meant as a companion guide to understanding the generic
|
in more detail and is meant as a companion guide to understanding the generic
|
||||||
implementation in `rippled`. The document **does not** discuss correctness,
|
implementation in `rippled`. The document **does not** discuss correctness,
|
||||||
fault-tolerance or liveness properties of the algorithms or the full details of
|
fault-tolerance or liveness properties of the algorithms or the full details of
|
||||||
how they integrate within `rippled` to support the Ripple Consensus Ledger.
|
how they integrate within `rippled` to support the Ripple Consensus Ledger.
|
||||||
|
|
||||||
@@ -62,76 +62,76 @@ how they integrate within `rippled` to support the Ripple Consensus Ledger.
|
|||||||
|
|
||||||
### Definitions
|
### Definitions
|
||||||
|
|
||||||
* The *ledger* is the shared distributed state. Each ledger has a unique ID to
|
- The _ledger_ is the shared distributed state. Each ledger has a unique ID to
|
||||||
distinguish it from all other ledgers. During consensus, the *previous*,
|
distinguish it from all other ledgers. During consensus, the _previous_,
|
||||||
*prior* or *last-closed* ledger is the most recent ledger seen by consensus
|
_prior_ or _last-closed_ ledger is the most recent ledger seen by consensus
|
||||||
and is the basis upon which it will build the next ledger.
|
and is the basis upon which it will build the next ledger.
|
||||||
* A *transaction* is an instruction for an atomic change in the ledger state. A
|
- A _transaction_ is an instruction for an atomic change in the ledger state. A
|
||||||
unique ID distinguishes a transaction from other transactions.
|
unique ID distinguishes a transaction from other transactions.
|
||||||
* A *transaction set* is a set of transactions under consideration by consensus.
|
- A _transaction set_ is a set of transactions under consideration by consensus.
|
||||||
The goal of consensus is to reach agreement on this set. The generic
|
The goal of consensus is to reach agreement on this set. The generic
|
||||||
consensus algorithm does not rely on an ordering of transactions within the
|
consensus algorithm does not rely on an ordering of transactions within the
|
||||||
set, nor does it specify how to apply a transaction set to a ledger to
|
set, nor does it specify how to apply a transaction set to a ledger to
|
||||||
generate a new ledger. A unique ID distinguishes a set of transactions from
|
generate a new ledger. A unique ID distinguishes a set of transactions from
|
||||||
all other sets of transactions.
|
all other sets of transactions.
|
||||||
* A *node* is one of the distributed actors running the consensus algorithm. It
|
- A _node_ is one of the distributed actors running the consensus algorithm. It
|
||||||
has a unique ID to distinguish it from all other nodes.
|
has a unique ID to distinguish it from all other nodes.
|
||||||
* A *peer* of a node is another node that it has chosen to follow and which it
|
- A _peer_ of a node is another node that it has chosen to follow and which it
|
||||||
believes will not collude with other chosen peers. The choice of peers is not
|
believes will not collude with other chosen peers. The choice of peers is not
|
||||||
symmetric, since participants can decide on their chosen sets independently.
|
symmetric, since participants can decide on their chosen sets independently.
|
||||||
* A *position* is the current belief of the next ledger's transaction set and
|
- A _position_ is the current belief of the next ledger's transaction set and
|
||||||
close time. Position can refer to the node's own position or the position of a
|
close time. Position can refer to the node's own position or the position of a
|
||||||
peer.
|
peer.
|
||||||
* A *proposal* is one of a sequence of positions a node shares during consensus.
|
- A _proposal_ is one of a sequence of positions a node shares during consensus.
|
||||||
An initial proposal contains the starting position taken by a node before it
|
An initial proposal contains the starting position taken by a node before it
|
||||||
considers any peer positions. If a node subsequently updates its position in
|
considers any peer positions. If a node subsequently updates its position in
|
||||||
response to its peers, it will issue an updated proposal. A proposal is
|
response to its peers, it will issue an updated proposal. A proposal is
|
||||||
uniquely identified by the ID of the proposing node, the ID of the position
|
uniquely identified by the ID of the proposing node, the ID of the position
|
||||||
taken, the ID of the prior ledger the proposal is for, and the sequence number
|
taken, the ID of the prior ledger the proposal is for, and the sequence number
|
||||||
of the proposal.
|
of the proposal.
|
||||||
* A *dispute* is a transaction that is either not part of a node's position or
|
- A _dispute_ is a transaction that is either not part of a node's position or
|
||||||
not in a peer's position. During consensus, the node will add or remove
|
not in a peer's position. During consensus, the node will add or remove
|
||||||
disputed transactions from its position based on that transaction's support
|
disputed transactions from its position based on that transaction's support
|
||||||
amongst its peers.
|
amongst its peers.
|
||||||
|
|
||||||
Note that most types have an ID as a lightweight identifier of instances of that
|
Note that most types have an ID as a lightweight identifier of instances of that
|
||||||
type. Consensus often operates on the IDs directly since the underlying type is
|
type. Consensus often operates on the IDs directly since the underlying type is
|
||||||
potentially expensive to share over the network. For example, proposals only
|
potentially expensive to share over the network. For example, proposals only
|
||||||
contain the ID of the position of a peer. Since many peers likely have the same
|
contain the ID of the position of a peer. Since many peers likely have the same
|
||||||
position, this reduces the need to send the full transaction set multiple times.
|
position, this reduces the need to send the full transaction set multiple times.
|
||||||
Instead, a node can request the transaction set from the network if necessary.
|
Instead, a node can request the transaction set from the network if necessary.
|
||||||
|
|
||||||
### Overview
|
### Overview
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The diagram above is an overview of the consensus process from the perspective
|
The diagram above is an overview of the consensus process from the perspective
|
||||||
of a single participant. Recall that during a single consensus round, a node is
|
of a single participant. Recall that during a single consensus round, a node is
|
||||||
trying to agree with its peers on which transactions to apply to its prior
|
trying to agree with its peers on which transactions to apply to its prior
|
||||||
ledger when generating the next ledger. It also attempts to agree on the
|
ledger when generating the next ledger. It also attempts to agree on the
|
||||||
[network time when the ledger closed](#effective_close_time). There are
|
[network time when the ledger closed](#effective_close_time). There are
|
||||||
3 main phases to a consensus round:
|
3 main phases to a consensus round:
|
||||||
|
|
||||||
* A call to `startRound` places the node in the `Open` phase. In this phase,
|
- A call to `startRound` places the node in the `Open` phase. In this phase,
|
||||||
the node is waiting for transactions to include in its open ledger.
|
the node is waiting for transactions to include in its open ledger.
|
||||||
* At some point, the node will `Close` the open ledger and transition to the
|
- At some point, the node will `Close` the open ledger and transition to the
|
||||||
`Establish` phase. In this phase, the node shares/receives peer proposals on
|
`Establish` phase. In this phase, the node shares/receives peer proposals on
|
||||||
which transactions should be accepted in the closed ledger.
|
which transactions should be accepted in the closed ledger.
|
||||||
* At some point, the node determines it has reached consensus with its peers on
|
- At some point, the node determines it has reached consensus with its peers on
|
||||||
which transactions to include. It transitions to the `Accept` phase. In this
|
which transactions to include. It transitions to the `Accept` phase. In this
|
||||||
phase, the node works on applying the transactions to the prior ledger to
|
phase, the node works on applying the transactions to the prior ledger to
|
||||||
generate a new closed ledger. Once the new ledger is completed, the node shares
|
generate a new closed ledger. Once the new ledger is completed, the node shares
|
||||||
the validated ledger hash with the network and makes a call to `startRound` to
|
the validated ledger hash with the network and makes a call to `startRound` to
|
||||||
start the cycle again for the next ledger.
|
start the cycle again for the next ledger.
|
||||||
|
|
||||||
Throughout, a heartbeat timer calls `timerEntry` at a regular frequency to drive
|
Throughout, a heartbeat timer calls `timerEntry` at a regular frequency to drive
|
||||||
the process forward. Although the `startRound` call occurs at arbitrary times
|
the process forward. Although the `startRound` call occurs at arbitrary times
|
||||||
based on when the initial round began and the time it takes to apply
|
based on when the initial round began and the time it takes to apply
|
||||||
transactions, the transitions from `Open` to `Establish` and `Establish` to
|
transactions, the transitions from `Open` to `Establish` and `Establish` to
|
||||||
`Accept` only occur during calls to `timerEntry`. Similarly, transactions can
|
`Accept` only occur during calls to `timerEntry`. Similarly, transactions can
|
||||||
arrive at arbitrary times, independent of the heartbeat timer. Transactions
|
arrive at arbitrary times, independent of the heartbeat timer. Transactions
|
||||||
received after the `Open` to `Close` transition and not part of peer proposals
|
received after the `Open` to `Close` transition and not part of peer proposals
|
||||||
won't be considered until the next consensus round. They are represented above
|
won't be considered until the next consensus round. They are represented above
|
||||||
by the light green triangles.
|
by the light green triangles.
|
||||||
|
|
||||||
Peer proposals are issued by a node during a `timerEntry` call, but since peers
|
Peer proposals are issued by a node during a `timerEntry` call, but since peers
|
||||||
@@ -139,16 +139,16 @@ do not synchronize `timerEntry` calls, they are received by other peers at
|
|||||||
arbitrary times. Peer proposals are only considered if received prior to the
|
arbitrary times. Peer proposals are only considered if received prior to the
|
||||||
`Establish` to `Accept` transition, and only if the peer is working on the same
|
`Establish` to `Accept` transition, and only if the peer is working on the same
|
||||||
prior ledger. Peer proposals received after consensus is reached will not be
|
prior ledger. Peer proposals received after consensus is reached will not be
|
||||||
meaningful and are represented above by the circle with the X in it. Only
|
meaningful and are represented above by the circle with the X in it. Only
|
||||||
proposals from chosen peers are considered.
|
proposals from chosen peers are considered.
|
||||||
|
|
||||||
### Effective Close Time ### {#effective_close_time}
|
### Effective Close Time ### {#effective_close_time}
|
||||||
|
|
||||||
In addition to agreeing on a transaction set, each consensus round tries to
|
In addition to agreeing on a transaction set, each consensus round tries to
|
||||||
agree on the time the ledger closed. Each node calculates its own close time
|
agree on the time the ledger closed. Each node calculates its own close time
|
||||||
when it closes the open ledger. This exact close time is rounded to the nearest
|
when it closes the open ledger. This exact close time is rounded to the nearest
|
||||||
multiple of the current *effective close time resolution*. It is this
|
multiple of the current _effective close time resolution_. It is this
|
||||||
*effective close time* that nodes seek to agree on. This allows servers to
|
_effective close time_ that nodes seek to agree on. This allows servers to
|
||||||
derive a common time for a ledger without the need for perfectly synchronized
|
derive a common time for a ledger without the need for perfectly synchronized
|
||||||
clocks. As depicted below, the 3 pink arrows represent exact close times from 3
|
clocks. As depicted below, the 3 pink arrows represent exact close times from 3
|
||||||
consensus nodes that round to the same effective close time given the current
|
consensus nodes that round to the same effective close time given the current
|
||||||
@@ -158,9 +158,9 @@ different effective close time given the current resolution.
|
|||||||

|

|
||||||
|
|
||||||
The effective close time is part of the node's position and is shared with peers
|
The effective close time is part of the node's position and is shared with peers
|
||||||
in its proposals. Just like the position on the consensus transaction set, a
|
in its proposals. Just like the position on the consensus transaction set, a
|
||||||
node will update its close time position in response to its peers' effective
|
node will update its close time position in response to its peers' effective
|
||||||
close time positions. Peers can agree to disagree on the close time, in which
|
close time positions. Peers can agree to disagree on the close time, in which
|
||||||
case the effective close time is taken as 1 second past the prior close.
|
case the effective close time is taken as 1 second past the prior close.
|
||||||
|
|
||||||
The close time resolution is itself dynamic, decreasing (coarser) resolution in
|
The close time resolution is itself dynamic, decreasing (coarser) resolution in
|
||||||
@@ -173,12 +173,12 @@ reach close time consensus.
|
|||||||
Internally, a node operates under one of the following consensus modes. Either
|
Internally, a node operates under one of the following consensus modes. Either
|
||||||
of the first two modes may be chosen when a consensus round starts.
|
of the first two modes may be chosen when a consensus round starts.
|
||||||
|
|
||||||
* *Proposing* indicates the node is a full-fledged consensus participant. It
|
- _Proposing_ indicates the node is a full-fledged consensus participant. It
|
||||||
takes on positions and sends proposals to its peers.
|
takes on positions and sends proposals to its peers.
|
||||||
* *Observing* indicates the node is a passive consensus participant. It
|
- _Observing_ indicates the node is a passive consensus participant. It
|
||||||
maintains a position internally, but does not propose that position to its
|
maintains a position internally, but does not propose that position to its
|
||||||
peers. Instead, it receives peer proposals and updates its position
|
peers. Instead, it receives peer proposals and updates its position
|
||||||
to track the majority of its peers. This may be preferred if the node is only
|
to track the majority of its peers. This may be preferred if the node is only
|
||||||
being used to track the state of the network or during a start-up phase while
|
being used to track the state of the network or during a start-up phase while
|
||||||
it is still synchronizing with the network.
|
it is still synchronizing with the network.
|
||||||
|
|
||||||
@@ -186,14 +186,14 @@ The other two modes are set internally during the consensus round when the node
|
|||||||
believes it is no longer working on the dominant ledger chain based on peer
|
believes it is no longer working on the dominant ledger chain based on peer
|
||||||
validations. It checks this on every call to `timerEntry`.
|
validations. It checks this on every call to `timerEntry`.
|
||||||
|
|
||||||
* *Wrong Ledger* indicates the node is not working on the correct prior ledger
|
- _Wrong Ledger_ indicates the node is not working on the correct prior ledger
|
||||||
and does not have it available. It requests that ledger from the network, but
|
and does not have it available. It requests that ledger from the network, but
|
||||||
continues to work towards consensus this round while waiting. If it had been
|
continues to work towards consensus this round while waiting. If it had been
|
||||||
*proposing*, it will send a special "bowout" proposal to its peers to indicate
|
_proposing_, it will send a special "bowout" proposal to its peers to indicate
|
||||||
its change in mode for the rest of this round. For the duration of the round,
|
its change in mode for the rest of this round. For the duration of the round,
|
||||||
it defers to peer positions for determining the consensus outcome as if it
|
it defers to peer positions for determining the consensus outcome as if it
|
||||||
were just *observing*.
|
were just _observing_.
|
||||||
* *Switch Ledger* indicates that the node has acquired the correct prior ledger
|
- _Switch Ledger_ indicates that the node has acquired the correct prior ledger
|
||||||
from the network. Although it now has the correct prior ledger, the fact that
|
from the network. Although it now has the correct prior ledger, the fact that
|
||||||
it had the wrong one at some point during this round means it is likely behind
|
it had the wrong one at some point during this round means it is likely behind
|
||||||
and should defer to peer positions for determining the consensus outcome.
|
and should defer to peer positions for determining the consensus outcome.
|
||||||
@@ -201,7 +201,7 @@ validations. It checks this on every call to `timerEntry`.
|
|||||||

|

|
||||||
|
|
||||||
Once either wrong ledger or switch ledger are reached, the node cannot
|
Once either wrong ledger or switch ledger are reached, the node cannot
|
||||||
return to proposing or observing until the next consensus round. However,
|
return to proposing or observing until the next consensus round. However,
|
||||||
the node could change its view of the correct prior ledger, so going from
|
the node could change its view of the correct prior ledger, so going from
|
||||||
switch ledger to wrong ledger and back again is possible.
|
switch ledger to wrong ledger and back again is possible.
|
||||||
|
|
||||||
@@ -215,16 +215,16 @@ decide how best to generate the next ledger once it declares consensus.
|
|||||||
### Phases
|
### Phases
|
||||||
|
|
||||||
As depicted in the overview diagram, consensus is best viewed as a progression
|
As depicted in the overview diagram, consensus is best viewed as a progression
|
||||||
through 3 phases. There are 4 public methods of the generic consensus algorithm
|
through 3 phases. There are 4 public methods of the generic consensus algorithm
|
||||||
that determine this progression
|
that determine this progression
|
||||||
|
|
||||||
* `startRound` begins a consensus round.
|
- `startRound` begins a consensus round.
|
||||||
* `timerEntry` is called at a regular frequency (`LEDGER_MIN_CLOSE`) and is the
|
- `timerEntry` is called at a regular frequency (`LEDGER_MIN_CLOSE`) and is the
|
||||||
only call to consensus that can change the phase from `Open` to `Establish`
|
only call to consensus that can change the phase from `Open` to `Establish`
|
||||||
or `Accept`.
|
or `Accept`.
|
||||||
* `peerProposal` is called whenever a peer proposal is received and is what
|
- `peerProposal` is called whenever a peer proposal is received and is what
|
||||||
allows a node to update its position in a subsequent `timerEntry` call.
|
allows a node to update its position in a subsequent `timerEntry` call.
|
||||||
* `gotTxSet` is called when a transaction set is received from the network. This
|
- `gotTxSet` is called when a transaction set is received from the network. This
|
||||||
is typically in response to a prior request from the node to acquire the
|
is typically in response to a prior request from the node to acquire the
|
||||||
transaction set corresponding to a disagreeing peer's position.
|
transaction set corresponding to a disagreeing peer's position.
|
||||||
|
|
||||||
@@ -234,13 +234,13 @@ actions are taken in response to these calls.
|
|||||||
#### Open
|
#### Open
|
||||||
|
|
||||||
The `Open` phase is a quiescent period to allow transactions to build up in the
|
The `Open` phase is a quiescent period to allow transactions to build up in the
|
||||||
node's open ledger. The duration is a trade-off between latency and throughput.
|
node's open ledger. The duration is a trade-off between latency and throughput.
|
||||||
A shorter window reduces the latency to generating the next ledger, but also
|
A shorter window reduces the latency to generating the next ledger, but also
|
||||||
reduces transaction throughput due to fewer transactions accepted into the
|
reduces transaction throughput due to fewer transactions accepted into the
|
||||||
ledger.
|
ledger.
|
||||||
|
|
||||||
A call to `startRound` would forcibly begin the next consensus round, skipping
|
A call to `startRound` would forcibly begin the next consensus round, skipping
|
||||||
completion of the current round. This is not expected during normal operation.
|
completion of the current round. This is not expected during normal operation.
|
||||||
Calls to `peerProposal` or `gotTxSet` simply store the proposal or transaction
|
Calls to `peerProposal` or `gotTxSet` simply store the proposal or transaction
|
||||||
set for use in the coming `Establish` phase.
|
set for use in the coming `Establish` phase.
|
||||||
|
|
||||||
@@ -254,28 +254,27 @@ the ledger.
|
|||||||
Under normal circumstances, the open ledger period ends when one of the following
|
Under normal circumstances, the open ledger period ends when one of the following
|
||||||
is true
|
is true
|
||||||
|
|
||||||
* if there are transactions in the open ledger and more than `LEDGER_MIN_CLOSE`
|
- if there are transactions in the open ledger and more than `LEDGER_MIN_CLOSE`
|
||||||
have elapsed. This is the typical behavior.
|
have elapsed. This is the typical behavior.
|
||||||
* if there are no open transactions and a suitably longer idle interval has
|
- if there are no open transactions and a suitably longer idle interval has
|
||||||
elapsed. This increases the opportunity to get some transaction into
|
elapsed. This increases the opportunity to get some transaction into
|
||||||
the next ledger and avoids doing useless work closing an empty ledger.
|
the next ledger and avoids doing useless work closing an empty ledger.
|
||||||
* if more than half the number of prior round peers have already closed or finished
|
- if more than half the number of prior round peers have already closed or finished
|
||||||
this round. This indicates the node is falling behind and needs to catch up.
|
this round. This indicates the node is falling behind and needs to catch up.
|
||||||
|
|
||||||
|
|
||||||
When closing the ledger, the node takes its initial position based on the
|
When closing the ledger, the node takes its initial position based on the
|
||||||
transactions in the open ledger and uses the current time as
|
transactions in the open ledger and uses the current time as
|
||||||
its initial close time estimate. If in the proposing mode, the node shares its
|
its initial close time estimate. If in the proposing mode, the node shares its
|
||||||
initial position with peers. Now that the node has taken a position, it will
|
initial position with peers. Now that the node has taken a position, it will
|
||||||
consider any peer positions for this round that arrived earlier. The node
|
consider any peer positions for this round that arrived earlier. The node
|
||||||
generates disputed transactions for each transaction not in common with a peer's
|
generates disputed transactions for each transaction not in common with a peer's
|
||||||
position. The node also records the vote of each peer for each disputed
|
position. The node also records the vote of each peer for each disputed
|
||||||
transaction.
|
transaction.
|
||||||
|
|
||||||
In the example below, we suppose our node has closed with transactions 1,2 and 3. It creates disputes
|
In the example below, we suppose our node has closed with transactions 1,2 and 3. It creates disputes
|
||||||
for transactions 2,3 and 4, since at least one peer position differs on each.
|
for transactions 2,3 and 4, since at least one peer position differs on each.
|
||||||
|
|
||||||
##### disputes ##### {#disputes_image}
|
##### disputes ##### {#disputes_image}
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
@@ -286,22 +285,22 @@ exchanges proposals with peers in an attempt to reach agreement on the consensus
|
|||||||
transactions and effective close time.
|
transactions and effective close time.
|
||||||
|
|
||||||
A call to `startRound` would forcibly begin the next consensus round, skipping
|
A call to `startRound` would forcibly begin the next consensus round, skipping
|
||||||
completion of the current round. This is not expected during normal operation.
|
completion of the current round. This is not expected during normal operation.
|
||||||
Calls to `peerProposal` or `gotTxSet` that reflect new positions will generate
|
Calls to `peerProposal` or `gotTxSet` that reflect new positions will generate
|
||||||
disputed transactions for any new disagreements and will update the peer's vote
|
disputed transactions for any new disagreements and will update the peer's vote
|
||||||
for all disputed transactions.
|
for all disputed transactions.
|
||||||
|
|
||||||
A call to `timerEntry` first checks that the node is working from the correct
|
A call to `timerEntry` first checks that the node is working from the correct
|
||||||
prior ledger. If not, the node will update the mode and request the correct
|
prior ledger. If not, the node will update the mode and request the correct
|
||||||
ledger. Otherwise, the node updates the node's position and considers whether
|
ledger. Otherwise, the node updates the node's position and considers whether
|
||||||
to switch to the `Accepted` phase and declare consensus reached. However, at
|
to switch to the `Accepted` phase and declare consensus reached. However, at
|
||||||
least `LEDGER_MIN_CONSENSUS` time must have elapsed before doing either. This
|
least `LEDGER_MIN_CONSENSUS` time must have elapsed before doing either. This
|
||||||
allows peers an opportunity to take an initial position and share it.
|
allows peers an opportunity to take an initial position and share it.
|
||||||
|
|
||||||
##### Update Position
|
##### Update Position
|
||||||
|
|
||||||
In order to achieve consensus, the node is looking for a transaction set that is
|
In order to achieve consensus, the node is looking for a transaction set that is
|
||||||
supported by a super-majority of peers. The node works towards this set by
|
supported by a super-majority of peers. The node works towards this set by
|
||||||
adding or removing disputed transactions from its position based on an
|
adding or removing disputed transactions from its position based on an
|
||||||
increasing threshold for inclusion.
|
increasing threshold for inclusion.
|
||||||
|
|
||||||
@@ -310,23 +309,23 @@ increasing threshold for inclusion.
|
|||||||
By starting with a lower threshold, a node initially allows a wide set of
|
By starting with a lower threshold, a node initially allows a wide set of
|
||||||
transactions into its position. If the establish round continues and the node is
|
transactions into its position. If the establish round continues and the node is
|
||||||
"stuck", a higher threshold can focus on accepting transactions with the most
|
"stuck", a higher threshold can focus on accepting transactions with the most
|
||||||
support. The constants that define the thresholds and durations at which the
|
support. The constants that define the thresholds and durations at which the
|
||||||
thresholds change are given by `AV_XXX_CONSENSUS_PCT` and
|
thresholds change are given by `AV_XXX_CONSENSUS_PCT` and
|
||||||
`AV_XXX_CONSENSUS_TIME` respectively, where `XXX` is `INIT`,`MID`,`LATE` and
|
`AV_XXX_CONSENSUS_TIME` respectively, where `XXX` is `INIT`,`MID`,`LATE` and
|
||||||
`STUCK`. The effective close time position is updated using the same
|
`STUCK`. The effective close time position is updated using the same
|
||||||
thresholds.
|
thresholds.
|
||||||
|
|
||||||
Given the [example disputes above](#disputes_image) and an initial threshold
|
Given the [example disputes above](#disputes_image) and an initial threshold
|
||||||
of 50%, our node would retain its position since transaction 1 was not in
|
of 50%, our node would retain its position since transaction 1 was not in
|
||||||
dispute and transactions 2 and 3 have 75% support. Since its position did not
|
dispute and transactions 2 and 3 have 75% support. Since its position did not
|
||||||
change, it would not need to send a new proposal to peers. Peer C would not
|
change, it would not need to send a new proposal to peers. Peer C would not
|
||||||
change either. Peer A would add transaction 3 to its position and Peer B would
|
change either. Peer A would add transaction 3 to its position and Peer B would
|
||||||
remove transaction 4 from its position; both would then send an updated
|
remove transaction 4 from its position; both would then send an updated
|
||||||
position.
|
position.
|
||||||
|
|
||||||
Conversely, if the diagram reflected a later call to `timerEntry` that occurs in
|
Conversely, if the diagram reflected a later call to `timerEntry` that occurs in
|
||||||
the stuck region with a threshold of say 95%, our node would remove transactions
|
the stuck region with a threshold of say 95%, our node would remove transactions
|
||||||
2 and 3 from its candidate set and send an updated position. Likewise, all the
|
2 and 3 from its candidate set and send an updated position. Likewise, all the
|
||||||
other peers would end up with only transaction 1 in their position.
|
other peers would end up with only transaction 1 in their position.
|
||||||
|
|
||||||
Lastly, if our node were not in the proposing mode, it would not include its own
|
Lastly, if our node were not in the proposing mode, it would not include its own
|
||||||
@@ -336,7 +335,7 @@ our node would maintain its position of transactions 1, 2 and 3.
|
|||||||
##### Checking Consensus
|
##### Checking Consensus
|
||||||
|
|
||||||
After updating its position, the node checks for supermajority agreement with
|
After updating its position, the node checks for supermajority agreement with
|
||||||
its peers on its current position. This agreement is of the exact transaction
|
its peers on its current position. This agreement is of the exact transaction
|
||||||
set, not just the support of individual transactions. That is, if our position
|
set, not just the support of individual transactions. That is, if our position
|
||||||
is a subset of a peer's position, that counts as a disagreement. Also recall
|
is a subset of a peer's position, that counts as a disagreement. Also recall
|
||||||
that effective close time agreement allows a supermajority of participants
|
that effective close time agreement allows a supermajority of participants
|
||||||
@@ -344,10 +343,10 @@ agreeing to disagree.
|
|||||||
|
|
||||||
Consensus is declared when the following 3 clauses are true:
|
Consensus is declared when the following 3 clauses are true:
|
||||||
|
|
||||||
* `LEDGER_MIN_CONSENSUS` time has elapsed in the establish phase
|
- `LEDGER_MIN_CONSENSUS` time has elapsed in the establish phase
|
||||||
* At least 75% of the prior round proposers have proposed OR this establish
|
- At least 75% of the prior round proposers have proposed OR this establish
|
||||||
phase is `LEDGER_MIN_CONSENSUS` longer than the last round's establish phase
|
phase is `LEDGER_MIN_CONSENSUS` longer than the last round's establish phase
|
||||||
* `minimumConsensusPercentage` of ourself and our peers share the same position
|
- `minimumConsensusPercentage` of ourself and our peers share the same position
|
||||||
|
|
||||||
The middle condition ensures slower peers have a chance to share positions, but
|
The middle condition ensures slower peers have a chance to share positions, but
|
||||||
prevents waiting too long on peers that have disconnected. Additionally, a node
|
prevents waiting too long on peers that have disconnected. Additionally, a node
|
||||||
@@ -364,22 +363,22 @@ logic.
|
|||||||
Once consensus is reached (or moved on), the node switches to the `Accept` phase
|
Once consensus is reached (or moved on), the node switches to the `Accept` phase
|
||||||
and signals to the implementing code that the round is complete. That code is
|
and signals to the implementing code that the round is complete. That code is
|
||||||
responsible for using the consensus transaction set to generate the next ledger
|
responsible for using the consensus transaction set to generate the next ledger
|
||||||
and calling `startRound` to begin the next round. The implementation has total
|
and calling `startRound` to begin the next round. The implementation has total
|
||||||
freedom on ordering transactions, deciding what to do if consensus moved on,
|
freedom on ordering transactions, deciding what to do if consensus moved on,
|
||||||
determining whether to retry or abandon local transactions that did not make the
|
determining whether to retry or abandon local transactions that did not make the
|
||||||
consensus set and updating any internal state based on the consensus progress.
|
consensus set and updating any internal state based on the consensus progress.
|
||||||
|
|
||||||
#### Accept
|
#### Accept
|
||||||
|
|
||||||
The `Accept` phase is the terminal phase of the consensus algorithm. Calls to
|
The `Accept` phase is the terminal phase of the consensus algorithm. Calls to
|
||||||
`timerEntry`, `peerProposal` and `gotTxSet` will not change the internal
|
`timerEntry`, `peerProposal` and `gotTxSet` will not change the internal
|
||||||
consensus state while in the accept phase. The expectation is that the
|
consensus state while in the accept phase. The expectation is that the
|
||||||
application specific code is working to generate the new ledger based on the
|
application specific code is working to generate the new ledger based on the
|
||||||
consensus outcome. Once complete, that code should make a call to `startRound`
|
consensus outcome. Once complete, that code should make a call to `startRound`
|
||||||
to kick off the next consensus round. The `startRound` call includes the new
|
to kick off the next consensus round. The `startRound` call includes the new
|
||||||
prior ledger, prior ledger ID and whether the round should begin in the
|
prior ledger, prior ledger ID and whether the round should begin in the
|
||||||
proposing or observing mode. After setting some initial state, the phase
|
proposing or observing mode. After setting some initial state, the phase
|
||||||
transitions to `Open`. The node will also check if the provided prior ledger
|
transitions to `Open`. The node will also check if the provided prior ledger
|
||||||
and ID are correct, updating the mode and requesting the proper ledger from the
|
and ID are correct, updating the mode and requesting the proper ledger from the
|
||||||
network if necessary.
|
network if necessary.
|
||||||
|
|
||||||
@@ -448,9 +447,9 @@ struct TxSet
|
|||||||
### Ledger
|
### Ledger
|
||||||
|
|
||||||
The `Ledger` type represents the state shared amongst the
|
The `Ledger` type represents the state shared amongst the
|
||||||
distributed participants. Notice that the details of how the next ledger is
|
distributed participants. Notice that the details of how the next ledger is
|
||||||
generated from the prior ledger and the consensus accepted transaction set is
|
generated from the prior ledger and the consensus accepted transaction set is
|
||||||
not part of the interface. Within the generic code, this type is primarily used
|
not part of the interface. Within the generic code, this type is primarily used
|
||||||
to know that peers are working on the same tip of the ledger chain and to
|
to know that peers are working on the same tip of the ledger chain and to
|
||||||
provide some basic timing data for consensus.
|
provide some basic timing data for consensus.
|
||||||
|
|
||||||
@@ -626,7 +625,7 @@ struct Adaptor
|
|||||||
|
|
||||||
// Called when consensus operating mode changes
|
// Called when consensus operating mode changes
|
||||||
void onModeChange(ConsensusMode before, ConsensusMode after);
|
void onModeChange(ConsensusMode before, ConsensusMode after);
|
||||||
|
|
||||||
// Called when ledger closes. Implementation should generate an initial Result
|
// Called when ledger closes. Implementation should generate an initial Result
|
||||||
// with position based on the current open ledger's transactions.
|
// with position based on the current open ledger's transactions.
|
||||||
ConsensusResult onClose(Ledger const &, Ledger const & prev, ConsensusMode mode);
|
ConsensusResult onClose(Ledger const &, Ledger const & prev, ConsensusMode mode);
|
||||||
@@ -657,27 +656,24 @@ struct Adaptor
|
|||||||
The implementing class hides many details of the peer communication
|
The implementing class hides many details of the peer communication
|
||||||
model from the generic code.
|
model from the generic code.
|
||||||
|
|
||||||
* The `share` member functions are responsible for sharing the given type with a
|
- The `share` member functions are responsible for sharing the given type with a
|
||||||
node's peers, but are agnostic to the mechanism. Ideally, messages are delivered
|
node's peers, but are agnostic to the mechanism. Ideally, messages are delivered
|
||||||
faster than `LEDGER_GRANULARITY`.
|
faster than `LEDGER_GRANULARITY`.
|
||||||
* The generic code does not specify how transactions are submitted by clients,
|
- The generic code does not specify how transactions are submitted by clients,
|
||||||
propagated through the network or stored in the open ledger. Indeed, the open
|
propagated through the network or stored in the open ledger. Indeed, the open
|
||||||
ledger is only conceptual from the perspective of the generic code---the
|
ledger is only conceptual from the perspective of the generic code---the
|
||||||
initial position and transaction set are opaquely generated in a
|
initial position and transaction set are opaquely generated in a
|
||||||
`Consensus::Result` instance returned from the `onClose` callback.
|
`Consensus::Result` instance returned from the `onClose` callback.
|
||||||
* The calls to `acquireLedger` and `acquireTxSet` only have non-trivial return
|
- The calls to `acquireLedger` and `acquireTxSet` only have non-trivial return
|
||||||
if the ledger or transaction set of interest is available. The implementing
|
if the ledger or transaction set of interest is available. The implementing
|
||||||
class is free to block while acquiring, or return the empty option while
|
class is free to block while acquiring, or return the empty option while
|
||||||
servicing the request asynchronously. Due to legacy reasons, the two calls
|
servicing the request asynchronously. Due to legacy reasons, the two calls
|
||||||
are not symmetric. `acquireTxSet` requires the host application to call
|
are not symmetric. `acquireTxSet` requires the host application to call
|
||||||
`gotTxSet` when an asynchronous `acquire` completes. Conversely,
|
`gotTxSet` when an asynchronous `acquire` completes. Conversely,
|
||||||
`acquireLedger` will be called again later by the consensus code if it still
|
`acquireLedger` will be called again later by the consensus code if it still
|
||||||
desires the ledger with the hope that the asynchronous acquisition is
|
desires the ledger with the hope that the asynchronous acquisition is
|
||||||
complete.
|
complete.
|
||||||
|
|
||||||
|
|
||||||
## Validation
|
## Validation
|
||||||
|
|
||||||
Coming Soon!
|
Coming Soon!
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
18
external/README.md
vendored
18
external/README.md
vendored
@@ -1,14 +1,10 @@
|
|||||||
# External Conan recipes
|
# External Conan recipes
|
||||||
|
|
||||||
The subdirectories in this directory contain either copies or Conan recipes
|
The subdirectories in this directory contain copies of external libraries used
|
||||||
of external libraries used by rippled.
|
by rippled.
|
||||||
The Conan recipes include patches we have not yet pushed upstream.
|
|
||||||
|
|
||||||
| Folder | Upstream | Description |
|
| Folder | Upstream | Description |
|
||||||
|:----------------|:---------------------------------------------|:------------|
|
| :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- |
|
||||||
| `antithesis-sdk`| [Project](https://github.com/antithesishq/antithesis-sdk-cpp/) | [Antithesis](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview.html) SDK for C++ |
|
| `antithesis-sdk` | [Project](https://github.com/antithesishq/antithesis-sdk-cpp/) | [Antithesis](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview.html) SDK for C++ |
|
||||||
| `ed25519-donna` | [Project](https://github.com/floodyberry/ed25519-donna) | [Ed25519](http://ed25519.cr.yp.to/) digital signatures |
|
| `ed25519-donna` | [Project](https://github.com/floodyberry/ed25519-donna) | [Ed25519](http://ed25519.cr.yp.to/) digital signatures |
|
||||||
| `rocksdb` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/rocksdb) | Fast key/value database. (Supports rotational disks better than NuDB.) |
|
| `secp256k1` | [Project](https://github.com/bitcoin-core/secp256k1) | ECDSA digital signatures using the **secp256k1** curve |
|
||||||
| `secp256k1` | [Project](https://github.com/bitcoin-core/secp256k1) | ECDSA digital signatures using the **secp256k1** curve |
|
|
||||||
| `snappy` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/snappy) | "Snappy" lossless compression algorithm. |
|
|
||||||
| `soci` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/soci) | Abstraction layer for database access. |
|
|
||||||
|
|||||||
2
external/antithesis-sdk/CMakeLists.txt
vendored
2
external/antithesis-sdk/CMakeLists.txt
vendored
@@ -1,4 +1,4 @@
|
|||||||
cmake_minimum_required(VERSION 3.25)
|
cmake_minimum_required(VERSION 3.18)
|
||||||
|
|
||||||
# Note, version set explicitly by rippled project
|
# Note, version set explicitly by rippled project
|
||||||
project(antithesis-sdk-cpp VERSION 0.4.4 LANGUAGES CXX)
|
project(antithesis-sdk-cpp VERSION 0.4.4 LANGUAGES CXX)
|
||||||
|
|||||||
7
external/antithesis-sdk/README.md
vendored
7
external/antithesis-sdk/README.md
vendored
@@ -1,8 +1,9 @@
|
|||||||
# Antithesis C++ SDK
|
# Antithesis C++ SDK
|
||||||
|
|
||||||
This library provides methods for C++ programs to configure the [Antithesis](https://antithesis.com) platform. It contains three kinds of functionality:
|
This library provides methods for C++ programs to configure the [Antithesis](https://antithesis.com) platform. It contains three kinds of functionality:
|
||||||
* Assertion macros that allow you to define test properties about your software or workload.
|
|
||||||
* Randomness functions for requesting both structured and unstructured randomness from the Antithesis platform.
|
- Assertion macros that allow you to define test properties about your software or workload.
|
||||||
* Lifecycle functions that inform the Antithesis environment that particular test phases or milestones have been reached.
|
- Randomness functions for requesting both structured and unstructured randomness from the Antithesis platform.
|
||||||
|
- Lifecycle functions that inform the Antithesis environment that particular test phases or milestones have been reached.
|
||||||
|
|
||||||
For general usage guidance see the [Antithesis C++ SDK Documentation](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview/)
|
For general usage guidance see the [Antithesis C++ SDK Documentation](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview/)
|
||||||
|
|||||||
3
external/ed25519-donna/CMakeLists.txt
vendored
3
external/ed25519-donna/CMakeLists.txt
vendored
@@ -17,6 +17,9 @@ add_library(ed25519 STATIC
|
|||||||
)
|
)
|
||||||
add_library(ed25519::ed25519 ALIAS ed25519)
|
add_library(ed25519::ed25519 ALIAS ed25519)
|
||||||
target_link_libraries(ed25519 PUBLIC OpenSSL::SSL)
|
target_link_libraries(ed25519 PUBLIC OpenSSL::SSL)
|
||||||
|
if(NOT MSVC)
|
||||||
|
target_compile_options(ed25519 PRIVATE -Wno-implicit-fallthrough)
|
||||||
|
endif()
|
||||||
|
|
||||||
include(GNUInstallDirs)
|
include(GNUInstallDirs)
|
||||||
|
|
||||||
|
|||||||
97
external/ed25519-donna/README.md
vendored
97
external/ed25519-donna/README.md
vendored
@@ -1,12 +1,12 @@
|
|||||||
[ed25519](http://ed25519.cr.yp.to/) is an
|
[ed25519](http://ed25519.cr.yp.to/) is an
|
||||||
[Elliptic Curve Digital Signature Algorithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
|
[Elliptic Curve Digital Signature Algorithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
|
||||||
developed by [Dan Bernstein](http://cr.yp.to/djb.html),
|
developed by [Dan Bernstein](http://cr.yp.to/djb.html),
|
||||||
[Niels Duif](http://www.nielsduif.nl/),
|
[Niels Duif](http://www.nielsduif.nl/),
|
||||||
[Tanja Lange](http://hyperelliptic.org/tanja),
|
[Tanja Lange](http://hyperelliptic.org/tanja),
|
||||||
[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
|
[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
|
||||||
and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/).
|
and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/).
|
||||||
|
|
||||||
This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
|
This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
|
||||||
of course constant time in regard to secret data.
|
of course constant time in regard to secret data.
|
||||||
|
|
||||||
#### Performance
|
#### Performance
|
||||||
@@ -52,35 +52,35 @@ are made.
|
|||||||
|
|
||||||
#### Compilation
|
#### Compilation
|
||||||
|
|
||||||
No configuration is needed **if you are compiling against OpenSSL**.
|
No configuration is needed **if you are compiling against OpenSSL**.
|
||||||
|
|
||||||
##### Hash Options
|
##### Hash Options
|
||||||
|
|
||||||
If you are not compiling against OpenSSL, you will need a hash function.
|
If you are not compiling against OpenSSL, you will need a hash function.
|
||||||
|
|
||||||
To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
|
To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
|
||||||
This should never be used except to verify the code works when OpenSSL is not available.
|
This should never be used except to verify the code works when OpenSSL is not available.
|
||||||
|
|
||||||
To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
|
To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
|
||||||
custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement
|
custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement
|
||||||
|
|
||||||
struct ed25519_hash_context;
|
struct ed25519_hash_context;
|
||||||
|
|
||||||
void ed25519_hash_init(ed25519_hash_context *ctx);
|
void ed25519_hash_init(ed25519_hash_context *ctx);
|
||||||
void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
|
void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
|
||||||
void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
|
void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
|
||||||
void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
|
void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
|
||||||
|
|
||||||
##### Random Options
|
##### Random Options
|
||||||
|
|
||||||
If you are not compiling against OpenSSL, you will need a random function for batch verification.
|
If you are not compiling against OpenSSL, you will need a random function for batch verification.
|
||||||
|
|
||||||
To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
|
To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
|
||||||
custom hash implementation in ed25519-randombytes-custom.h. The random function must implement:
|
custom hash implementation in ed25519-randombytes-custom.h. The random function must implement:
|
||||||
|
|
||||||
void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
|
void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
|
||||||
|
|
||||||
Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
|
Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
|
||||||
variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29)
|
variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29)
|
||||||
|
|
||||||
##### Minor options
|
##### Minor options
|
||||||
@@ -91,80 +91,79 @@ Use `-DED25519_FORCE_32BIT` to force the use of 32 bit routines even when compil
|
|||||||
|
|
||||||
##### 32-bit
|
##### 32-bit
|
||||||
|
|
||||||
gcc ed25519.c -m32 -O3 -c
|
gcc ed25519.c -m32 -O3 -c
|
||||||
|
|
||||||
##### 64-bit
|
##### 64-bit
|
||||||
|
|
||||||
gcc ed25519.c -m64 -O3 -c
|
gcc ed25519.c -m64 -O3 -c
|
||||||
|
|
||||||
##### SSE2
|
##### SSE2
|
||||||
|
|
||||||
gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
|
gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
|
||||||
gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
|
gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
|
||||||
|
|
||||||
clang and icc are also supported
|
clang and icc are also supported
|
||||||
|
|
||||||
|
|
||||||
#### Usage
|
#### Usage
|
||||||
|
|
||||||
To use the code, link against `ed25519.o -mbits` and:
|
To use the code, link against `ed25519.o -mbits` and:
|
||||||
|
|
||||||
#include "ed25519.h"
|
#include "ed25519.h"
|
||||||
|
|
||||||
Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error).
|
Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error).
|
||||||
|
|
||||||
To generate a private key, simply generate 32 bytes from a secure
|
To generate a private key, simply generate 32 bytes from a secure
|
||||||
cryptographic source:
|
cryptographic source:
|
||||||
|
|
||||||
ed25519_secret_key sk;
|
ed25519_secret_key sk;
|
||||||
randombytes(sk, sizeof(ed25519_secret_key));
|
randombytes(sk, sizeof(ed25519_secret_key));
|
||||||
|
|
||||||
To generate a public key:
|
To generate a public key:
|
||||||
|
|
||||||
ed25519_public_key pk;
|
ed25519_public_key pk;
|
||||||
ed25519_publickey(sk, pk);
|
ed25519_publickey(sk, pk);
|
||||||
|
|
||||||
To sign a message:
|
To sign a message:
|
||||||
|
|
||||||
ed25519_signature sig;
|
ed25519_signature sig;
|
||||||
ed25519_sign(message, message_len, sk, pk, signature);
|
ed25519_sign(message, message_len, sk, pk, signature);
|
||||||
|
|
||||||
To verify a signature:
|
To verify a signature:
|
||||||
|
|
||||||
int valid = ed25519_sign_open(message, message_len, pk, signature) == 0;
|
int valid = ed25519_sign_open(message, message_len, pk, signature) == 0;
|
||||||
|
|
||||||
To batch verify signatures:
|
To batch verify signatures:
|
||||||
|
|
||||||
const unsigned char *mp[num] = {message1, message2..}
|
const unsigned char *mp[num] = {message1, message2..}
|
||||||
size_t ml[num] = {message_len1, message_len2..}
|
size_t ml[num] = {message_len1, message_len2..}
|
||||||
const unsigned char *pkp[num] = {pk1, pk2..}
|
const unsigned char *pkp[num] = {pk1, pk2..}
|
||||||
const unsigned char *sigp[num] = {signature1, signature2..}
|
const unsigned char *sigp[num] = {signature1, signature2..}
|
||||||
int valid[num]
|
int valid[num]
|
||||||
|
|
||||||
/* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
|
/* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
|
||||||
int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
|
int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
|
||||||
|
|
||||||
**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
|
**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
|
||||||
`ed25519-randombytes.h`, to generate random scalars for the verification code.
|
`ed25519-randombytes.h`, to generate random scalars for the verification code.
|
||||||
The default implementation now uses OpenSSL's `RAND_bytes`.
|
The default implementation now uses OpenSSL's `RAND_bytes`.
|
||||||
|
|
||||||
Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are
|
Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are
|
||||||
not appended to messages, and there is no need for padding in front of messages.
|
not appended to messages, and there is no need for padding in front of messages.
|
||||||
Additionally, the secret key does not contain a copy of the public key, so it is
|
Additionally, the secret key does not contain a copy of the public key, so it is
|
||||||
32 bytes instead of 64 bytes, and the public key must be provided to the signing
|
32 bytes instead of 64 bytes, and the public key must be provided to the signing
|
||||||
function.
|
function.
|
||||||
|
|
||||||
##### Curve25519
|
##### Curve25519
|
||||||
|
|
||||||
Curve25519 public keys can be generated thanks to
|
Curve25519 public keys can be generated thanks to
|
||||||
[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
|
[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
|
||||||
leveraging Ed25519's precomputed basepoint scalar multiplication.
|
leveraging Ed25519's precomputed basepoint scalar multiplication.
|
||||||
|
|
||||||
curved25519_key sk, pk;
|
curved25519_key sk, pk;
|
||||||
randombytes(sk, sizeof(curved25519_key));
|
randombytes(sk, sizeof(curved25519_key));
|
||||||
curved25519_scalarmult_basepoint(pk, sk);
|
curved25519_scalarmult_basepoint(pk, sk);
|
||||||
|
|
||||||
Note the name is curved25519, a combination of curve and ed25519, to prevent
|
Note the name is curved25519, a combination of curve and ed25519, to prevent
|
||||||
name clashes. Performance is slightly faster than short message ed25519
|
name clashes. Performance is slightly faster than short message ed25519
|
||||||
signing due to both using the same code for the scalar multiply.
|
signing due to both using the same code for the scalar multiply.
|
||||||
|
|
||||||
@@ -180,4 +179,4 @@ with extreme values to ensure they function correctly. SSE2 is now supported.
|
|||||||
|
|
||||||
#### Papers
|
#### Papers
|
||||||
|
|
||||||
[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)
|
[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)
|
||||||
|
|||||||
99
external/ed25519-donna/fuzz/README.md
vendored
99
external/ed25519-donna/fuzz/README.md
vendored
@@ -1,78 +1,78 @@
|
|||||||
This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of
|
This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of
|
||||||
[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
|
[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
|
||||||
[ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10).
|
[ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10).
|
||||||
|
|
||||||
Curve25519 tests that generating a public key from a secret key
|
Curve25519 tests that generating a public key from a secret key
|
||||||
|
|
||||||
# Building
|
# Building
|
||||||
|
|
||||||
## *nix + PHP
|
## \*nix + PHP
|
||||||
|
|
||||||
`php build-nix.php (required parameters) (optional parameters)`
|
`php build-nix.php (required parameters) (optional parameters)`
|
||||||
|
|
||||||
Required parameters:
|
Required parameters:
|
||||||
|
|
||||||
* `--function=[curve25519,ed25519]`
|
- `--function=[curve25519,ed25519]`
|
||||||
* `--bits=[32,64]`
|
- `--bits=[32,64]`
|
||||||
|
|
||||||
Optional parameters:
|
Optional parameters:
|
||||||
|
|
||||||
* `--with-sse2`
|
- `--with-sse2`
|
||||||
|
|
||||||
Also fuzz against ed25519-donna-sse2
|
Also fuzz against ed25519-donna-sse2
|
||||||
* `--with-openssl`
|
|
||||||
|
|
||||||
Build with OpenSSL's SHA-512.
|
- `--with-openssl`
|
||||||
|
|
||||||
Default: Reference SHA-512 implementation (slow!)
|
Build with OpenSSL's SHA-512.
|
||||||
|
|
||||||
* `--compiler=[gcc,clang,icc]`
|
Default: Reference SHA-512 implementation (slow!)
|
||||||
|
|
||||||
Default: gcc
|
- `--compiler=[gcc,clang,icc]`
|
||||||
|
|
||||||
* `--no-asm`
|
Default: gcc
|
||||||
|
|
||||||
Do not use platform specific assembler
|
- `--no-asm`
|
||||||
|
|
||||||
|
Do not use platform specific assembler
|
||||||
|
|
||||||
example:
|
example:
|
||||||
|
|
||||||
php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
|
php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
|
||||||
|
|
||||||
## Windows
|
## Windows
|
||||||
|
|
||||||
Create a project with access to the ed25519 files.
|
Create a project with access to the ed25519 files.
|
||||||
|
|
||||||
If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects
|
If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects
|
||||||
"Properties/Preprocessor/Preprocessor Definitions" option
|
"Properties/Preprocessor/Preprocessor Definitions" option
|
||||||
|
|
||||||
Add the following files to the project:
|
Add the following files to the project:
|
||||||
|
|
||||||
* `fuzz/curve25519-ref10.c`
|
- `fuzz/curve25519-ref10.c`
|
||||||
* `fuzz/ed25519-ref10.c`
|
- `fuzz/ed25519-ref10.c`
|
||||||
* `fuzz/ed25519-donna.c`
|
- `fuzz/ed25519-donna.c`
|
||||||
* `fuzz/ed25519-donna-sse2.c` (optional)
|
- `fuzz/ed25519-donna-sse2.c` (optional)
|
||||||
* `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
|
- `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
|
||||||
|
|
||||||
If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
|
If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
|
||||||
its "Properties/Preprocessor/Preprocessor Definitions" option.
|
its "Properties/Preprocessor/Preprocessor Definitions" option.
|
||||||
|
|
||||||
# Running
|
# Running
|
||||||
|
|
||||||
If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
|
If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
|
||||||
and a 64bit progress count (every 0x20000 passes):
|
and a 64bit progress count (every 0x20000 passes):
|
||||||
|
|
||||||
fuzzing: ref10 curved25519 curved25519-sse2
|
fuzzing: ref10 curved25519 curved25519-sse2
|
||||||
|
|
||||||
................................ [0000000000020000]
|
................................ [0000000000020000]
|
||||||
................................ [0000000000040000]
|
................................ [0000000000040000]
|
||||||
................................ [0000000000060000]
|
................................ [0000000000060000]
|
||||||
................................ [0000000000080000]
|
................................ [0000000000080000]
|
||||||
................................ [00000000000a0000]
|
................................ [00000000000a0000]
|
||||||
................................ [00000000000c0000]
|
................................ [00000000000c0000]
|
||||||
|
|
||||||
If any of the implementations do not agree with the ref10 implementation, the program will dump
|
If any of the implementations do not agree with the ref10 implementation, the program will dump
|
||||||
the random data that was used, the data generated by the ref10 implementation, and diffs of the
|
the random data that was used, the data generated by the ref10 implementation, and diffs of the
|
||||||
ed25519-donna data against the ref10 data.
|
ed25519-donna data against the ref10 data.
|
||||||
|
|
||||||
## Example errors
|
## Example errors
|
||||||
@@ -83,21 +83,21 @@ These are example error dumps (with intentionally introduced errors).
|
|||||||
|
|
||||||
Random data:
|
Random data:
|
||||||
|
|
||||||
* sk, or Secret Key
|
- sk, or Secret Key
|
||||||
* m, or Message
|
- m, or Message
|
||||||
|
|
||||||
Generated data:
|
Generated data:
|
||||||
|
|
||||||
* pk, or Public Key
|
- pk, or Public Key
|
||||||
* sig, or Signature
|
- sig, or Signature
|
||||||
* valid, or if the signature of the message is valid with the public key
|
- valid, or if the signature of the message is valid with the public key
|
||||||
|
|
||||||
Dump:
|
Dump:
|
||||||
|
|
||||||
sk:
|
sk:
|
||||||
0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef,
|
0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef,
|
||||||
0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c,
|
0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c,
|
||||||
|
|
||||||
m:
|
m:
|
||||||
0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91,
|
0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91,
|
||||||
0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7,
|
0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7,
|
||||||
@@ -107,67 +107,66 @@ Dump:
|
|||||||
0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89,
|
0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89,
|
||||||
0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b,
|
0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b,
|
||||||
0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a,
|
0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a,
|
||||||
|
|
||||||
ref10:
|
ref10:
|
||||||
pk:
|
pk:
|
||||||
0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04,
|
0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04,
|
||||||
0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04,
|
0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04,
|
||||||
|
|
||||||
sig:
|
sig:
|
||||||
0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96,
|
0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96,
|
||||||
0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5,
|
0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5,
|
||||||
0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f,
|
0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f,
|
||||||
0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02,
|
0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02,
|
||||||
|
|
||||||
valid: yes
|
valid: yes
|
||||||
|
|
||||||
ed25519-donna:
|
ed25519-donna:
|
||||||
pk diff:
|
pk diff:
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
|
|
||||||
sig diff:
|
sig diff:
|
||||||
0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5,
|
0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5,
|
||||||
0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab,
|
0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab,
|
||||||
0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97,
|
0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97,
|
||||||
0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08,
|
0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08,
|
||||||
|
|
||||||
valid: no
|
valid: no
|
||||||
|
|
||||||
In this case, the generated public key matches, but the generated signature is completely
|
In this case, the generated public key matches, but the generated signature is completely
|
||||||
different and does not validate.
|
different and does not validate.
|
||||||
|
|
||||||
### Curve25519
|
### Curve25519
|
||||||
|
|
||||||
Random data:
|
Random data:
|
||||||
|
|
||||||
* sk, or Secret Key
|
- sk, or Secret Key
|
||||||
|
|
||||||
Generated data:
|
Generated data:
|
||||||
|
|
||||||
* pk, or Public Key
|
- pk, or Public Key
|
||||||
|
|
||||||
Dump:
|
Dump:
|
||||||
|
|
||||||
sk:
|
sk:
|
||||||
0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e,
|
0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e,
|
||||||
0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70,
|
0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70,
|
||||||
|
|
||||||
|
|
||||||
ref10:
|
ref10:
|
||||||
0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95,
|
0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95,
|
||||||
0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62,
|
0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62,
|
||||||
|
|
||||||
|
|
||||||
curved25519 diff:
|
curved25519 diff:
|
||||||
0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43,
|
0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43,
|
||||||
0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35,
|
0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35,
|
||||||
|
|
||||||
|
|
||||||
curved25519-sse2 diff:
|
curved25519-sse2 diff:
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
|
|
||||||
|
In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
|
||||||
In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
|
implementation.
|
||||||
implementation.
|
|
||||||
|
|||||||
10
external/nudb/conandata.yml
vendored
10
external/nudb/conandata.yml
vendored
@@ -1,10 +0,0 @@
|
|||||||
sources:
|
|
||||||
"2.0.8":
|
|
||||||
url: "https://github.com/CPPAlliance/NuDB/archive/2.0.8.tar.gz"
|
|
||||||
sha256: "9b71903d8ba111cd893ab064b9a8b6ac4124ed8bd6b4f67250205bc43c7f13a8"
|
|
||||||
patches:
|
|
||||||
"2.0.8":
|
|
||||||
- patch_file: "patches/2.0.8-0001-add-include-stdexcept-for-msvc.patch"
|
|
||||||
patch_description: "Fix build for MSVC by including stdexcept"
|
|
||||||
patch_type: "portability"
|
|
||||||
patch_source: "https://github.com/cppalliance/NuDB/pull/100/files"
|
|
||||||
72
external/nudb/conanfile.py
vendored
72
external/nudb/conanfile.py
vendored
@@ -1,72 +0,0 @@
|
|||||||
import os
|
|
||||||
|
|
||||||
from conan import ConanFile
|
|
||||||
from conan.tools.build import check_min_cppstd
|
|
||||||
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get
|
|
||||||
from conan.tools.layout import basic_layout
|
|
||||||
|
|
||||||
required_conan_version = ">=1.52.0"
|
|
||||||
|
|
||||||
|
|
||||||
class NudbConan(ConanFile):
|
|
||||||
name = "nudb"
|
|
||||||
description = "A fast key/value insert-only database for SSD drives in C++11"
|
|
||||||
license = "BSL-1.0"
|
|
||||||
url = "https://github.com/conan-io/conan-center-index"
|
|
||||||
homepage = "https://github.com/CPPAlliance/NuDB"
|
|
||||||
topics = ("header-only", "KVS", "insert-only")
|
|
||||||
|
|
||||||
package_type = "header-library"
|
|
||||||
settings = "os", "arch", "compiler", "build_type"
|
|
||||||
no_copy_source = True
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _min_cppstd(self):
|
|
||||||
return 11
|
|
||||||
|
|
||||||
def export_sources(self):
|
|
||||||
export_conandata_patches(self)
|
|
||||||
|
|
||||||
def layout(self):
|
|
||||||
basic_layout(self, src_folder="src")
|
|
||||||
|
|
||||||
def requirements(self):
|
|
||||||
self.requires("boost/1.83.0")
|
|
||||||
|
|
||||||
def package_id(self):
|
|
||||||
self.info.clear()
|
|
||||||
|
|
||||||
def validate(self):
|
|
||||||
if self.settings.compiler.cppstd:
|
|
||||||
check_min_cppstd(self, self._min_cppstd)
|
|
||||||
|
|
||||||
def source(self):
|
|
||||||
get(self, **self.conan_data["sources"][self.version], strip_root=True)
|
|
||||||
|
|
||||||
def build(self):
|
|
||||||
apply_conandata_patches(self)
|
|
||||||
|
|
||||||
def package(self):
|
|
||||||
copy(self, "LICENSE*",
|
|
||||||
dst=os.path.join(self.package_folder, "licenses"),
|
|
||||||
src=self.source_folder)
|
|
||||||
copy(self, "*",
|
|
||||||
dst=os.path.join(self.package_folder, "include"),
|
|
||||||
src=os.path.join(self.source_folder, "include"))
|
|
||||||
|
|
||||||
def package_info(self):
|
|
||||||
self.cpp_info.bindirs = []
|
|
||||||
self.cpp_info.libdirs = []
|
|
||||||
|
|
||||||
self.cpp_info.set_property("cmake_target_name", "NuDB")
|
|
||||||
self.cpp_info.set_property("cmake_target_aliases", ["NuDB::nudb"])
|
|
||||||
self.cpp_info.set_property("cmake_find_mode", "both")
|
|
||||||
|
|
||||||
self.cpp_info.components["core"].set_property("cmake_target_name", "nudb")
|
|
||||||
self.cpp_info.components["core"].names["cmake_find_package"] = "nudb"
|
|
||||||
self.cpp_info.components["core"].names["cmake_find_package_multi"] = "nudb"
|
|
||||||
self.cpp_info.components["core"].requires = ["boost::thread", "boost::system"]
|
|
||||||
|
|
||||||
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
|
|
||||||
self.cpp_info.names["cmake_find_package"] = "NuDB"
|
|
||||||
self.cpp_info.names["cmake_find_package_multi"] = "NuDB"
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
diff --git a/include/nudb/detail/stream.hpp b/include/nudb/detail/stream.hpp
|
|
||||||
index 6c07bf1..e0ce8ed 100644
|
|
||||||
--- a/include/nudb/detail/stream.hpp
|
|
||||||
+++ b/include/nudb/detail/stream.hpp
|
|
||||||
@@ -14,6 +14,7 @@
|
|
||||||
#include <cstdint>
|
|
||||||
#include <cstring>
|
|
||||||
#include <memory>
|
|
||||||
+#include <stdexcept>
|
|
||||||
|
|
||||||
namespace nudb {
|
|
||||||
namespace detail {
|
|
||||||
diff --git a/include/nudb/impl/context.ipp b/include/nudb/impl/context.ipp
|
|
||||||
index beb7058..ffde0b3 100644
|
|
||||||
--- a/include/nudb/impl/context.ipp
|
|
||||||
+++ b/include/nudb/impl/context.ipp
|
|
||||||
@@ -9,6 +9,7 @@
|
|
||||||
#define NUDB_IMPL_CONTEXT_IPP
|
|
||||||
|
|
||||||
#include <nudb/detail/store_base.hpp>
|
|
||||||
+#include <stdexcept>
|
|
||||||
|
|
||||||
namespace nudb {
|
|
||||||
|
|
||||||
12
external/rocksdb/conandata.yml
vendored
12
external/rocksdb/conandata.yml
vendored
@@ -1,12 +0,0 @@
|
|||||||
sources:
|
|
||||||
"9.7.3":
|
|
||||||
url: "https://github.com/facebook/rocksdb/archive/refs/tags/v9.7.3.tar.gz"
|
|
||||||
sha256: "acfabb989cbfb5b5c4d23214819b059638193ec33dad2d88373c46448d16d38b"
|
|
||||||
patches:
|
|
||||||
"9.7.3":
|
|
||||||
- patch_file: "patches/9.x.x-0001-exclude-thirdparty.patch"
|
|
||||||
patch_description: "Do not include thirdparty.inc"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/9.7.3-0001-memory-leak.patch"
|
|
||||||
patch_description: "Fix a leak of obsolete blob files left open until DB::Close()"
|
|
||||||
patch_type: "portability"
|
|
||||||
235
external/rocksdb/conanfile.py
vendored
235
external/rocksdb/conanfile.py
vendored
@@ -1,235 +0,0 @@
|
|||||||
import os
|
|
||||||
import glob
|
|
||||||
import shutil
|
|
||||||
|
|
||||||
from conan import ConanFile
|
|
||||||
from conan.errors import ConanInvalidConfiguration
|
|
||||||
from conan.tools.build import check_min_cppstd
|
|
||||||
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
|
|
||||||
from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rm, rmdir
|
|
||||||
from conan.tools.microsoft import check_min_vs, is_msvc, is_msvc_static_runtime
|
|
||||||
from conan.tools.scm import Version
|
|
||||||
|
|
||||||
required_conan_version = ">=1.53.0"
|
|
||||||
|
|
||||||
|
|
||||||
class RocksDBConan(ConanFile):
|
|
||||||
name = "rocksdb"
|
|
||||||
description = "A library that provides an embeddable, persistent key-value store for fast storage"
|
|
||||||
license = ("GPL-2.0-only", "Apache-2.0")
|
|
||||||
url = "https://github.com/conan-io/conan-center-index"
|
|
||||||
homepage = "https://github.com/facebook/rocksdb"
|
|
||||||
topics = ("database", "leveldb", "facebook", "key-value")
|
|
||||||
package_type = "library"
|
|
||||||
settings = "os", "arch", "compiler", "build_type"
|
|
||||||
options = {
|
|
||||||
"shared": [True, False],
|
|
||||||
"fPIC": [True, False],
|
|
||||||
"lite": [True, False],
|
|
||||||
"with_gflags": [True, False],
|
|
||||||
"with_snappy": [True, False],
|
|
||||||
"with_lz4": [True, False],
|
|
||||||
"with_zlib": [True, False],
|
|
||||||
"with_zstd": [True, False],
|
|
||||||
"with_tbb": [True, False],
|
|
||||||
"with_jemalloc": [True, False],
|
|
||||||
"enable_sse": [False, "sse42", "avx2"],
|
|
||||||
"use_rtti": [True, False],
|
|
||||||
}
|
|
||||||
default_options = {
|
|
||||||
"shared": False,
|
|
||||||
"fPIC": True,
|
|
||||||
"lite": False,
|
|
||||||
"with_snappy": False,
|
|
||||||
"with_lz4": False,
|
|
||||||
"with_zlib": False,
|
|
||||||
"with_zstd": False,
|
|
||||||
"with_gflags": False,
|
|
||||||
"with_tbb": False,
|
|
||||||
"with_jemalloc": False,
|
|
||||||
"enable_sse": False,
|
|
||||||
"use_rtti": False,
|
|
||||||
}
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _min_cppstd(self):
|
|
||||||
return "11" if Version(self.version) < "8.8.1" else "17"
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _compilers_minimum_version(self):
|
|
||||||
return {} if self._min_cppstd == "11" else {
|
|
||||||
"apple-clang": "10",
|
|
||||||
"clang": "7",
|
|
||||||
"gcc": "7",
|
|
||||||
"msvc": "191",
|
|
||||||
"Visual Studio": "15",
|
|
||||||
}
|
|
||||||
|
|
||||||
def export_sources(self):
|
|
||||||
export_conandata_patches(self)
|
|
||||||
|
|
||||||
def config_options(self):
|
|
||||||
if self.settings.os == "Windows":
|
|
||||||
del self.options.fPIC
|
|
||||||
if self.settings.arch != "x86_64":
|
|
||||||
del self.options.with_tbb
|
|
||||||
if self.settings.build_type == "Debug":
|
|
||||||
self.options.use_rtti = True # Rtti are used in asserts for debug mode...
|
|
||||||
|
|
||||||
def configure(self):
|
|
||||||
if self.options.shared:
|
|
||||||
self.options.rm_safe("fPIC")
|
|
||||||
|
|
||||||
def layout(self):
|
|
||||||
cmake_layout(self, src_folder="src")
|
|
||||||
|
|
||||||
def requirements(self):
|
|
||||||
if self.options.with_gflags:
|
|
||||||
self.requires("gflags/2.2.2")
|
|
||||||
if self.options.with_snappy:
|
|
||||||
self.requires("snappy/1.1.10")
|
|
||||||
if self.options.with_lz4:
|
|
||||||
self.requires("lz4/1.10.0")
|
|
||||||
if self.options.with_zlib:
|
|
||||||
self.requires("zlib/[>=1.2.11 <2]")
|
|
||||||
if self.options.with_zstd:
|
|
||||||
self.requires("zstd/1.5.6")
|
|
||||||
if self.options.get_safe("with_tbb"):
|
|
||||||
self.requires("onetbb/2021.12.0")
|
|
||||||
if self.options.with_jemalloc:
|
|
||||||
self.requires("jemalloc/5.3.0")
|
|
||||||
|
|
||||||
def validate(self):
|
|
||||||
if self.settings.compiler.get_safe("cppstd"):
|
|
||||||
check_min_cppstd(self, self._min_cppstd)
|
|
||||||
|
|
||||||
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
|
|
||||||
if minimum_version and Version(self.settings.compiler.version) < minimum_version:
|
|
||||||
raise ConanInvalidConfiguration(
|
|
||||||
f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.settings.arch not in ["x86_64", "ppc64le", "ppc64", "mips64", "armv8"]:
|
|
||||||
raise ConanInvalidConfiguration("Rocksdb requires 64 bits")
|
|
||||||
|
|
||||||
check_min_vs(self, "191")
|
|
||||||
|
|
||||||
if self.version == "6.20.3" and \
|
|
||||||
self.settings.os == "Linux" and \
|
|
||||||
self.settings.compiler == "gcc" and \
|
|
||||||
Version(self.settings.compiler.version) < "5":
|
|
||||||
raise ConanInvalidConfiguration("Rocksdb 6.20.3 is not compilable with gcc <5.") # See https://github.com/facebook/rocksdb/issues/3522
|
|
||||||
|
|
||||||
def source(self):
|
|
||||||
get(self, **self.conan_data["sources"][self.version], strip_root=True)
|
|
||||||
|
|
||||||
def generate(self):
|
|
||||||
tc = CMakeToolchain(self)
|
|
||||||
tc.variables["FAIL_ON_WARNINGS"] = False
|
|
||||||
tc.variables["WITH_TESTS"] = False
|
|
||||||
tc.variables["WITH_TOOLS"] = False
|
|
||||||
tc.variables["WITH_CORE_TOOLS"] = False
|
|
||||||
tc.variables["WITH_BENCHMARK_TOOLS"] = False
|
|
||||||
tc.variables["WITH_FOLLY_DISTRIBUTED_MUTEX"] = False
|
|
||||||
if is_msvc(self):
|
|
||||||
tc.variables["WITH_MD_LIBRARY"] = not is_msvc_static_runtime(self)
|
|
||||||
tc.variables["ROCKSDB_INSTALL_ON_WINDOWS"] = self.settings.os == "Windows"
|
|
||||||
tc.variables["ROCKSDB_LITE"] = self.options.lite
|
|
||||||
tc.variables["WITH_GFLAGS"] = self.options.with_gflags
|
|
||||||
tc.variables["WITH_SNAPPY"] = self.options.with_snappy
|
|
||||||
tc.variables["WITH_LZ4"] = self.options.with_lz4
|
|
||||||
tc.variables["WITH_ZLIB"] = self.options.with_zlib
|
|
||||||
tc.variables["WITH_ZSTD"] = self.options.with_zstd
|
|
||||||
tc.variables["WITH_TBB"] = self.options.get_safe("with_tbb", False)
|
|
||||||
tc.variables["WITH_JEMALLOC"] = self.options.with_jemalloc
|
|
||||||
tc.variables["ROCKSDB_BUILD_SHARED"] = self.options.shared
|
|
||||||
tc.variables["ROCKSDB_LIBRARY_EXPORTS"] = self.settings.os == "Windows" and self.options.shared
|
|
||||||
tc.variables["ROCKSDB_DLL" ] = self.settings.os == "Windows" and self.options.shared
|
|
||||||
tc.variables["USE_RTTI"] = self.options.use_rtti
|
|
||||||
if not bool(self.options.enable_sse):
|
|
||||||
tc.variables["PORTABLE"] = True
|
|
||||||
tc.variables["FORCE_SSE42"] = False
|
|
||||||
elif self.options.enable_sse == "sse42":
|
|
||||||
tc.variables["PORTABLE"] = True
|
|
||||||
tc.variables["FORCE_SSE42"] = True
|
|
||||||
elif self.options.enable_sse == "avx2":
|
|
||||||
tc.variables["PORTABLE"] = False
|
|
||||||
tc.variables["FORCE_SSE42"] = False
|
|
||||||
# not available yet in CCI
|
|
||||||
tc.variables["WITH_NUMA"] = False
|
|
||||||
tc.generate()
|
|
||||||
|
|
||||||
deps = CMakeDeps(self)
|
|
||||||
if self.options.with_jemalloc:
|
|
||||||
deps.set_property("jemalloc", "cmake_file_name", "JeMalloc")
|
|
||||||
deps.set_property("jemalloc", "cmake_target_name", "JeMalloc::JeMalloc")
|
|
||||||
if self.options.with_zstd:
|
|
||||||
deps.set_property("zstd", "cmake_target_name", "zstd::zstd")
|
|
||||||
deps.generate()
|
|
||||||
|
|
||||||
def build(self):
|
|
||||||
apply_conandata_patches(self)
|
|
||||||
cmake = CMake(self)
|
|
||||||
cmake.configure()
|
|
||||||
cmake.build()
|
|
||||||
|
|
||||||
def _remove_static_libraries(self):
|
|
||||||
rm(self, "rocksdb.lib", os.path.join(self.package_folder, "lib"))
|
|
||||||
for lib in glob.glob(os.path.join(self.package_folder, "lib", "*.a")):
|
|
||||||
if not lib.endswith(".dll.a"):
|
|
||||||
os.remove(lib)
|
|
||||||
|
|
||||||
def _remove_cpp_headers(self):
|
|
||||||
for path in glob.glob(os.path.join(self.package_folder, "include", "rocksdb", "*")):
|
|
||||||
if path != os.path.join(self.package_folder, "include", "rocksdb", "c.h"):
|
|
||||||
if os.path.isfile(path):
|
|
||||||
os.remove(path)
|
|
||||||
else:
|
|
||||||
shutil.rmtree(path)
|
|
||||||
|
|
||||||
def package(self):
|
|
||||||
copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
|
|
||||||
copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
|
|
||||||
cmake = CMake(self)
|
|
||||||
cmake.install()
|
|
||||||
if self.options.shared:
|
|
||||||
self._remove_static_libraries()
|
|
||||||
self._remove_cpp_headers() # Force stable ABI for shared libraries
|
|
||||||
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
|
|
||||||
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
|
|
||||||
|
|
||||||
def package_info(self):
|
|
||||||
cmake_target = "rocksdb-shared" if self.options.shared else "rocksdb"
|
|
||||||
self.cpp_info.set_property("cmake_file_name", "RocksDB")
|
|
||||||
self.cpp_info.set_property("cmake_target_name", f"RocksDB::{cmake_target}")
|
|
||||||
# TODO: back to global scope in conan v2 once cmake_find_package* generators removed
|
|
||||||
self.cpp_info.components["librocksdb"].libs = collect_libs(self)
|
|
||||||
if self.settings.os == "Windows":
|
|
||||||
self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"]
|
|
||||||
if self.options.shared:
|
|
||||||
self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"]
|
|
||||||
elif self.settings.os in ["Linux", "FreeBSD"]:
|
|
||||||
self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"]
|
|
||||||
if self.options.lite:
|
|
||||||
self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE")
|
|
||||||
|
|
||||||
# TODO: to remove in conan v2 once cmake_find_package* generators removed
|
|
||||||
self.cpp_info.names["cmake_find_package"] = "RocksDB"
|
|
||||||
self.cpp_info.names["cmake_find_package_multi"] = "RocksDB"
|
|
||||||
self.cpp_info.components["librocksdb"].names["cmake_find_package"] = cmake_target
|
|
||||||
self.cpp_info.components["librocksdb"].names["cmake_find_package_multi"] = cmake_target
|
|
||||||
self.cpp_info.components["librocksdb"].set_property("cmake_target_name", f"RocksDB::{cmake_target}")
|
|
||||||
if self.options.with_gflags:
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("gflags::gflags")
|
|
||||||
if self.options.with_snappy:
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("snappy::snappy")
|
|
||||||
if self.options.with_lz4:
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("lz4::lz4")
|
|
||||||
if self.options.with_zlib:
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("zlib::zlib")
|
|
||||||
if self.options.with_zstd:
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("zstd::zstd")
|
|
||||||
if self.options.get_safe("with_tbb"):
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb")
|
|
||||||
if self.options.with_jemalloc:
|
|
||||||
self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc")
|
|
||||||
@@ -1,319 +0,0 @@
|
|||||||
diff --git a/HISTORY.md b/HISTORY.md
|
|
||||||
index 36d472229..05ad1a202 100644
|
|
||||||
--- a/HISTORY.md
|
|
||||||
+++ b/HISTORY.md
|
|
||||||
@@ -1,6 +1,10 @@
|
|
||||||
# Rocksdb Change Log
|
|
||||||
> NOTE: Entries for next release do not go here. Follow instructions in `unreleased_history/README.txt`
|
|
||||||
|
|
||||||
+## 9.7.4 (10/31/2024)
|
|
||||||
+### Bug Fixes
|
|
||||||
+* Fix a leak of obsolete blob files left open until DB::Close(). This bug was introduced in version 9.4.0.
|
|
||||||
+
|
|
||||||
## 9.7.3 (10/16/2024)
|
|
||||||
### Behavior Changes
|
|
||||||
* OPTIONS file to be loaded by remote worker is now preserved so that it does not get purged by the primary host. A similar technique as how we are preserving new SST files from getting purged is used for this. min_options_file_numbers_ is tracked like pending_outputs_ is tracked.
|
|
||||||
diff --git a/db/blob/blob_file_cache.cc b/db/blob/blob_file_cache.cc
|
|
||||||
index 5f340aadf..1b9faa238 100644
|
|
||||||
--- a/db/blob/blob_file_cache.cc
|
|
||||||
+++ b/db/blob/blob_file_cache.cc
|
|
||||||
@@ -42,6 +42,7 @@ Status BlobFileCache::GetBlobFileReader(
|
|
||||||
assert(blob_file_reader);
|
|
||||||
assert(blob_file_reader->IsEmpty());
|
|
||||||
|
|
||||||
+ // NOTE: sharing same Cache with table_cache
|
|
||||||
const Slice key = GetSliceForKey(&blob_file_number);
|
|
||||||
|
|
||||||
assert(cache_);
|
|
||||||
@@ -98,4 +99,13 @@ Status BlobFileCache::GetBlobFileReader(
|
|
||||||
return Status::OK();
|
|
||||||
}
|
|
||||||
|
|
||||||
+void BlobFileCache::Evict(uint64_t blob_file_number) {
|
|
||||||
+ // NOTE: sharing same Cache with table_cache
|
|
||||||
+ const Slice key = GetSliceForKey(&blob_file_number);
|
|
||||||
+
|
|
||||||
+ assert(cache_);
|
|
||||||
+
|
|
||||||
+ cache_.get()->Erase(key);
|
|
||||||
+}
|
|
||||||
+
|
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
|
||||||
diff --git a/db/blob/blob_file_cache.h b/db/blob/blob_file_cache.h
|
|
||||||
index 740e67ada..6858d012b 100644
|
|
||||||
--- a/db/blob/blob_file_cache.h
|
|
||||||
+++ b/db/blob/blob_file_cache.h
|
|
||||||
@@ -36,6 +36,15 @@ class BlobFileCache {
|
|
||||||
uint64_t blob_file_number,
|
|
||||||
CacheHandleGuard<BlobFileReader>* blob_file_reader);
|
|
||||||
|
|
||||||
+ // Called when a blob file is obsolete to ensure it is removed from the cache
|
|
||||||
+ // to avoid effectively leaking the open file and assicated memory
|
|
||||||
+ void Evict(uint64_t blob_file_number);
|
|
||||||
+
|
|
||||||
+ // Used to identify cache entries for blob files (not normally useful)
|
|
||||||
+ static const Cache::CacheItemHelper* GetHelper() {
|
|
||||||
+ return CacheInterface::GetBasicHelper();
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
private:
|
|
||||||
using CacheInterface =
|
|
||||||
BasicTypedCacheInterface<BlobFileReader, CacheEntryRole::kMisc>;
|
|
||||||
diff --git a/db/column_family.h b/db/column_family.h
|
|
||||||
index e4b7adde8..86637736a 100644
|
|
||||||
--- a/db/column_family.h
|
|
||||||
+++ b/db/column_family.h
|
|
||||||
@@ -401,6 +401,7 @@ class ColumnFamilyData {
|
|
||||||
SequenceNumber earliest_seq);
|
|
||||||
|
|
||||||
TableCache* table_cache() const { return table_cache_.get(); }
|
|
||||||
+ BlobFileCache* blob_file_cache() const { return blob_file_cache_.get(); }
|
|
||||||
BlobSource* blob_source() const { return blob_source_.get(); }
|
|
||||||
|
|
||||||
// See documentation in compaction_picker.h
|
|
||||||
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
|
|
||||||
index 261593423..06573ac2e 100644
|
|
||||||
--- a/db/db_impl/db_impl.cc
|
|
||||||
+++ b/db/db_impl/db_impl.cc
|
|
||||||
@@ -659,8 +659,9 @@ Status DBImpl::CloseHelper() {
|
|
||||||
// We need to release them before the block cache is destroyed. The block
|
|
||||||
// cache may be destroyed inside versions_.reset(), when column family data
|
|
||||||
// list is destroyed, so leaving handles in table cache after
|
|
||||||
- // versions_.reset() may cause issues.
|
|
||||||
- // Here we clean all unreferenced handles in table cache.
|
|
||||||
+ // versions_.reset() may cause issues. Here we clean all unreferenced handles
|
|
||||||
+ // in table cache, and (for certain builds/conditions) assert that no obsolete
|
|
||||||
+ // files are hanging around unreferenced (leak) in the table/blob file cache.
|
|
||||||
// Now we assume all user queries have finished, so only version set itself
|
|
||||||
// can possibly hold the blocks from block cache. After releasing unreferenced
|
|
||||||
// handles here, only handles held by version set left and inside
|
|
||||||
@@ -668,6 +669,9 @@ Status DBImpl::CloseHelper() {
|
|
||||||
// time a handle is released, we erase it from the cache too. By doing that,
|
|
||||||
// we can guarantee that after versions_.reset(), table cache is empty
|
|
||||||
// so the cache can be safely destroyed.
|
|
||||||
+#ifndef NDEBUG
|
|
||||||
+ TEST_VerifyNoObsoleteFilesCached(/*db_mutex_already_held=*/true);
|
|
||||||
+#endif // !NDEBUG
|
|
||||||
table_cache_->EraseUnRefEntries();
|
|
||||||
|
|
||||||
for (auto& txn_entry : recovered_transactions_) {
|
|
||||||
@@ -3227,6 +3231,8 @@ Status DBImpl::MultiGetImpl(
|
|
||||||
s = Status::Aborted();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
+ // This could be a long-running operation
|
|
||||||
+ ROCKSDB_THREAD_YIELD_HOOK();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Post processing (decrement reference counts and record statistics)
|
|
||||||
diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
|
|
||||||
index 5e4fa310b..ccc0abfa7 100644
|
|
||||||
--- a/db/db_impl/db_impl.h
|
|
||||||
+++ b/db/db_impl/db_impl.h
|
|
||||||
@@ -1241,9 +1241,14 @@ class DBImpl : public DB {
|
|
||||||
static Status TEST_ValidateOptions(const DBOptions& db_options) {
|
|
||||||
return ValidateOptions(db_options);
|
|
||||||
}
|
|
||||||
-
|
|
||||||
#endif // NDEBUG
|
|
||||||
|
|
||||||
+ // In certain configurations, verify that the table/blob file cache only
|
|
||||||
+ // contains entries for live files, to check for effective leaks of open
|
|
||||||
+ // files. This can only be called when purging of obsolete files has
|
|
||||||
+ // "settled," such as during parts of DB Close().
|
|
||||||
+ void TEST_VerifyNoObsoleteFilesCached(bool db_mutex_already_held) const;
|
|
||||||
+
|
|
||||||
// persist stats to column family "_persistent_stats"
|
|
||||||
void PersistStats();
|
|
||||||
|
|
||||||
diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc
|
|
||||||
index 790a50d7a..67f5b4aaf 100644
|
|
||||||
--- a/db/db_impl/db_impl_debug.cc
|
|
||||||
+++ b/db/db_impl/db_impl_debug.cc
|
|
||||||
@@ -9,6 +9,7 @@
|
|
||||||
|
|
||||||
#ifndef NDEBUG
|
|
||||||
|
|
||||||
+#include "db/blob/blob_file_cache.h"
|
|
||||||
#include "db/column_family.h"
|
|
||||||
#include "db/db_impl/db_impl.h"
|
|
||||||
#include "db/error_handler.h"
|
|
||||||
@@ -328,5 +329,49 @@ size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const {
|
|
||||||
InstrumentedMutexLock l(&const_cast<DBImpl*>(this)->stats_history_mutex_);
|
|
||||||
return EstimateInMemoryStatsHistorySize();
|
|
||||||
}
|
|
||||||
+
|
|
||||||
+void DBImpl::TEST_VerifyNoObsoleteFilesCached(
|
|
||||||
+ bool db_mutex_already_held) const {
|
|
||||||
+ // This check is somewhat expensive and obscure to make a part of every
|
|
||||||
+ // unit test in every build variety. Thus, we only enable it for ASAN builds.
|
|
||||||
+ if (!kMustFreeHeapAllocations) {
|
|
||||||
+ return;
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ std::optional<InstrumentedMutexLock> l;
|
|
||||||
+ if (db_mutex_already_held) {
|
|
||||||
+ mutex_.AssertHeld();
|
|
||||||
+ } else {
|
|
||||||
+ l.emplace(&mutex_);
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ std::vector<uint64_t> live_files;
|
|
||||||
+ for (auto cfd : *versions_->GetColumnFamilySet()) {
|
|
||||||
+ if (cfd->IsDropped()) {
|
|
||||||
+ continue;
|
|
||||||
+ }
|
|
||||||
+ // Sneakily add both SST and blob files to the same list
|
|
||||||
+ cfd->current()->AddLiveFiles(&live_files, &live_files);
|
|
||||||
+ }
|
|
||||||
+ std::sort(live_files.begin(), live_files.end());
|
|
||||||
+
|
|
||||||
+ auto fn = [&live_files](const Slice& key, Cache::ObjectPtr, size_t,
|
|
||||||
+ const Cache::CacheItemHelper* helper) {
|
|
||||||
+ if (helper != BlobFileCache::GetHelper()) {
|
|
||||||
+ // Skip non-blob files for now
|
|
||||||
+ // FIXME: diagnose and fix the leaks of obsolete SST files revealed in
|
|
||||||
+ // unit tests.
|
|
||||||
+ return;
|
|
||||||
+ }
|
|
||||||
+ // See TableCache and BlobFileCache
|
|
||||||
+ assert(key.size() == sizeof(uint64_t));
|
|
||||||
+ uint64_t file_number;
|
|
||||||
+ GetUnaligned(reinterpret_cast<const uint64_t*>(key.data()), &file_number);
|
|
||||||
+ // Assert file is in sorted live_files
|
|
||||||
+ assert(
|
|
||||||
+ std::binary_search(live_files.begin(), live_files.end(), file_number));
|
|
||||||
+ };
|
|
||||||
+ table_cache_->ApplyToAllEntries(fn, {});
|
|
||||||
+}
|
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
|
||||||
#endif // NDEBUG
|
|
||||||
diff --git a/db/db_iter.cc b/db/db_iter.cc
|
|
||||||
index e02586377..bf4749eb9 100644
|
|
||||||
--- a/db/db_iter.cc
|
|
||||||
+++ b/db/db_iter.cc
|
|
||||||
@@ -540,6 +540,8 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key,
|
|
||||||
} else {
|
|
||||||
iter_.Next();
|
|
||||||
}
|
|
||||||
+ // This could be a long-running operation due to tombstones, etc.
|
|
||||||
+ ROCKSDB_THREAD_YIELD_HOOK();
|
|
||||||
} while (iter_.Valid());
|
|
||||||
|
|
||||||
valid_ = false;
|
|
||||||
diff --git a/db/table_cache.cc b/db/table_cache.cc
|
|
||||||
index 71fc29c32..8a5be75e8 100644
|
|
||||||
--- a/db/table_cache.cc
|
|
||||||
+++ b/db/table_cache.cc
|
|
||||||
@@ -164,6 +164,7 @@ Status TableCache::GetTableReader(
|
|
||||||
}
|
|
||||||
|
|
||||||
Cache::Handle* TableCache::Lookup(Cache* cache, uint64_t file_number) {
|
|
||||||
+ // NOTE: sharing same Cache with BlobFileCache
|
|
||||||
Slice key = GetSliceForFileNumber(&file_number);
|
|
||||||
return cache->Lookup(key);
|
|
||||||
}
|
|
||||||
@@ -179,6 +180,7 @@ Status TableCache::FindTable(
|
|
||||||
size_t max_file_size_for_l0_meta_pin, Temperature file_temperature) {
|
|
||||||
PERF_TIMER_GUARD_WITH_CLOCK(find_table_nanos, ioptions_.clock);
|
|
||||||
uint64_t number = file_meta.fd.GetNumber();
|
|
||||||
+ // NOTE: sharing same Cache with BlobFileCache
|
|
||||||
Slice key = GetSliceForFileNumber(&number);
|
|
||||||
*handle = cache_.Lookup(key);
|
|
||||||
TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0",
|
|
||||||
diff --git a/db/version_builder.cc b/db/version_builder.cc
|
|
||||||
index ed8ab8214..c98f53f42 100644
|
|
||||||
--- a/db/version_builder.cc
|
|
||||||
+++ b/db/version_builder.cc
|
|
||||||
@@ -24,6 +24,7 @@
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "cache/cache_reservation_manager.h"
|
|
||||||
+#include "db/blob/blob_file_cache.h"
|
|
||||||
#include "db/blob/blob_file_meta.h"
|
|
||||||
#include "db/dbformat.h"
|
|
||||||
#include "db/internal_stats.h"
|
|
||||||
@@ -744,12 +745,9 @@ class VersionBuilder::Rep {
|
|
||||||
return Status::Corruption("VersionBuilder", oss.str());
|
|
||||||
}
|
|
||||||
|
|
||||||
- // Note: we use C++11 for now but in C++14, this could be done in a more
|
|
||||||
- // elegant way using generalized lambda capture.
|
|
||||||
- VersionSet* const vs = version_set_;
|
|
||||||
- const ImmutableCFOptions* const ioptions = ioptions_;
|
|
||||||
-
|
|
||||||
- auto deleter = [vs, ioptions](SharedBlobFileMetaData* shared_meta) {
|
|
||||||
+ auto deleter = [vs = version_set_, ioptions = ioptions_,
|
|
||||||
+ bc = cfd_ ? cfd_->blob_file_cache()
|
|
||||||
+ : nullptr](SharedBlobFileMetaData* shared_meta) {
|
|
||||||
if (vs) {
|
|
||||||
assert(ioptions);
|
|
||||||
assert(!ioptions->cf_paths.empty());
|
|
||||||
@@ -758,6 +756,9 @@ class VersionBuilder::Rep {
|
|
||||||
vs->AddObsoleteBlobFile(shared_meta->GetBlobFileNumber(),
|
|
||||||
ioptions->cf_paths.front().path);
|
|
||||||
}
|
|
||||||
+ if (bc) {
|
|
||||||
+ bc->Evict(shared_meta->GetBlobFileNumber());
|
|
||||||
+ }
|
|
||||||
|
|
||||||
delete shared_meta;
|
|
||||||
};
|
|
||||||
@@ -766,7 +767,7 @@ class VersionBuilder::Rep {
|
|
||||||
blob_file_number, blob_file_addition.GetTotalBlobCount(),
|
|
||||||
blob_file_addition.GetTotalBlobBytes(),
|
|
||||||
blob_file_addition.GetChecksumMethod(),
|
|
||||||
- blob_file_addition.GetChecksumValue(), deleter);
|
|
||||||
+ blob_file_addition.GetChecksumValue(), std::move(deleter));
|
|
||||||
|
|
||||||
mutable_blob_file_metas_.emplace(
|
|
||||||
blob_file_number, MutableBlobFileMetaData(std::move(shared_meta)));
|
|
||||||
diff --git a/db/version_set.h b/db/version_set.h
|
|
||||||
index 9336782b1..024f869e7 100644
|
|
||||||
--- a/db/version_set.h
|
|
||||||
+++ b/db/version_set.h
|
|
||||||
@@ -1514,7 +1514,6 @@ class VersionSet {
|
|
||||||
void GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata);
|
|
||||||
|
|
||||||
void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) {
|
|
||||||
- // TODO: Erase file from BlobFileCache?
|
|
||||||
obsolete_blob_files_.emplace_back(blob_file_number, std::move(path));
|
|
||||||
}
|
|
||||||
|
|
||||||
diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h
|
|
||||||
index 2a19796b8..0afa2cab1 100644
|
|
||||||
--- a/include/rocksdb/version.h
|
|
||||||
+++ b/include/rocksdb/version.h
|
|
||||||
@@ -13,7 +13,7 @@
|
|
||||||
// minor or major version number planned for release.
|
|
||||||
#define ROCKSDB_MAJOR 9
|
|
||||||
#define ROCKSDB_MINOR 7
|
|
||||||
-#define ROCKSDB_PATCH 3
|
|
||||||
+#define ROCKSDB_PATCH 4
|
|
||||||
|
|
||||||
// Do not use these. We made the mistake of declaring macros starting with
|
|
||||||
// double underscore. Now we have to live with our choice. We'll deprecate these
|
|
||||||
diff --git a/port/port.h b/port/port.h
|
|
||||||
index 13aa56d47..141716e5b 100644
|
|
||||||
--- a/port/port.h
|
|
||||||
+++ b/port/port.h
|
|
||||||
@@ -19,3 +19,19 @@
|
|
||||||
#elif defined(OS_WIN)
|
|
||||||
#include "port/win/port_win.h"
|
|
||||||
#endif
|
|
||||||
+
|
|
||||||
+#ifdef OS_LINUX
|
|
||||||
+// A temporary hook into long-running RocksDB threads to support modifying their
|
|
||||||
+// priority etc. This should become a public API hook once the requirements
|
|
||||||
+// are better understood.
|
|
||||||
+extern "C" void RocksDbThreadYield() __attribute__((__weak__));
|
|
||||||
+#define ROCKSDB_THREAD_YIELD_HOOK() \
|
|
||||||
+ { \
|
|
||||||
+ if (RocksDbThreadYield) { \
|
|
||||||
+ RocksDbThreadYield(); \
|
|
||||||
+ } \
|
|
||||||
+ }
|
|
||||||
+#else
|
|
||||||
+#define ROCKSDB_THREAD_YIELD_HOOK() \
|
|
||||||
+ {}
|
|
||||||
+#endif
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
diff --git a/CMakeLists.txt b/CMakeLists.txt
|
|
||||||
index 93b884d..b715cb6 100644
|
|
||||||
--- a/CMakeLists.txt
|
|
||||||
+++ b/CMakeLists.txt
|
|
||||||
@@ -106,14 +106,9 @@ endif()
|
|
||||||
include(CMakeDependentOption)
|
|
||||||
|
|
||||||
if(MSVC)
|
|
||||||
- option(WITH_GFLAGS "build with GFlags" OFF)
|
|
||||||
option(WITH_XPRESS "build with windows built in compression" OFF)
|
|
||||||
- option(ROCKSDB_SKIP_THIRDPARTY "skip thirdparty.inc" OFF)
|
|
||||||
-
|
|
||||||
- if(NOT ROCKSDB_SKIP_THIRDPARTY)
|
|
||||||
- include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc)
|
|
||||||
- endif()
|
|
||||||
-else()
|
|
||||||
+endif()
|
|
||||||
+if(TRUE)
|
|
||||||
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
|
|
||||||
# FreeBSD has jemalloc as default malloc
|
|
||||||
# but it does not have all the jemalloc files in include/...
|
|
||||||
@@ -126,7 +121,7 @@ else()
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
- if(MINGW)
|
|
||||||
+ if(MSVC OR MINGW)
|
|
||||||
option(WITH_GFLAGS "build with GFlags" OFF)
|
|
||||||
else()
|
|
||||||
option(WITH_GFLAGS "build with GFlags" ON)
|
|
||||||
144
external/secp256k1/CHANGELOG.md
vendored
144
external/secp256k1/CHANGELOG.md
vendored
@@ -8,153 +8,189 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
## [0.6.0] - 2024-11-04
|
## [0.6.0] - 2024-11-04
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See:
|
|
||||||
- Header file `include/secp256k1_musig.h` which defines the new API.
|
- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See:
|
||||||
- Document `doc/musig.md` for further notes on API usage.
|
- Header file `include/secp256k1_musig.h` which defines the new API.
|
||||||
- Usage example `examples/musig.c`.
|
- Document `doc/musig.md` for further notes on API usage.
|
||||||
- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command.
|
- Usage example `examples/musig.c`.
|
||||||
|
- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal.
|
|
||||||
- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++).
|
- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal.
|
||||||
- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility.
|
- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++).
|
||||||
|
- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility.
|
||||||
|
|
||||||
#### Removed
|
#### Removed
|
||||||
- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API.
|
|
||||||
|
- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed.
|
The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed.
|
||||||
Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x.
|
Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x.
|
||||||
|
|
||||||
## [0.5.1] - 2024-08-01
|
## [0.5.1] - 2024-08-01
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added usage example for an ElligatorSwift key exchange.
|
|
||||||
|
- Added usage example for an ElligatorSwift key exchange.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
|
||||||
- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly.
|
- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
||||||
|
- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly.
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed compilation when the extrakeys module is disabled.
|
|
||||||
|
- Fixed compilation when the extrakeys module is disabled.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x.
|
The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x.
|
||||||
|
|
||||||
## [0.5.0] - 2024-05-06
|
## [0.5.0] - 2024-05-06
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order.
|
|
||||||
|
- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations.
|
|
||||||
- The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations.
|
||||||
- This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB).
|
- The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
||||||
|
- This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB).
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is backward compatible with versions 0.4.x and 0.3.x.
|
The ABI is backward compatible with versions 0.4.x and 0.3.x.
|
||||||
|
|
||||||
## [0.4.1] - 2023-12-21
|
## [0.4.1] - 2023-12-21
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
|
|
||||||
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
|
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
|
||||||
|
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is backward compatible with versions 0.4.0 and 0.3.x.
|
The ABI is backward compatible with versions 0.4.0 and 0.3.x.
|
||||||
|
|
||||||
## [0.4.0] - 2023-09-04
|
## [0.4.0] - 2023-09-04
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
|
|
||||||
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
|
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
|
||||||
- Header file `include/secp256k1_ellswift.h` which defines the new API.
|
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
|
||||||
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
|
- Header file `include/secp256k1_ellswift.h` which defines the new API.
|
||||||
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
|
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
|
||||||
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
|
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
|
||||||
|
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
|
|
||||||
|
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
|
|
||||||
|
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug.
|
This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug.
|
||||||
|
|
||||||
## [0.3.2] - 2023-05-13
|
## [0.3.2] - 2023-05-13
|
||||||
|
|
||||||
We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`.
|
We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`.
|
||||||
|
|
||||||
#### Security
|
#### Security
|
||||||
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
|
|
||||||
|
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
|
|
||||||
|
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Various improvements and changes to CMake builds. CMake builds remain experimental.
|
|
||||||
- Made API versioning consistent with GNU Autotools builds.
|
- Various improvements and changes to CMake builds. CMake builds remain experimental.
|
||||||
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
|
- Made API versioning consistent with GNU Autotools builds.
|
||||||
- Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts.
|
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
|
||||||
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
|
- Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts.
|
||||||
|
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is compatible with versions 0.3.0 and 0.3.1.
|
The ABI is compatible with versions 0.3.0 and 0.3.1.
|
||||||
|
|
||||||
## [0.3.1] - 2023-04-10
|
## [0.3.1] - 2023-04-10
|
||||||
|
|
||||||
We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`.
|
We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`.
|
||||||
|
|
||||||
#### Security
|
#### Security
|
||||||
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
|
|
||||||
|
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
|
|
||||||
|
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
|
|
||||||
|
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is compatible with version 0.3.0.
|
The ABI is compatible with version 0.3.0.
|
||||||
|
|
||||||
## [0.3.0] - 2023-03-08
|
## [0.3.0] - 2023-03-08
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
|
|
||||||
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
|
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
|
||||||
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
|
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
|
||||||
|
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
|
|
||||||
|
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
|
|
||||||
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
|
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
|
||||||
|
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
|
||||||
|
|
||||||
#### Removed
|
#### Removed
|
||||||
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
|
|
||||||
|
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is *not* compatible with previous versions.
|
|
||||||
|
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is _not_ compatible with previous versions.
|
||||||
|
|
||||||
## [0.2.0] - 2022-12-12
|
## [0.2.0] - 2022-12-12
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added usage examples for common use cases in a new `examples/` directory.
|
|
||||||
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
|
- Added usage examples for common use cases in a new `examples/` directory.
|
||||||
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
|
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
|
||||||
|
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
|
|
||||||
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
|
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
|
||||||
|
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
|
||||||
|
|
||||||
#### Deprecated
|
#### Deprecated
|
||||||
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
|
|
||||||
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
|
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
|
||||||
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
|
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
|
||||||
|
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
Since this is the first release, we do not compare application binary interfaces.
|
Since this is the first release, we do not compare application binary interfaces.
|
||||||
However, there are earlier unreleased versions of libsecp256k1 that are *not* ABI compatible with this version.
|
However, there are earlier unreleased versions of libsecp256k1 that are _not_ ABI compatible with this version.
|
||||||
|
|
||||||
## [0.1.0] - 2013-03-05 to 2021-12-25
|
## [0.1.0] - 2013-03-05 to 2021-12-25
|
||||||
|
|
||||||
|
|||||||
6
external/secp256k1/CMakePresets.json
vendored
6
external/secp256k1/CMakePresets.json
vendored
@@ -1,5 +1,9 @@
|
|||||||
{
|
{
|
||||||
"cmakeMinimumRequired": {"major": 3, "minor": 21, "patch": 0},
|
"cmakeMinimumRequired": {
|
||||||
|
"major": 3,
|
||||||
|
"minor": 21,
|
||||||
|
"patch": 0
|
||||||
|
},
|
||||||
"version": 3,
|
"version": 3,
|
||||||
"configurePresets": [
|
"configurePresets": [
|
||||||
{
|
{
|
||||||
|
|||||||
74
external/secp256k1/CONTRIBUTING.md
vendored
74
external/secp256k1/CONTRIBUTING.md
vendored
@@ -12,15 +12,15 @@ The libsecp256k1 project welcomes contributions in the form of new functionality
|
|||||||
It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable.
|
It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable.
|
||||||
Contributors are recommended to provide the following in addition to the new code:
|
Contributors are recommended to provide the following in addition to the new code:
|
||||||
|
|
||||||
* **Specification:**
|
- **Specification:**
|
||||||
A specification can help significantly in reviewing the new code as it provides documentation and context.
|
A specification can help significantly in reviewing the new code as it provides documentation and context.
|
||||||
It may justify various design decisions, give a motivation and outline security goals.
|
It may justify various design decisions, give a motivation and outline security goals.
|
||||||
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
|
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
|
||||||
* **Security Arguments:**
|
- **Security Arguments:**
|
||||||
In addition to a defining the security goals, it should be argued that the new functionality meets these goals.
|
In addition to a defining the security goals, it should be argued that the new functionality meets these goals.
|
||||||
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
|
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
|
||||||
* **Relevance Arguments:**
|
- **Relevance Arguments:**
|
||||||
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
|
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
|
||||||
|
|
||||||
These are not the only factors taken into account when considering to add new functionality.
|
These are not the only factors taken into account when considering to add new functionality.
|
||||||
The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
|
The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
|
||||||
@@ -44,36 +44,36 @@ The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Co
|
|||||||
|
|
||||||
In addition, libsecp256k1 tries to maintain the following coding conventions:
|
In addition, libsecp256k1 tries to maintain the following coding conventions:
|
||||||
|
|
||||||
* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
|
- No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
|
||||||
* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
|
- The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
|
||||||
* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
|
- Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
|
||||||
* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
|
- Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
|
||||||
* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
|
- Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
|
||||||
* As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)).
|
- As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)).
|
||||||
|
|
||||||
#### Style conventions
|
#### Style conventions
|
||||||
|
|
||||||
* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
|
- Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
|
||||||
* New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
|
- New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
|
||||||
* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
|
- The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
|
||||||
```C
|
```C
|
||||||
void secp256k1_foo(void) {
|
void secp256k1_foo(void) {
|
||||||
unsigned int x; /* declaration */
|
unsigned int x; /* declaration */
|
||||||
int y = 2*x; /* declaration */
|
int y = 2*x; /* declaration */
|
||||||
x = 17; /* statement */
|
x = 17; /* statement */
|
||||||
{
|
{
|
||||||
int a, b; /* declaration */
|
int a, b; /* declaration */
|
||||||
a = x + y; /* statement */
|
a = x + y; /* statement */
|
||||||
secp256k1_bar(x, &b); /* statement */
|
secp256k1_bar(x, &b); /* statement */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
* Use `unsigned int` instead of just `unsigned`.
|
- Use `unsigned int` instead of just `unsigned`.
|
||||||
* Use `void *ptr` instead of `void* ptr`.
|
- Use `void *ptr` instead of `void* ptr`.
|
||||||
* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
|
- Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
|
||||||
* User-facing comment lines in headers should be limited to 80 chars if possible.
|
- User-facing comment lines in headers should be limited to 80 chars if possible.
|
||||||
* All identifiers in file scope should start with `secp256k1_`.
|
- All identifiers in file scope should start with `secp256k1_`.
|
||||||
* Avoid trailing whitespace.
|
- Avoid trailing whitespace.
|
||||||
|
|
||||||
### Tests
|
### Tests
|
||||||
|
|
||||||
@@ -101,7 +101,7 @@ To create a HTML report with coloured and annotated source code:
|
|||||||
#### Exhaustive tests
|
#### Exhaustive tests
|
||||||
|
|
||||||
There are tests of several functions in which a small group replaces secp256k1.
|
There are tests of several functions in which a small group replaces secp256k1.
|
||||||
These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
|
These tests are _exhaustive_ since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
|
||||||
|
|
||||||
### Benchmarks
|
### Benchmarks
|
||||||
|
|
||||||
|
|||||||
130
external/secp256k1/README.md
vendored
130
external/secp256k1/README.md
vendored
@@ -1,5 +1,4 @@
|
|||||||
libsecp256k1
|
# libsecp256k1
|
||||||
============
|
|
||||||
|
|
||||||

|

|
||||||
[](https://web.libera.chat/#secp256k1)
|
[](https://web.libera.chat/#secp256k1)
|
||||||
@@ -9,60 +8,59 @@ High-performance high-assurance C library for digital signatures and other crypt
|
|||||||
This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.
|
This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.
|
||||||
|
|
||||||
Features:
|
Features:
|
||||||
* secp256k1 ECDSA signing/verification and key generation.
|
|
||||||
* Additive and multiplicative tweaking of secret/public keys.
|
|
||||||
* Serialization/parsing of secret keys, public keys, signatures.
|
|
||||||
* Constant time, constant memory access signing and public key generation.
|
|
||||||
* Derandomized ECDSA (via RFC6979 or with a caller provided function.)
|
|
||||||
* Very efficient implementation.
|
|
||||||
* Suitable for embedded systems.
|
|
||||||
* No runtime dependencies.
|
|
||||||
* Optional module for public key recovery.
|
|
||||||
* Optional module for ECDH key exchange.
|
|
||||||
* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
|
|
||||||
* Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
|
|
||||||
* Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki).
|
|
||||||
|
|
||||||
Implementation details
|
- secp256k1 ECDSA signing/verification and key generation.
|
||||||
----------------------
|
- Additive and multiplicative tweaking of secret/public keys.
|
||||||
|
- Serialization/parsing of secret keys, public keys, signatures.
|
||||||
|
- Constant time, constant memory access signing and public key generation.
|
||||||
|
- Derandomized ECDSA (via RFC6979 or with a caller provided function.)
|
||||||
|
- Very efficient implementation.
|
||||||
|
- Suitable for embedded systems.
|
||||||
|
- No runtime dependencies.
|
||||||
|
- Optional module for public key recovery.
|
||||||
|
- Optional module for ECDH key exchange.
|
||||||
|
- Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
|
||||||
|
- Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
|
||||||
|
- Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki).
|
||||||
|
|
||||||
* General
|
## Implementation details
|
||||||
* No runtime heap allocation.
|
|
||||||
* Extensive testing infrastructure.
|
|
||||||
* Structured to facilitate review and analysis.
|
|
||||||
* Intended to be portable to any system with a C89 compiler and uint64_t support.
|
|
||||||
* No use of floating types.
|
|
||||||
* Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
|
|
||||||
* Field operations
|
|
||||||
* Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
|
|
||||||
* Using 5 52-bit limbs
|
|
||||||
* Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
|
|
||||||
* This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
|
|
||||||
* Scalar operations
|
|
||||||
* Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
|
|
||||||
* Using 4 64-bit limbs (relying on __int128 support in the compiler).
|
|
||||||
* Using 8 32-bit limbs.
|
|
||||||
* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
|
|
||||||
* Group operations
|
|
||||||
* Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
|
|
||||||
* Use addition between points in Jacobian and affine coordinates where possible.
|
|
||||||
* Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
|
|
||||||
* Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
|
|
||||||
* Point multiplication for verification (a*P + b*G).
|
|
||||||
* Use wNAF notation for point multiplicands.
|
|
||||||
* Use a much larger window for multiples of G, using precomputed multiples.
|
|
||||||
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
|
|
||||||
* Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
|
|
||||||
* Point multiplication for signing
|
|
||||||
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
|
|
||||||
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
|
|
||||||
* Access the table with branch-free conditional moves so memory access is uniform.
|
|
||||||
* No data-dependent branches
|
|
||||||
* Optional runtime blinding which attempts to frustrate differential power analysis.
|
|
||||||
* The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally.
|
|
||||||
|
|
||||||
Building with Autotools
|
- General
|
||||||
-----------------------
|
- No runtime heap allocation.
|
||||||
|
- Extensive testing infrastructure.
|
||||||
|
- Structured to facilitate review and analysis.
|
||||||
|
- Intended to be portable to any system with a C89 compiler and uint64_t support.
|
||||||
|
- No use of floating types.
|
||||||
|
- Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
|
||||||
|
- Field operations
|
||||||
|
- Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
|
||||||
|
- Using 5 52-bit limbs
|
||||||
|
- Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
|
||||||
|
- This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
|
||||||
|
- Scalar operations
|
||||||
|
- Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
|
||||||
|
- Using 4 64-bit limbs (relying on \_\_int128 support in the compiler).
|
||||||
|
- Using 8 32-bit limbs.
|
||||||
|
- Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
|
||||||
|
- Group operations
|
||||||
|
- Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
|
||||||
|
- Use addition between points in Jacobian and affine coordinates where possible.
|
||||||
|
- Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
|
||||||
|
- Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
|
||||||
|
- Point multiplication for verification (a*P + b*G).
|
||||||
|
- Use wNAF notation for point multiplicands.
|
||||||
|
- Use a much larger window for multiples of G, using precomputed multiples.
|
||||||
|
- Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
|
||||||
|
- Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
|
||||||
|
- Point multiplication for signing
|
||||||
|
- Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
|
||||||
|
- Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
|
||||||
|
- Access the table with branch-free conditional moves so memory access is uniform.
|
||||||
|
- No data-dependent branches
|
||||||
|
- Optional runtime blinding which attempts to frustrate differential power analysis.
|
||||||
|
- The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally.
|
||||||
|
|
||||||
|
## Building with Autotools
|
||||||
|
|
||||||
$ ./autogen.sh
|
$ ./autogen.sh
|
||||||
$ ./configure
|
$ ./configure
|
||||||
@@ -72,8 +70,7 @@ Building with Autotools
|
|||||||
|
|
||||||
To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags.
|
To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags.
|
||||||
|
|
||||||
Building with CMake (experimental)
|
## Building with CMake (experimental)
|
||||||
----------------------------------
|
|
||||||
|
|
||||||
To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree.
|
To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree.
|
||||||
|
|
||||||
@@ -109,18 +106,19 @@ In "Developer Command Prompt for VS 2022":
|
|||||||
>cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
|
>cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
|
||||||
>cmake --build build --config RelWithDebInfo
|
>cmake --build build --config RelWithDebInfo
|
||||||
|
|
||||||
Usage examples
|
## Usage examples
|
||||||
-----------
|
|
||||||
Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`.
|
Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`.
|
||||||
* [ECDSA example](examples/ecdsa.c)
|
|
||||||
* [Schnorr signatures example](examples/schnorr.c)
|
- [ECDSA example](examples/ecdsa.c)
|
||||||
* [Deriving a shared secret (ECDH) example](examples/ecdh.c)
|
- [Schnorr signatures example](examples/schnorr.c)
|
||||||
* [ElligatorSwift key exchange example](examples/ellswift.c)
|
- [Deriving a shared secret (ECDH) example](examples/ecdh.c)
|
||||||
|
- [ElligatorSwift key exchange example](examples/ellswift.c)
|
||||||
|
|
||||||
To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`.
|
To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`.
|
||||||
|
|
||||||
Benchmark
|
## Benchmark
|
||||||
------------
|
|
||||||
If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build.
|
If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build.
|
||||||
|
|
||||||
To print the benchmark result to the command line:
|
To print the benchmark result to the command line:
|
||||||
@@ -131,12 +129,10 @@ To create a CSV file for the benchmark result :
|
|||||||
|
|
||||||
$ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv
|
$ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv
|
||||||
|
|
||||||
Reporting a vulnerability
|
## Reporting a vulnerability
|
||||||
------------
|
|
||||||
|
|
||||||
See [SECURITY.md](SECURITY.md)
|
See [SECURITY.md](SECURITY.md)
|
||||||
|
|
||||||
Contributing to libsecp256k1
|
## Contributing to libsecp256k1
|
||||||
------------
|
|
||||||
|
|
||||||
See [CONTRIBUTING.md](CONTRIBUTING.md)
|
See [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||||
|
|||||||
10
external/secp256k1/SECURITY.md
vendored
10
external/secp256k1/SECURITY.md
vendored
@@ -6,10 +6,10 @@ To report security issues send an email to secp256k1-security@bitcoincore.org (n
|
|||||||
|
|
||||||
The following keys may be used to communicate sensitive information to developers:
|
The following keys may be used to communicate sensitive information to developers:
|
||||||
|
|
||||||
| Name | Fingerprint |
|
| Name | Fingerprint |
|
||||||
|------|-------------|
|
| ------------- | ------------------------------------------------- |
|
||||||
| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 |
|
| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 |
|
||||||
| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 |
|
| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 |
|
||||||
| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 |
|
| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 |
|
||||||
|
|
||||||
You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys "<fingerprint>"` Ensure that you put quotes around fingerprints containing spaces.
|
You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys "<fingerprint>"` Ensure that you put quotes around fingerprints containing spaces.
|
||||||
|
|||||||
410
external/secp256k1/doc/ellswift.md
vendored
410
external/secp256k1/doc/ellswift.md
vendored
@@ -5,17 +5,17 @@ construction in the
|
|||||||
["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759)
|
["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759)
|
||||||
paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi.
|
paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi.
|
||||||
|
|
||||||
* [1. Introduction](#1-introduction)
|
- [1. Introduction](#1-introduction)
|
||||||
* [2. The decoding function](#2-the-decoding-function)
|
- [2. The decoding function](#2-the-decoding-function)
|
||||||
+ [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
|
- [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
|
||||||
* [3. The encoding function](#3-the-encoding-function)
|
- [3. The encoding function](#3-the-encoding-function)
|
||||||
+ [3.1 Switching to *v, w* coordinates](#31-switching-to-v-w-coordinates)
|
- [3.1 Switching to _v, w_ coordinates](#31-switching-to-v-w-coordinates)
|
||||||
+ [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
|
- [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
|
||||||
+ [3.3 Finding the inverse](#33-finding-the-inverse)
|
- [3.3 Finding the inverse](#33-finding-the-inverse)
|
||||||
+ [3.4 Dealing with special cases](#34-dealing-with-special-cases)
|
- [3.4 Dealing with special cases](#34-dealing-with-special-cases)
|
||||||
+ [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
|
- [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
|
||||||
* [4. Encoding and decoding full *(x, y)* coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
|
- [4. Encoding and decoding full _(x, y)_ coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
|
||||||
+ [4.1 Full *(x, y)* coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
|
- [4.1 Full _(x, y)_ coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
|
||||||
|
|
||||||
## 1. Introduction
|
## 1. Introduction
|
||||||
|
|
||||||
@@ -34,13 +34,14 @@ are taken modulo $p$), and then evaluating $F_u(t)$, which for every $u$ and $t$
|
|||||||
x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function).
|
x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function).
|
||||||
|
|
||||||
**Encoding** a given $x$ coordinate is conceptually done as follows:
|
**Encoding** a given $x$ coordinate is conceptually done as follows:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
|
||||||
* Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to *8* elements.
|
|
||||||
* With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
|
|
||||||
* Select a uniformly random $t \in L$ and return $(u, t).$
|
|
||||||
|
|
||||||
This is the *ElligatorSwift* algorithm, here given for just x-coordinates. An extension to full
|
- Loop:
|
||||||
|
- Pick a uniformly random field element $u.$
|
||||||
|
- Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to _8_ elements.
|
||||||
|
- With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
|
||||||
|
- Select a uniformly random $t \in L$ and return $(u, t).$
|
||||||
|
|
||||||
|
This is the _ElligatorSwift_ algorithm, here given for just x-coordinates. An extension to full
|
||||||
$(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates).
|
$(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates).
|
||||||
The algorithm finds a uniformly random $(u, t)$ among (almost all) those
|
The algorithm finds a uniformly random $(u, t)$ among (almost all) those
|
||||||
for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for
|
for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for
|
||||||
@@ -50,37 +51,40 @@ almost all x-coordinates on the curve (all but at most 39) is close to two times
|
|||||||
## 2. The decoding function
|
## 2. The decoding function
|
||||||
|
|
||||||
First some definitions:
|
First some definitions:
|
||||||
* $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
|
|
||||||
* For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
|
- $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
|
||||||
* Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
|
- For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
|
||||||
|
- Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
|
||||||
public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square.
|
public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square.
|
||||||
This implies that the order of $E$ is either odd, or a multiple of *4*.
|
This implies that the order of $E$ is either odd, or a multiple of _4_.
|
||||||
If $a=0$, this condition is always fulfilled.
|
If $a=0$, this condition is always fulfilled.
|
||||||
* For `secp256k1`, $a=0$ and $b=7.$
|
- For `secp256k1`, $a=0$ and $b=7.$
|
||||||
* Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
|
- Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
|
||||||
* Let the function $h(x) = 3x^3 + 4a.$
|
- Let the function $h(x) = 3x^3 + 4a.$
|
||||||
* Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
|
- Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
|
||||||
* Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
|
- Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
|
||||||
* $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
|
- $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
|
||||||
* $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
|
- $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
|
||||||
|
|
||||||
**Note**: In the paper:
|
**Note**: In the paper:
|
||||||
* $F_u$ corresponds to $F_{0,u}$ there.
|
|
||||||
* $P_u(t)$ is called $P$ there.
|
- $F_u$ corresponds to $F_{0,u}$ there.
|
||||||
* All $S_u$ sets together correspond to $S$ there.
|
- $P_u(t)$ is called $P$ there.
|
||||||
* All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
|
- All $S_u$ sets together correspond to $S$ there.
|
||||||
|
- All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
|
||||||
|
|
||||||
Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right
|
Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right
|
||||||
hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$,
|
hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$,
|
||||||
out of the three right-hand side factors an even number must be non-squares.
|
out of the three right-hand side factors an even number must be non-squares.
|
||||||
This implies that exactly *1* or exactly *3* out of
|
This implies that exactly _1_ or exactly _3_ out of
|
||||||
$\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$,
|
$\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$,
|
||||||
at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception
|
at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception
|
||||||
to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate.
|
to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate.
|
||||||
|
|
||||||
**Define** the decoding function $F_u(t)$ as:
|
**Define** the decoding function $F_u(t)$ as:
|
||||||
* Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
|
|
||||||
* Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
|
- Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
|
||||||
|
- Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
|
||||||
|
|
||||||
$P_u(t) = (X(u, t), Y(u, t))$, where:
|
$P_u(t) = (X(u, t), Y(u, t))$, where:
|
||||||
|
|
||||||
@@ -98,12 +102,13 @@ Y(u, t) & = & \left\\{\begin{array}{ll}
|
|||||||
$$
|
$$
|
||||||
|
|
||||||
$P_u(t)$ is defined:
|
$P_u(t)$ is defined:
|
||||||
* For $a=0$, unless:
|
|
||||||
* $u = 0$ or $t = 0$ (division by zero)
|
- For $a=0$, unless:
|
||||||
* $g(u) = -t^2$ (would give $Y=0$).
|
- $u = 0$ or $t = 0$ (division by zero)
|
||||||
* For $a \neq 0$, unless:
|
- $g(u) = -t^2$ (would give $Y=0$).
|
||||||
* $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
|
- For $a \neq 0$, unless:
|
||||||
* $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
|
- $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
|
||||||
|
- $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
|
||||||
|
|
||||||
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$
|
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$
|
||||||
|
|
||||||
@@ -123,20 +128,22 @@ $$
|
|||||||
Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is:
|
Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is:
|
||||||
|
|
||||||
**Define** $F_u(t)$ as:
|
**Define** $F_u(t)$ as:
|
||||||
* Let $X = \dfrac{u^3 + b - t^2}{2t}.$
|
|
||||||
* Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
|
- Let $X = \dfrac{u^3 + b - t^2}{2t}.$
|
||||||
* Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
|
- Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
|
||||||
|
- Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
|
||||||
|
|
||||||
To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case
|
To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case
|
||||||
$P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$):
|
$P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$):
|
||||||
|
|
||||||
**Define** $F_u(t)$ as:
|
**Define** $F_u(t)$ as:
|
||||||
* Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
|
|
||||||
* Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
|
- Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
|
||||||
* Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
|
- Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
|
||||||
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
- Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
|
||||||
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
||||||
* Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
|
- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
||||||
|
- Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
|
||||||
|
|
||||||
The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice,
|
The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice,
|
||||||
but the approach here is simple enough and gives fairly uniform output even in these cases.
|
but the approach here is simple enough and gives fairly uniform output even in these cases.
|
||||||
@@ -150,10 +157,11 @@ in `secp256k1_ellswift_xswiftec_var` (which outputs the actual x-coordinate).
|
|||||||
## 3. The encoding function
|
## 3. The encoding function
|
||||||
|
|
||||||
To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process:
|
To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process:
|
||||||
* Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
|
|
||||||
* Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
|
- Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
|
||||||
* For each of the found $t$ values, verify that $F_u(t) = x.$
|
- Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
|
||||||
* Return the remaining $t$ values.
|
- For each of the found $t$ values, verify that $F_u(t) = x.$
|
||||||
|
- Return the remaining $t$ values.
|
||||||
|
|
||||||
The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$
|
The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$
|
||||||
|
|
||||||
@@ -185,13 +193,14 @@ precedence over both. Because of this, the $g(-u-x)$ being square test for $x_1$
|
|||||||
values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder;
|
values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder;
|
||||||
any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder.
|
any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder.
|
||||||
|
|
||||||
### 3.1 Switching to *v, w* coordinates
|
### 3.1 Switching to _v, w_ coordinates
|
||||||
|
|
||||||
Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and
|
Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and
|
||||||
$w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$
|
$w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$
|
||||||
* $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
|
|
||||||
* For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
|
- $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
|
||||||
* $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
|
- For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
|
||||||
|
- $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
|
||||||
|
|
||||||
$$
|
$$
|
||||||
\begin{array}{lcl}
|
\begin{array}{lcl}
|
||||||
@@ -204,34 +213,37 @@ $$
|
|||||||
|
|
||||||
We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$
|
We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$
|
||||||
expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable:
|
expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable:
|
||||||
* Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
|
||||||
* Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
- Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
||||||
* Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
|
- Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
||||||
|
- Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
|
||||||
|
|
||||||
### 3.2 Avoiding computing all inverses
|
### 3.2 Avoiding computing all inverses
|
||||||
|
|
||||||
The *ElligatorSwift* algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
|
The _ElligatorSwift_ algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
|
||||||
set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary.
|
set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary.
|
||||||
|
|
||||||
Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a
|
Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a
|
||||||
uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8,
|
uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8,
|
||||||
picking a uniformly random element from that, restarting whenever $\bot$ is picked:
|
picking a uniformly random element from that, restarting whenever $\bot$ is picked:
|
||||||
|
|
||||||
**Define** *ElligatorSwift(x)* as:
|
**Define** _ElligatorSwift(x)_ as:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
- Loop:
|
||||||
* Compute the set $L = F_u^{-1}(x).$
|
- Pick a uniformly random field element $u.$
|
||||||
* Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
|
- Compute the set $L = F_u^{-1}(x).$
|
||||||
* Select a uniformly random $t \in T.$
|
- Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
|
||||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
- Select a uniformly random $t \in T.$
|
||||||
|
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||||
|
|
||||||
Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly
|
Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly
|
||||||
random element in it, so we do not need to have all $\bot$ values at the end.
|
random element in it, so we do not need to have all $\bot$ values at the end.
|
||||||
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account),
|
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account),
|
||||||
we can associate every index in $T$ with exactly one of those formulas, making sure that:
|
we can associate every index in $T$ with exactly one of those formulas, making sure that:
|
||||||
* Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
|
|
||||||
* For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
|
- Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
|
||||||
* In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
|
- For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
|
||||||
|
- In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
|
||||||
|
|
||||||
The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting
|
The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting
|
||||||
to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases)
|
to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases)
|
||||||
@@ -240,12 +252,13 @@ for an analysis of all the negligible cases.
|
|||||||
If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas,
|
If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas,
|
||||||
the loop can be simplified to only compute one of the inverses instead of all of them:
|
the loop can be simplified to only compute one of the inverses instead of all of them:
|
||||||
|
|
||||||
**Define** *ElligatorSwift(x)* as:
|
**Define** _ElligatorSwift(x)_ as:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
- Loop:
|
||||||
* Pick a uniformly random integer $c$ in $[0,8).$
|
- Pick a uniformly random field element $u.$
|
||||||
* Let $t = G_{c,u}(x).$
|
- Pick a uniformly random integer $c$ in $[0,8).$
|
||||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
- Let $t = G_{c,u}(x).$
|
||||||
|
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||||
|
|
||||||
This is implemented in `secp256k1_ellswift_xelligatorswift_var`.
|
This is implemented in `secp256k1_ellswift_xelligatorswift_var`.
|
||||||
|
|
||||||
@@ -256,18 +269,19 @@ Those are then repeated as $c=4$ through $c=7$ for the other sign of $w$ (noting
|
|||||||
Ignoring the negligible cases, we get:
|
Ignoring the negligible cases, we get:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x)$ as:
|
**Define** $G_{c,u}(x)$ as:
|
||||||
* If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
|
|
||||||
* If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
|
- If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
|
||||||
* If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula)
|
- If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
|
||||||
* Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
|
- If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula)
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
|
- Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
- Let $s = x-u.$
|
||||||
* Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
||||||
* Let $w = \sqrt{s}.$
|
- Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
|
||||||
* Depending on $c:$
|
- Let $w = \sqrt{s}.$
|
||||||
* If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
|
- If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
|
||||||
|
- If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
|
||||||
|
|
||||||
Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly
|
Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly
|
||||||
50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen
|
50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen
|
||||||
@@ -284,20 +298,21 @@ transformation. Furthermore, that transformation has no effect on $s$ in the fir
|
|||||||
as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down:
|
as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x)$ as:
|
**Define** $G_{c,u}(x)$ as:
|
||||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
|
||||||
* If $g(-u-x)$ is square, return $\bot.$
|
- If $c \in \\{0, 1, 4, 5\\}:$
|
||||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a).$
|
- If $g(-u-x)$ is square, return $\bot.$
|
||||||
* Let $v = x.$
|
- Let $s = -g(u)/(u^2 + ux + x^2 + a).$
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
- Let $s = x-u.$
|
||||||
* Let $v = (r/s - u)/2.$
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
||||||
* Let $w = \sqrt{s}.$
|
- Let $v = (r/s - u)/2.$
|
||||||
* Depending on $c:$
|
- Let $w = \sqrt{s}.$
|
||||||
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
|
- If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
|
||||||
* If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
|
- If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
|
||||||
* If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
|
- If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
|
||||||
|
- If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
|
||||||
|
|
||||||
This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input.
|
This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input.
|
||||||
There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values.
|
There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values.
|
||||||
@@ -310,58 +325,60 @@ we analyse them here. They generally fall into two categories: cases in which th
|
|||||||
do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same
|
do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same
|
||||||
$t$ value for multiple $c$ inputs (thereby biasing that encoding):
|
$t$ value for multiple $c$ inputs (thereby biasing that encoding):
|
||||||
|
|
||||||
* In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
|
- In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
|
||||||
* When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
|
- When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
|
||||||
Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve
|
Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve
|
||||||
fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square).
|
fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square).
|
||||||
This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$),
|
This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$),
|
||||||
the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the
|
the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the
|
||||||
encoder anyway as there will generally be more than 8.
|
encoder anyway as there will generally be more than 8.
|
||||||
* When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
|
- When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
|
||||||
as it can deal with $g(u)=0$.
|
as it can deal with $g(u)=0$.
|
||||||
This is again only possible on even-ordered curves.
|
This is again only possible on even-ordered curves.
|
||||||
* In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
|
- In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
|
||||||
* When $s=0$, a division by zero would occur.
|
- When $s=0$, a division by zero would occur.
|
||||||
* When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
|
- When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
|
||||||
It is equivalent to checking whether $r=0$.
|
It is equivalent to checking whether $r=0$.
|
||||||
This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition.
|
This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition.
|
||||||
A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first
|
A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first
|
||||||
it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero.
|
it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero.
|
||||||
* Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
|
- Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
|
||||||
* For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
|
- For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
|
||||||
* For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
|
- For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
|
||||||
|
|
||||||
**Define** a version of $G_{c,u}(x)$ which deals with all these cases:
|
**Define** a version of $G_{c,u}(x)$ which deals with all these cases:
|
||||||
* If $a=0$ and $u=0$, return $\bot.$
|
|
||||||
* If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
|
- If $a=0$ and $u=0$, return $\bot.$
|
||||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
- If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
|
||||||
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
- If $c \in \\{0, 1, 4, 5\\}:$
|
||||||
* If $g(-u-x)$ is square, return $\bot.$
|
- If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
||||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
- If $g(-u-x)$ is square, return $\bot.$
|
||||||
* Let $v = x.$
|
- Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
- Let $s = x-u.$
|
||||||
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
||||||
* If $s = 0$, return $\bot.$
|
- If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
||||||
* Let $v = (r/s - u)/2.$
|
- If $s = 0$, return $\bot.$
|
||||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
- Let $v = (r/s - u)/2.$
|
||||||
* If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
|
- Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||||
* Depending on $c:$
|
- If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
|
||||||
* If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
|
- If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
|
||||||
* If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
|
- If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
|
||||||
* If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
|
- If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
|
||||||
* If $a=0$ and $t=0$, return $\bot$ (even curves only).
|
- If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
|
||||||
* If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
|
- If $a=0$ and $t=0$, return $\bot$ (even curves only).
|
||||||
* Return $t.$
|
- If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
|
||||||
|
- Return $t.$
|
||||||
|
|
||||||
Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once,
|
Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once,
|
||||||
for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached:
|
for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached:
|
||||||
* All cases where $P_u(t)$ is not defined:
|
|
||||||
* For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
|
- All cases where $P_u(t)$ is not defined:
|
||||||
* For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
|
- For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
|
||||||
* When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
|
- For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
|
||||||
|
- When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
|
||||||
|
|
||||||
These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves.
|
These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves.
|
||||||
|
|
||||||
@@ -370,40 +387,42 @@ These cases form a negligible subset of all $(u, t)$ for cryptographically sized
|
|||||||
Specialized for odd-ordered $a=0$ curves:
|
Specialized for odd-ordered $a=0$ curves:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x)$ as:
|
**Define** $G_{c,u}(x)$ as:
|
||||||
* If $u=0$, return $\bot.$
|
|
||||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
- If $u=0$, return $\bot.$
|
||||||
* If $(-u-x)^3 + b$ is square, return $\bot$
|
- If $c \in \\{0, 1, 4, 5\\}:$
|
||||||
* Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
|
- If $(-u-x)^3 + b$ is square, return $\bot$
|
||||||
* Let $v = x.$
|
- Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||||
* Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
|
- Let $s = x-u.$
|
||||||
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
- Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
|
||||||
* If $s = 0$, return $\bot.$
|
- If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
||||||
* Let $v = (r/s - u)/2.$
|
- If $s = 0$, return $\bot.$
|
||||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
- Let $v = (r/s - u)/2.$
|
||||||
* Depending on $c:$
|
- Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||||
* If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
|
- If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
|
||||||
* If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
|
- If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
|
||||||
* If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
|
- If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
|
||||||
|
- If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
|
||||||
|
|
||||||
This is implemented in `secp256k1_ellswift_xswiftec_inv_var`.
|
This is implemented in `secp256k1_ellswift_xswiftec_inv_var`.
|
||||||
|
|
||||||
And the x-only ElligatorSwift encoding algorithm is still:
|
And the x-only ElligatorSwift encoding algorithm is still:
|
||||||
|
|
||||||
**Define** *ElligatorSwift(x)* as:
|
**Define** _ElligatorSwift(x)_ as:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
- Loop:
|
||||||
* Pick a uniformly random integer $c$ in $[0,8).$
|
- Pick a uniformly random field element $u.$
|
||||||
* Let $t = G_{c,u}(x).$
|
- Pick a uniformly random integer $c$ in $[0,8).$
|
||||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
- Let $t = G_{c,u}(x).$
|
||||||
|
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||||
|
|
||||||
Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them.
|
Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them.
|
||||||
While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$
|
While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$
|
||||||
combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity.
|
combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity.
|
||||||
|
|
||||||
## 4. Encoding and decoding full *(x, y)* coordinates
|
## 4. Encoding and decoding full _(x, y)_ coordinates
|
||||||
|
|
||||||
So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding
|
So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding
|
||||||
for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information
|
for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information
|
||||||
@@ -422,30 +441,32 @@ four distinct $P_u^{'-1}$ calls in the definition of $G_{u,c}.$
|
|||||||
|
|
||||||
To encode the sign of $y$ in the sign of $Y:$
|
To encode the sign of $y$ in the sign of $Y:$
|
||||||
|
|
||||||
**Define** *Decode(u, t)* for full $(x, y)$ as:
|
**Define** _Decode(u, t)_ for full $(x, y)$ as:
|
||||||
* Let $(X, Y) = P_u(t).$
|
|
||||||
* Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
|
- Let $(X, Y) = P_u(t).$
|
||||||
* Let $y = \sqrt{g(x)}.$
|
- Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
|
||||||
* If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
|
- Let $y = \sqrt{g(x)}.$
|
||||||
|
- If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
|
||||||
|
|
||||||
And encoding would be done using a $G_{c,u}(x, y)$ function defined as:
|
And encoding would be done using a $G_{c,u}(x, y)$ function defined as:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x, y)$ as:
|
**Define** $G_{c,u}(x, y)$ as:
|
||||||
* If $c \in \\{0, 1\\}:$
|
|
||||||
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
- If $c \in \\{0, 1\\}:$
|
||||||
* If $g(-u-x)$ is square, return $\bot.$
|
- If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
||||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
- If $g(-u-x)$ is square, return $\bot.$
|
||||||
* Let $v = x.$
|
- Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
||||||
* Otherwise, when $c \in \\{2, 3\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3\\}:$
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
- Let $s = x-u.$
|
||||||
* If $c = 3$ and $r = 0$, return $\bot.$
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
||||||
* Let $v = (r/s - u)/2.$
|
- If $c = 3$ and $r = 0$, return $\bot.$
|
||||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
- Let $v = (r/s - u)/2.$
|
||||||
* Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
|
- Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||||
* Depending on $c:$
|
- Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
|
||||||
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
|
- If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
|
||||||
|
- If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
|
||||||
|
|
||||||
Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$
|
Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$
|
||||||
This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$.
|
This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$.
|
||||||
@@ -454,22 +475,23 @@ In the above logic, $sign$ can be implemented in several ways, such as parity of
|
|||||||
of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where
|
of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where
|
||||||
$-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$.
|
$-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$.
|
||||||
|
|
||||||
### 4.1 Full *(x, y)* coordinates for `secp256k1`
|
### 4.1 Full _(x, y)_ coordinates for `secp256k1`
|
||||||
|
|
||||||
For $a=0$ curves, there is another option. Note that for those,
|
For $a=0$ curves, there is another option. Note that for those,
|
||||||
the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to
|
the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to
|
||||||
encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get
|
encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get
|
||||||
as decoder:
|
as decoder:
|
||||||
|
|
||||||
**Define** *Decode(u, t)* as:
|
**Define** _Decode(u, t)_ as:
|
||||||
* Let $u'=u$ if $u \neq 0$; $1$ otherwise.
|
|
||||||
* Let $t'=t$ if $t \neq 0$; $1$ otherwise.
|
- Let $u'=u$ if $u \neq 0$; $1$ otherwise.
|
||||||
* Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
|
- Let $t'=t$ if $t \neq 0$; $1$ otherwise.
|
||||||
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
- Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
|
||||||
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
||||||
* Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
|
- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
||||||
* Let $y = \sqrt{g(x)}.$
|
- Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
|
||||||
* Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
|
- Let $y = \sqrt{g(x)}.$
|
||||||
|
- Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
|
||||||
|
|
||||||
This is implemented in `secp256k1_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as an integer in $[0,q).$
|
This is implemented in `secp256k1_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as an integer in $[0,q).$
|
||||||
|
|
||||||
|
|||||||
3
external/secp256k1/doc/musig.md
vendored
3
external/secp256k1/doc/musig.md
vendored
@@ -1,5 +1,4 @@
|
|||||||
Notes on the musig module API
|
# Notes on the musig module API
|
||||||
===========================
|
|
||||||
|
|
||||||
The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`).
|
The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`).
|
||||||
A usage example can be found in `examples/musig.c`.
|
A usage example can be found in `examples/musig.c`.
|
||||||
|
|||||||
40
external/secp256k1/doc/release-process.md
vendored
40
external/secp256k1/doc/release-process.md
vendored
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`.
|
This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`.
|
||||||
|
|
||||||
We distinguish between two types of releases: *regular* and *maintenance* releases.
|
We distinguish between two types of releases: _regular_ and _maintenance_ releases.
|
||||||
Regular releases are releases of a new major or minor version as well as patches of the most recent release.
|
Regular releases are releases of a new major or minor version as well as patches of the most recent release.
|
||||||
Maintenance releases, on the other hand, are required for patches of older releases.
|
Maintenance releases, on the other hand, are required for patches of older releases.
|
||||||
|
|
||||||
@@ -15,6 +15,7 @@ This process also assumes that there will be no minor releases for old major rel
|
|||||||
We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core.
|
We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core.
|
||||||
|
|
||||||
## Sanity checks
|
## Sanity checks
|
||||||
|
|
||||||
Perform these checks when reviewing the release PR (see below):
|
Perform these checks when reviewing the release PR (see below):
|
||||||
|
|
||||||
1. Ensure `make distcheck` doesn't fail.
|
1. Ensure `make distcheck` doesn't fail.
|
||||||
@@ -42,15 +43,15 @@ Perform these checks when reviewing the release PR (see below):
|
|||||||
## Regular release
|
## Regular release
|
||||||
|
|
||||||
1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that
|
1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that
|
||||||
* finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
|
- finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
|
||||||
* adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
|
- adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
|
||||||
* removing the `[Unreleased]` section header,
|
- removing the `[Unreleased]` section header,
|
||||||
* ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and
|
- ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and
|
||||||
* including an entry for `### ABI Compatibility` if it doesn't exist,
|
- including an entry for `### ABI Compatibility` if it doesn't exist,
|
||||||
* sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and,
|
- sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and,
|
||||||
* if this is not a patch release,
|
- if this is not a patch release,
|
||||||
* updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and
|
- updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and
|
||||||
* updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
|
- updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
|
||||||
2. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
2. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
||||||
3. After the PR is merged, tag the commit, and push the tag:
|
3. After the PR is merged, tag the commit, and push the tag:
|
||||||
```
|
```
|
||||||
@@ -59,11 +60,12 @@ Perform these checks when reviewing the release PR (see below):
|
|||||||
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
||||||
```
|
```
|
||||||
4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that
|
4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that
|
||||||
* sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
|
- sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
|
||||||
* increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
|
- increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
|
||||||
* adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
|
- adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
|
||||||
|
|
||||||
If other maintainers are not present to approve the PR, it can be merged without ACKs.
|
If other maintainers are not present to approve the PR, it can be merged without ACKs.
|
||||||
|
|
||||||
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
||||||
6. Send an announcement email to the bitcoin-dev mailing list.
|
6. Send an announcement email to the bitcoin-dev mailing list.
|
||||||
|
|
||||||
@@ -77,9 +79,9 @@ Note that bug fixes need to be backported only to releases for which no compatib
|
|||||||
git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR
|
git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR
|
||||||
```
|
```
|
||||||
2. Open a pull request to the `$MAJOR.$MINOR` branch that
|
2. Open a pull request to the `$MAJOR.$MINOR` branch that
|
||||||
* includes the bug fixes,
|
- includes the bug fixes,
|
||||||
* finalizes the release notes similar to a regular release,
|
- finalizes the release notes similar to a regular release,
|
||||||
* increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
|
- increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
|
||||||
and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`
|
and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`
|
||||||
(with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example).
|
(with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example).
|
||||||
3. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
3. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
||||||
@@ -89,6 +91,6 @@ Note that bug fixes need to be backported only to releases for which no compatib
|
|||||||
git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH"
|
git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH"
|
||||||
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
||||||
```
|
```
|
||||||
6. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
||||||
7. Send an announcement email to the bitcoin-dev mailing list.
|
6. Send an announcement email to the bitcoin-dev mailing list.
|
||||||
8. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md).
|
7. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md).
|
||||||
|
|||||||
301
external/secp256k1/doc/safegcd_implementation.md
vendored
301
external/secp256k1/doc/safegcd_implementation.md
vendored
@@ -29,65 +29,67 @@ def gcd(f, g):
|
|||||||
return abs(f)
|
return abs(f)
|
||||||
```
|
```
|
||||||
|
|
||||||
It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop
|
It computes the greatest common divisor of an odd integer _f_ and any integer _g_. Its inner loop
|
||||||
keeps rewriting the variables *f* and *g* alongside a state variable *δ* that starts at *1*, until
|
keeps rewriting the variables _f_ and _g_ alongside a state variable _δ_ that starts at _1_, until
|
||||||
*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a
|
_g=0_ is reached. At that point, _|f|_ gives the GCD. Each of the transitions in the loop is called a
|
||||||
"division step" (referred to as divstep in what follows).
|
"division step" (referred to as divstep in what follows).
|
||||||
|
|
||||||
For example, *gcd(21, 14)* would be computed as:
|
For example, _gcd(21, 14)_ would be computed as:
|
||||||
- Start with *δ=1 f=21 g=14*
|
|
||||||
- Take the third branch: *δ=2 f=21 g=7*
|
- Start with _δ=1 f=21 g=14_
|
||||||
- Take the first branch: *δ=-1 f=7 g=-7*
|
- Take the third branch: _δ=2 f=21 g=7_
|
||||||
- Take the second branch: *δ=0 f=7 g=0*
|
- Take the first branch: _δ=-1 f=7 g=-7_
|
||||||
- The answer *|f| = 7*.
|
- Take the second branch: _δ=0 f=7 g=0_
|
||||||
|
- The answer _|f| = 7_.
|
||||||
|
|
||||||
Why it works:
|
Why it works:
|
||||||
|
|
||||||
- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
|
- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
|
||||||
- (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or *(f,g+f)*, resulting in an even *g*.
|
- (a) If _g_ is odd, replace _(f,g)_ with _(g,g-f)_ or _(f,g+f)_, resulting in an even _g_.
|
||||||
- (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even).
|
- (b) Replace _(f,g)_ with _(f,g/2)_ (where _g_ is guaranteed to be even).
|
||||||
- Neither of those two operations change the GCD:
|
- Neither of those two operations change the GCD:
|
||||||
- For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a c* and *g=b c* for some integers *a*
|
- For (a), assume _gcd(f,g)=c_, then it must be the case that _f=a c_ and _g=b c_ for some integers _a_
|
||||||
and *b*. As *(g,g-f)=(b c,(b-a)c)* and *(f,f+g)=(a c,(a+b)c)*, the result clearly still has
|
and _b_. As _(g,g-f)=(b c,(b-a)c)_ and _(f,f+g)=(a c,(a+b)c)_, the result clearly still has
|
||||||
common factor *c*. Reasoning in the other direction shows that no common factor can be added by
|
common factor _c_. Reasoning in the other direction shows that no common factor can be added by
|
||||||
doing so either.
|
doing so either.
|
||||||
- For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove
|
- For (b), we know that _f_ is odd, so _gcd(f,g)_ clearly has no factor _2_, and we can remove
|
||||||
it from *g*.
|
it from _g_.
|
||||||
- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3).
|
- The algorithm will eventually converge to _g=0_. This is proven in the paper (see theorem G.3).
|
||||||
- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the
|
- It follows that eventually we find a final value _f'_ for which _gcd(f,g) = gcd(f',0)_. As the
|
||||||
gcd of *f'* and *0* is *|f'|* by definition, that is our answer.
|
gcd of _f'_ and _0_ is _|f'|_ by definition, that is our answer.
|
||||||
|
|
||||||
Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
|
Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
|
||||||
the low-order bits of the variables to decide the next steps, and being easy to make
|
the low-order bits of the variables to decide the next steps, and being easy to make
|
||||||
constant-time (in more low-level languages than Python). The *δ* parameter is necessary to
|
constant-time (in more low-level languages than Python). The _δ_ parameter is necessary to
|
||||||
guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
|
guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
|
||||||
at high order bits.
|
at high order bits.
|
||||||
|
|
||||||
Properties that will become important later:
|
Properties that will become important later:
|
||||||
- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*.
|
|
||||||
- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we
|
|
||||||
do not need to worry about rounding.
|
|
||||||
- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N*
|
|
||||||
bits of *f* and *g*, and on *δ*.
|
|
||||||
|
|
||||||
|
- Performing more divsteps than needed is not a problem, as _f_ does not change anymore after _g=0_.
|
||||||
|
- Only even numbers are divided by _2_. This means that when reasoning about it algebraically we
|
||||||
|
do not need to worry about rounding.
|
||||||
|
- At every point during the algorithm's execution the next _N_ steps only depend on the bottom _N_
|
||||||
|
bits of _f_ and _g_, and on _δ_.
|
||||||
|
|
||||||
## 2. From GCDs to modular inverses
|
## 2. From GCDs to modular inverses
|
||||||
|
|
||||||
We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number a such that *a x=1
|
We want an algorithm to compute the inverse _a_ of _x_ modulo _M_, i.e. the number a such that _a x=1
|
||||||
mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is
|
mod M_. This inverse only exists if the GCD of _x_ and _M_ is _1_, but that is always the case if _M_ is
|
||||||
prime and *0 < x < M*. In what follows, assume that the modular inverse exists.
|
prime and _0 < x < M_. In what follows, assume that the modular inverse exists.
|
||||||
It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
|
It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
|
||||||
of how the internal variables can be written as linear combinations of the inputs at every step
|
of how the internal variables can be written as linear combinations of the inputs at every step
|
||||||
(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
|
(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
|
||||||
Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that *a x + b M = 1*.
|
Since the GCD is _1_, such an algorithm will compute numbers _a_ and _b_ such that _a x + b M = 1_.
|
||||||
Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x
|
Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x
|
||||||
mod M*.
|
mod M*.
|
||||||
|
|
||||||
A similar approach can be used to calculate modular inverses using the divsteps-based GCD
|
A similar approach can be used to calculate modular inverses using the divsteps-based GCD
|
||||||
algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping
|
algorithm shown above, if the modulus _M_ is odd. To do so, compute _gcd(f=M,g=x)_, while keeping
|
||||||
track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*.
|
track of extra variables _d_ and _e_, for which at every step _d = f/x (mod M)_ and _e = g/x (mod M)_.
|
||||||
*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M*
|
_f/x_ here means the number which multiplied with _x_ gives _f mod M_. As _f_ and _g_ are initialized to _M_
|
||||||
and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M
|
and _x_ respectively, _d_ and _e_ just start off being _0_ (_M/x mod M = 0/x mod M = 0_) and _1_ (_x/x mod M
|
||||||
= 1*).
|
= 1_).
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def div2(M, x):
|
def div2(M, x):
|
||||||
@@ -119,17 +121,16 @@ def modinv(M, x):
|
|||||||
return (d * f) % M
|
return (d * f) % M
|
||||||
```
|
```
|
||||||
|
|
||||||
Also note that this approach to track *d* and *e* throughout the computation to determine the inverse
|
Also note that this approach to track _d_ and _e_ throughout the computation to determine the inverse
|
||||||
is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
|
is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
|
||||||
entire computation is determined (see section 3 below) and the inverse is computed from that.
|
entire computation is determined (see section 3 below) and the inverse is computed from that.
|
||||||
The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
|
The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
|
||||||
be faster at the level of optimization we're able to do in C.
|
be faster at the level of optimization we're able to do in C.
|
||||||
|
|
||||||
|
|
||||||
## 3. Batching multiple divsteps
|
## 3. Batching multiple divsteps
|
||||||
|
|
||||||
Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)*
|
Every divstep can be expressed as a matrix multiplication, applying a transition matrix _(1/2 t)_
|
||||||
to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
|
to both vectors _[f, g]_ and _[d, e]_ (see paragraph 8.1 in the paper):
|
||||||
|
|
||||||
```
|
```
|
||||||
t = [ u, v ]
|
t = [ u, v ]
|
||||||
@@ -142,15 +143,15 @@ to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
|
|||||||
[ out_e ] [ in_e ]
|
[ out_e ] [ in_e ]
|
||||||
```
|
```
|
||||||
|
|
||||||
where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is
|
where _(u, v, q, r)_ is _(0, 2, -1, 1)_, _(2, 0, 1, 1)_, or _(2, 0, 0, 1)_, depending on which branch is
|
||||||
taken. As above, the resulting *f* and *g* are always integers.
|
taken. As above, the resulting _f_ and _g_ are always integers.
|
||||||
|
|
||||||
Performing multiple divsteps corresponds to a multiplication with the product of all the
|
Performing multiple divsteps corresponds to a multiplication with the product of all the
|
||||||
individual divsteps' transition matrices. As each transition matrix consists of integers
|
individual divsteps' transition matrices. As each transition matrix consists of integers
|
||||||
divided by *2*, the product of these matrices will consist of integers divided by *2<sup>N</sup>* (see also
|
divided by _2_, the product of these matrices will consist of integers divided by _2<sup>N</sup>_ (see also
|
||||||
theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay
|
theorem 9.2 in the paper). These divisions are expensive when updating _d_ and _e_, so we delay
|
||||||
them: we compute the integer coefficients of the combined transition matrix scaled by *2<sup>N</sup>*, and
|
them: we compute the integer coefficients of the combined transition matrix scaled by _2<sup>N</sup>_, and
|
||||||
do one division by *2<sup>N</sup>* as a final step:
|
do one division by _2<sup>N</sup>_ as a final step:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def divsteps_n_matrix(delta, f, g):
|
def divsteps_n_matrix(delta, f, g):
|
||||||
@@ -166,13 +167,13 @@ def divsteps_n_matrix(delta, f, g):
|
|||||||
return delta, (u, v, q, r)
|
return delta, (u, v, q, r)
|
||||||
```
|
```
|
||||||
|
|
||||||
As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this
|
As the branches in the divsteps are completely determined by the bottom _N_ bits of _f_ and _g_, this
|
||||||
function to compute the transition matrix only needs to see those bottom bits. Furthermore all
|
function to compute the transition matrix only needs to see those bottom bits. Furthermore all
|
||||||
intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*,
|
intermediate results and outputs fit in _(N+1)_-bit numbers (unsigned for _f_ and _g_; signed for _u_, _v_,
|
||||||
*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
|
_q_, and _r_) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
|
||||||
integers could set *N=62* and compute the full transition matrix for 62 steps at once without any
|
integers could set _N=62_ and compute the full transition matrix for 62 steps at once without any
|
||||||
big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs
|
big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs
|
||||||
to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps.
|
to update the full-size _f_, _g_, _d_, and _e_ numbers once every _N_ steps.
|
||||||
|
|
||||||
We still need functions to compute:
|
We still need functions to compute:
|
||||||
|
|
||||||
@@ -184,8 +185,8 @@ We still need functions to compute:
|
|||||||
[ out_e ] ( [ q, r ]) [ in_e ]
|
[ out_e ] ( [ q, r ]) [ in_e ]
|
||||||
```
|
```
|
||||||
|
|
||||||
Because the divsteps transformation only ever divides even numbers by two, the result of *t [f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f*
|
Because the divsteps transformation only ever divides even numbers by two, the result of _t [f,g]_ is always even. When _t_ is a composition of _N_ divsteps, it follows that the resulting _f_
|
||||||
and *g* will be multiple of *2<sup>N</sup>*, and division by *2<sup>N</sup>* is simply shifting them down:
|
and _g_ will be multiple of _2<sup>N</sup>_, and division by _2<sup>N</sup>_ is simply shifting them down:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def update_fg(f, g, t):
|
def update_fg(f, g, t):
|
||||||
@@ -199,8 +200,8 @@ def update_fg(f, g, t):
|
|||||||
return cf >> N, cg >> N
|
return cf >> N, cg >> N
|
||||||
```
|
```
|
||||||
|
|
||||||
The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2<sup>N</sup> mod M*.
|
The same is not true for _d_ and _e_, and we need an equivalent of the `div2` function for division by _2<sup>N</sup> mod M_.
|
||||||
This is easy if we have precomputed *1/M mod 2<sup>N</sup>* (which always exists for odd *M*):
|
This is easy if we have precomputed _1/M mod 2<sup>N</sup>_ (which always exists for odd _M_):
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def div2n(M, Mi, x):
|
def div2n(M, Mi, x):
|
||||||
@@ -224,7 +225,7 @@ def update_de(d, e, t, M, Mi):
|
|||||||
return div2n(M, Mi, cd), div2n(M, Mi, ce)
|
return div2n(M, Mi, cd), div2n(M, Mi, ce)
|
||||||
```
|
```
|
||||||
|
|
||||||
With all of those, we can write a version of `modinv` that performs *N* divsteps at once:
|
With all of those, we can write a version of `modinv` that performs _N_ divsteps at once:
|
||||||
|
|
||||||
```python3
|
```python3
|
||||||
def modinv(M, Mi, x):
|
def modinv(M, Mi, x):
|
||||||
@@ -242,20 +243,19 @@ def modinv(M, Mi, x):
|
|||||||
return (d * f) % M
|
return (d * f) % M
|
||||||
```
|
```
|
||||||
|
|
||||||
This means that in practice we'll always perform a multiple of *N* divsteps. This is not a problem
|
This means that in practice we'll always perform a multiple of _N_ divsteps. This is not a problem
|
||||||
because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *δ* keeps
|
because once _g=0_, further divsteps do not affect _f_, _g_, _d_, or _e_ anymore (only _δ_ keeps
|
||||||
increasing). For variable time code such excess iterations will be mostly optimized away in later
|
increasing). For variable time code such excess iterations will be mostly optimized away in later
|
||||||
sections.
|
sections.
|
||||||
|
|
||||||
|
|
||||||
## 4. Avoiding modulus operations
|
## 4. Avoiding modulus operations
|
||||||
|
|
||||||
So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of
|
So far, there are two places where we compute a remainder of big numbers modulo _M_: at the end of
|
||||||
`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the
|
`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating _d_ due to the
|
||||||
sign of *f*. These are relatively expensive operations when done generically.
|
sign of _f_. These are relatively expensive operations when done generically.
|
||||||
|
|
||||||
To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range
|
To deal with the modulus operation in `div2n`, we simply stop requiring _d_ and _e_ to be in range
|
||||||
*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus
|
_[0,M)_ all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus
|
||||||
operation at the end:
|
operation at the end:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -272,15 +272,15 @@ def update_de(d, e, t, M, Mi):
|
|||||||
return cd >> N, ce >> N
|
return cd >> N, ce >> N
|
||||||
```
|
```
|
||||||
|
|
||||||
Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|*
|
Let's look at bounds on the ranges of these numbers. It can be shown that _|u|+|v|_ and _|q|+|r|_
|
||||||
never exceed *2<sup>N</sup>* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have
|
never exceed _2<sup>N</sup>_ (see paragraph 8.3 in the paper), and thus a multiplication with _t_ will have
|
||||||
outputs whose absolute values are at most *2<sup>N</sup>* times the maximum absolute input value. In case the
|
outputs whose absolute values are at most _2<sup>N</sup>_ times the maximum absolute input value. In case the
|
||||||
inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming
|
inputs _d_ and _e_ are in _(-M,M)_, which is certainly true for the initial values _d=0_ and _e=1_ assuming
|
||||||
*M > 1*, the multiplication results in numbers in range *(-2<sup>N</sup>M,2<sup>N</sup>M)*. Subtracting less than *2<sup>N</sup>*
|
_M > 1_, the multiplication results in numbers in range _(-2<sup>N</sup>M,2<sup>N</sup>M)_. Subtracting less than _2<sup>N</sup>_
|
||||||
times *M* to cancel out *N* bits brings that up to *(-2<sup>N+1</sup>M,2<sup>N</sup>M)*, and
|
times _M_ to cancel out _N_ bits brings that up to _(-2<sup>N+1</sup>M,2<sup>N</sup>M)_, and
|
||||||
dividing by *2<sup>N</sup>* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that
|
dividing by _2<sup>N</sup>_ at the end takes it to _(-2M,M)_. Another application of `update_de` would take that
|
||||||
to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be
|
to _(-3M,2M)_, and so forth. This progressive expansion of the variables' ranges can be
|
||||||
counteracted by incrementing *d* and *e* by *M* whenever they're negative:
|
counteracted by incrementing _d_ and _e_ by _M_ whenever they're negative:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -293,12 +293,12 @@ counteracted by incrementing *d* and *e* by *M* whenever they're negative:
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the
|
With inputs in _(-2M,M)_, they will first be shifted into range _(-M,M)_, which means that the
|
||||||
output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de`
|
output will again be in _(-2M,M)_, and this remains the case regardless of how many `update_de`
|
||||||
invocations there are. In what follows, we will try to make this more efficient.
|
invocations there are. In what follows, we will try to make this more efficient.
|
||||||
|
|
||||||
Note that increasing *d* by *M* is equal to incrementing *cd* by *u M* and *ce* by *q M*. Similarly,
|
Note that increasing _d_ by _M_ is equal to incrementing _cd_ by _u M_ and _ce_ by _q M_. Similarly,
|
||||||
increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by *r M*. So we could instead write:
|
increasing _e_ by _M_ is equal to incrementing _cd_ by _v M_ and _ce_ by _r M_. So we could instead write:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -318,10 +318,10 @@ increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this
|
Now note that we have two steps of corrections to _cd_ and _ce_ that add multiples of _M_: this
|
||||||
increment, and the decrement that cancels out bottom bits. The second one depends on the first
|
increment, and the decrement that cancels out bottom bits. The second one depends on the first
|
||||||
one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce*
|
one, but they can still be efficiently combined by only computing the bottom bits of _cd_ and _ce_
|
||||||
at first, and using that to compute the final *md*, *me* values:
|
at first, and using that to compute the final _md_, _me_ values:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def update_de(d, e, t, M, Mi):
|
def update_de(d, e, t, M, Mi):
|
||||||
@@ -346,8 +346,8 @@ def update_de(d, e, t, M, Mi):
|
|||||||
return cd >> N, ce >> N
|
return cd >> N, ce >> N
|
||||||
```
|
```
|
||||||
|
|
||||||
One last optimization: we can avoid the *md M* and *me M* multiplications in the bottom bits of *cd*
|
One last optimization: we can avoid the _md M_ and _me M_ multiplications in the bottom bits of _cd_
|
||||||
and *ce* by moving them to the *md* and *me* correction:
|
and _ce_ by moving them to the _md_ and _me_ correction:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -362,10 +362,10 @@ and *ce* by moving them to the *md* and *me* correction:
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same
|
The resulting function takes _d_ and _e_ in range _(-2M,M)_ as inputs, and outputs values in the same
|
||||||
range. That also means that the *d* value at the end of `modinv` will be in that range, while we want
|
range. That also means that the _d_ value at the end of `modinv` will be in that range, while we want
|
||||||
a result in *[0,M)*. To do that, we need a normalization function. It's easy to integrate the
|
a result in _[0,M)_. To do that, we need a normalization function. It's easy to integrate the
|
||||||
conditional negation of *d* (based on the sign of *f*) into it as well:
|
conditional negation of _d_ (based on the sign of _f_) into it as well:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def normalize(sign, v, M):
|
def normalize(sign, v, M):
|
||||||
@@ -391,22 +391,21 @@ And calling it in `modinv` is simply:
|
|||||||
return normalize(f, d, M)
|
return normalize(f, d, M)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## 5. Constant-time operation
|
## 5. Constant-time operation
|
||||||
|
|
||||||
The primary selling point of the algorithm is fast constant-time operation. What code flow still
|
The primary selling point of the algorithm is fast constant-time operation. What code flow still
|
||||||
depends on the input data so far?
|
depends on the input data so far?
|
||||||
|
|
||||||
- the number of iterations of the while *g ≠ 0* loop in `modinv`
|
- the number of iterations of the while _g ≠ 0_ loop in `modinv`
|
||||||
- the branches inside `divsteps_n_matrix`
|
- the branches inside `divsteps_n_matrix`
|
||||||
- the sign checks in `update_de`
|
- the sign checks in `update_de`
|
||||||
- the sign checks in `normalize`
|
- the sign checks in `normalize`
|
||||||
|
|
||||||
To make the while loop in `modinv` constant time it can be replaced with a constant number of
|
To make the while loop in `modinv` constant time it can be replaced with a constant number of
|
||||||
iterations. The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit
|
iterations. The paper proves (Theorem 11.2) that _741_ divsteps are sufficient for any _256_-bit
|
||||||
inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound *724* is
|
inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound _724_ is
|
||||||
sufficient even. Given that every loop iteration performs *N* divsteps, it will run a total of
|
sufficient even. Given that every loop iteration performs _N_ divsteps, it will run a total of
|
||||||
*⌈724/N⌉* times.
|
_⌈724/N⌉_ times.
|
||||||
|
|
||||||
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
|
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
|
||||||
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
|
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
|
||||||
@@ -425,10 +424,10 @@ divstep can be written instead as (compare to the inner loop of `gcd` in section
|
|||||||
```
|
```
|
||||||
|
|
||||||
To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the
|
To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the
|
||||||
definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As
|
definition of negative numbers in two's complement, (_-v == ~v + 1_) holds for every number _v_. As
|
||||||
*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows
|
_-1_ in two's complement is all _1_ bits, bitflipping can be expressed as xor with _-1_. It follows
|
||||||
that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then
|
that _-v == (v ^ -1) - (-1)_. Thus, if we have a variable _c_ that takes on values _0_ or _-1_, then
|
||||||
*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*.
|
_(v ^ c) - c_ is _v_ if _c=0_ and _-v_ if _c=-1_.
|
||||||
|
|
||||||
Using this we can write:
|
Using this we can write:
|
||||||
|
|
||||||
@@ -444,13 +443,13 @@ in constant-time form as:
|
|||||||
x = (f ^ c1) - c1
|
x = (f ^ c1) - c1
|
||||||
```
|
```
|
||||||
|
|
||||||
To use that trick, we need a helper mask variable *c1* that resolves the condition *δ>0* to *-1*
|
To use that trick, we need a helper mask variable _c1_ that resolves the condition _δ>0_ to _-1_
|
||||||
(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by
|
(if true) or _0_ (if false). We compute _c1_ using right shifting, which is equivalent to dividing by
|
||||||
the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see
|
the specified power of _2_ and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see
|
||||||
`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all
|
`assumptions.h` for tests that this is the case). Right shifting by _63_ thus maps all
|
||||||
numbers in range *[-2<sup>63</sup>,0)* to *-1*, and numbers in range *[0,2<sup>63</sup>)* to *0*.
|
numbers in range _[-2<sup>63</sup>,0)_ to _-1_, and numbers in range _[0,2<sup>63</sup>)_ to _0_.
|
||||||
|
|
||||||
Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write:
|
Using the facts that _x&0=0_ and _x&(-1)=x_ (on two's complement systems again), we can write:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
if g & 1:
|
if g & 1:
|
||||||
@@ -498,8 +497,8 @@ becomes:
|
|||||||
```
|
```
|
||||||
|
|
||||||
It turns out that this can be implemented more efficiently by applying the substitution
|
It turns out that this can be implemented more efficiently by applying the substitution
|
||||||
*η=-δ*. In this representation, negating *δ* corresponds to negating *η*, and incrementing
|
_η=-δ_. In this representation, negating _δ_ corresponds to negating _η_, and incrementing
|
||||||
*δ* corresponds to decrementing *η*. This allows us to remove the negation in the *c1*
|
_δ_ corresponds to decrementing _η_. This allows us to remove the negation in the _c1_
|
||||||
computation:
|
computation:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -519,12 +518,12 @@ computation:
|
|||||||
g >>= 1
|
g >>= 1
|
||||||
```
|
```
|
||||||
|
|
||||||
A variant of divsteps with better worst-case performance can be used instead: starting *δ* at
|
A variant of divsteps with better worst-case performance can be used instead: starting _δ_ at
|
||||||
*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs
|
_1/2_ instead of _1_. This reduces the worst case number of iterations to _590_ for _256_-bit inputs
|
||||||
(which can be shown using convex hull analysis). In this case, the substitution *ζ=-(δ+1/2)*
|
(which can be shown using convex hull analysis). In this case, the substitution _ζ=-(δ+1/2)_
|
||||||
is used instead to keep the variable integral. Incrementing *δ* by *1* still translates to
|
is used instead to keep the variable integral. Incrementing _δ_ by _1_ still translates to
|
||||||
decrementing *ζ* by *1*, but negating *δ* now corresponds to going from *ζ* to *-(ζ+1)*, or
|
decrementing _ζ_ by _1_, but negating _δ_ now corresponds to going from _ζ_ to _-(ζ+1)_, or
|
||||||
*~ζ*. Doing that conditionally based on *c3* is simply:
|
_~ζ_. Doing that conditionally based on _c3_ is simply:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -534,13 +533,12 @@ decrementing *ζ* by *1*, but negating *δ* now corresponds to going fr
|
|||||||
```
|
```
|
||||||
|
|
||||||
By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to
|
By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to
|
||||||
also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of
|
also apply all _f_ operations to _u_, _v_ and all _g_ operations to _q_, _r_), a constant-time version of
|
||||||
`divsteps_n_matrix` is obtained. The full code will be in section 7.
|
`divsteps_n_matrix` is obtained. The full code will be in section 7.
|
||||||
|
|
||||||
These bit fiddling tricks can also be used to make the conditional negations and additions in
|
These bit fiddling tricks can also be used to make the conditional negations and additions in
|
||||||
`update_de` and `normalize` constant-time.
|
`update_de` and `normalize` constant-time.
|
||||||
|
|
||||||
|
|
||||||
## 6. Variable-time optimizations
|
## 6. Variable-time optimizations
|
||||||
|
|
||||||
In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time.
|
In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time.
|
||||||
@@ -550,7 +548,7 @@ faster non-constant time `divsteps_n_matrix` function.
|
|||||||
|
|
||||||
To do so, first consider yet another way of writing the inner loop of divstep operations in
|
To do so, first consider yet another way of writing the inner loop of divstep operations in
|
||||||
`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use
|
`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use
|
||||||
the original version with initial *δ=1* and *η=-δ* here.
|
the original version with initial _δ=1_ and _η=-δ_ here.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
for _ in range(N):
|
for _ in range(N):
|
||||||
@@ -562,7 +560,7 @@ for _ in range(N):
|
|||||||
g >>= 1
|
g >>= 1
|
||||||
```
|
```
|
||||||
|
|
||||||
Whenever *g* is even, the loop only shifts *g* down and decreases *η*. When *g* ends in multiple zero
|
Whenever _g_ is even, the loop only shifts _g_ down and decreases _η_. When _g_ ends in multiple zero
|
||||||
bits, these iterations can be consolidated into one step. This requires counting the bottom zero
|
bits, these iterations can be consolidated into one step. This requires counting the bottom zero
|
||||||
bits efficiently, which is possible on most platforms; it is abstracted here as the function
|
bits efficiently, which is possible on most platforms; it is abstracted here as the function
|
||||||
`count_trailing_zeros`.
|
`count_trailing_zeros`.
|
||||||
@@ -595,20 +593,20 @@ while True:
|
|||||||
# g is even now, and the eta decrement and g shift will happen in the next loop.
|
# g is even now, and the eta decrement and g shift will happen in the next loop.
|
||||||
```
|
```
|
||||||
|
|
||||||
We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever
|
We can now remove multiple bottom _0_ bits from _g_ at once, but still need a full iteration whenever
|
||||||
there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as
|
there is a bottom _1_ bit. In what follows, we will get rid of multiple _1_ bits simultaneously as
|
||||||
well.
|
well.
|
||||||
|
|
||||||
Observe that as long as *η ≥ 0*, the loop does not modify *f*. Instead, it cancels out bottom
|
Observe that as long as _η ≥ 0_, the loop does not modify _f_. Instead, it cancels out bottom
|
||||||
bits of *g* and shifts them out, and decreases *η* and *i* accordingly - interrupting only when *η*
|
bits of _g_ and shifts them out, and decreases _η_ and _i_ accordingly - interrupting only when _η_
|
||||||
becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to
|
becomes negative, or when _i_ reaches _0_. Combined, this is equivalent to adding a multiple of _f_ to
|
||||||
*g* to cancel out multiple bottom bits, and then shifting them out.
|
_g_ to cancel out multiple bottom bits, and then shifting them out.
|
||||||
|
|
||||||
It is easy to find what that multiple is: we want a number *w* such that *g+w f* has a few bottom
|
It is easy to find what that multiple is: we want a number _w_ such that _g+w f_ has a few bottom
|
||||||
zero bits. If that number of bits is *L*, we want *g+w f mod 2<sup>L</sup> = 0*, or *w = -g/f mod 2<sup>L</sup>*. Since *f*
|
zero bits. If that number of bits is _L_, we want _g+w f mod 2<sup>L</sup> = 0_, or _w = -g/f mod 2<sup>L</sup>_. Since _f_
|
||||||
is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before
|
is odd, such a _w_ exists for any _L_. _L_ cannot be more than _i_ steps (as we'd finish the loop before
|
||||||
doing more) or more than *η+1* steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but
|
doing more) or more than _η+1_ steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but
|
||||||
apart from that, we're only limited by the complexity of computing *w*.
|
apart from that, we're only limited by the complexity of computing _w_.
|
||||||
|
|
||||||
This code demonstrates how to cancel up to 4 bits per step:
|
This code demonstrates how to cancel up to 4 bits per step:
|
||||||
|
|
||||||
@@ -642,26 +640,25 @@ some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pag
|
|||||||
Here we need the negated modular inverse, which is a simple transformation of those:
|
Here we need the negated modular inverse, which is a simple transformation of those:
|
||||||
|
|
||||||
- Instead of a 3-bit table:
|
- Instead of a 3-bit table:
|
||||||
- *-f* or *f ^ 6*
|
- _-f_ or _f ^ 6_
|
||||||
- Instead of a 4-bit table:
|
- Instead of a 4-bit table:
|
||||||
- *1 - f(f + 1)*
|
- _1 - f(f + 1)_
|
||||||
- *-(f + (((f + 1) & 4) << 1))*
|
- _-(f + (((f + 1) & 4) << 1))_
|
||||||
- For larger tables the following technique can be used: if *w=-1/f mod 2<sup>L</sup>*, then *w(w f+2)* is
|
- For larger tables the following technique can be used: if _w=-1/f mod 2<sup>L</sup>_, then _w(w f+2)_ is
|
||||||
*-1/f mod 2<sup>2L</sup>*. This allows extending the previous formulas (or tables). In particular we
|
_-1/f mod 2<sup>2L</sup>_. This allows extending the previous formulas (or tables). In particular we
|
||||||
have this 6-bit function (based on the 3-bit function above):
|
have this 6-bit function (based on the 3-bit function above):
|
||||||
- *f(f<sup>2</sup> - 2)*
|
- _f(f<sup>2</sup> - 2)_
|
||||||
|
|
||||||
This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in
|
This loop, again extended to also handle _u_, _v_, _q_, and _r_ alongside _f_ and _g_, placed in
|
||||||
`divsteps_n_matrix`, gives a significantly faster, but non-constant time version.
|
`divsteps_n_matrix`, gives a significantly faster, but non-constant time version.
|
||||||
|
|
||||||
|
|
||||||
## 7. Final Python version
|
## 7. Final Python version
|
||||||
|
|
||||||
All together we need the following functions:
|
All together we need the following functions:
|
||||||
|
|
||||||
- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function
|
- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function
|
||||||
from section 2, but with its loop replaced by a variant of the constant-time divstep from
|
from section 2, but with its loop replaced by a variant of the constant-time divstep from
|
||||||
section 5, extended to handle *u*, *v*, *q*, *r*:
|
section 5, extended to handle _u_, _v_, _q_, _r_:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def divsteps_n_matrix(zeta, f, g):
|
def divsteps_n_matrix(zeta, f, g):
|
||||||
@@ -684,7 +681,7 @@ def divsteps_n_matrix(zeta, f, g):
|
|||||||
return zeta, (u, v, q, r)
|
return zeta, (u, v, q, r)
|
||||||
```
|
```
|
||||||
|
|
||||||
- The functions to update *f* and *g*, and *d* and *e*, from section 2 and section 4, with the constant-time
|
- The functions to update _f_ and _g_, and _d_ and _e_, from section 2 and section 4, with the constant-time
|
||||||
changes to `update_de` from section 5:
|
changes to `update_de` from section 5:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -723,7 +720,7 @@ def normalize(sign, v, M):
|
|||||||
return v
|
return v
|
||||||
```
|
```
|
||||||
|
|
||||||
- And finally the `modinv` function too, adapted to use *ζ* instead of *δ*, and using the fixed
|
- And finally the `modinv` function too, adapted to use _ζ_ instead of _δ_, and using the fixed
|
||||||
iteration count from section 5:
|
iteration count from section 5:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -772,20 +769,21 @@ def modinv_var(M, Mi, x):
|
|||||||
|
|
||||||
## 8. From GCDs to Jacobi symbol
|
## 8. From GCDs to Jacobi symbol
|
||||||
|
|
||||||
We can also use a similar approach to calculate Jacobi symbol *(x | M)* by keeping track of an
|
We can also use a similar approach to calculate Jacobi symbol _(x | M)_ by keeping track of an
|
||||||
extra variable *j*, for which at every step *(x | M) = j (g | f)*. As we update *f* and *g*, we
|
extra variable _j_, for which at every step _(x | M) = j (g | f)_. As we update _f_ and _g_, we
|
||||||
make corresponding updates to *j* using
|
make corresponding updates to _j_ using
|
||||||
[properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties):
|
[properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties):
|
||||||
* *((g/2) | f)* is either *(g | f)* or *-(g | f)*, depending on the value of *f mod 8* (negating if it's *3* or *5*).
|
|
||||||
* *(f | g)* is either *(g | f)* or *-(g | f)*, depending on *f mod 4* and *g mod 4* (negating if both are *3*).
|
|
||||||
|
|
||||||
These updates depend only on the values of *f* and *g* modulo *4* or *8*, and can thus be applied
|
- _((g/2) | f)_ is either _(g | f)_ or _-(g | f)_, depending on the value of _f mod 8_ (negating if it's _3_ or _5_).
|
||||||
very quickly, as long as we keep track of a few additional bits of *f* and *g*. Overall, this
|
- _(f | g)_ is either _(g | f)_ or _-(g | f)_, depending on _f mod 4_ and _g mod 4_ (negating if both are _3_).
|
||||||
|
|
||||||
|
These updates depend only on the values of _f_ and _g_ modulo _4_ or _8_, and can thus be applied
|
||||||
|
very quickly, as long as we keep track of a few additional bits of _f_ and _g_. Overall, this
|
||||||
calculation is slightly simpler than the one for the modular inverse because we no longer need to
|
calculation is slightly simpler than the one for the modular inverse because we no longer need to
|
||||||
keep track of *d* and *e*.
|
keep track of _d_ and _e_.
|
||||||
|
|
||||||
However, one difficulty of this approach is that the Jacobi symbol *(a | n)* is only defined for
|
However, one difficulty of this approach is that the Jacobi symbol _(a | n)_ is only defined for
|
||||||
positive odd integers *n*, whereas in the original safegcd algorithm, *f, g* can take negative
|
positive odd integers _n_, whereas in the original safegcd algorithm, _f, g_ can take negative
|
||||||
values. We resolve this by using the following modified steps:
|
values. We resolve this by using the following modified steps:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -799,15 +797,16 @@ values. We resolve this by using the following modified steps:
|
|||||||
```
|
```
|
||||||
|
|
||||||
The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4
|
The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4
|
||||||
and E.5 in the paper) preserves *gcd(f, g)*. However, there's no proof that the modified algorithm
|
and E.5 in the paper) preserves _gcd(f, g)_. However, there's no proof that the modified algorithm
|
||||||
will converge. The justification for posdivsteps is completely empirical: in practice, it appears
|
will converge. The justification for posdivsteps is completely empirical: in practice, it appears
|
||||||
that the vast majority of nonzero inputs converge to *f=g=gcd(f<sub>0</sub>, g<sub>0</sub>)* in a
|
that the vast majority of nonzero inputs converge to _f=g=gcd(f<sub>0</sub>, g<sub>0</sub>)_ in a
|
||||||
number of steps proportional to their logarithm.
|
number of steps proportional to their logarithm.
|
||||||
|
|
||||||
Note that:
|
Note that:
|
||||||
- We require inputs to satisfy *gcd(x, M) = 1*, as otherwise *f=1* is not reached.
|
|
||||||
- We require inputs *x &neq; 0*, because applying posdivstep with *g=0* has no effect.
|
- We require inputs to satisfy _gcd(x, M) = 1_, as otherwise _f=1_ is not reached.
|
||||||
- We need to update the termination condition from *g=0* to *f=1*.
|
- We require inputs _x &neq; 0_, because applying posdivstep with _g=0_ has no effect.
|
||||||
|
- We need to update the termination condition from _g=0_ to _f=1_.
|
||||||
|
|
||||||
We account for the possibility of nonconvergence by only performing a bounded number of
|
We account for the possibility of nonconvergence by only performing a bounded number of
|
||||||
posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not
|
posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not
|
||||||
@@ -815,5 +814,5 @@ yet been found.
|
|||||||
|
|
||||||
The optimizations in sections 3-7 above are described in the context of the original divsteps, but
|
The optimizations in sections 3-7 above are described in the context of the original divsteps, but
|
||||||
in the C implementation we also adapt most of them (not including "avoiding modulus operations",
|
in the C implementation we also adapt most of them (not including "avoiding modulus operations",
|
||||||
since it's not necessary to track *d, e*, and "constant-time operation", since we never calculate
|
since it's not necessary to track _d, e_, and "constant-time operation", since we never calculate
|
||||||
Jacobi symbols for secret data) to the posdivsteps version.
|
Jacobi symbols for secret data) to the posdivsteps version.
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
40
external/snappy/conandata.yml
vendored
40
external/snappy/conandata.yml
vendored
@@ -1,40 +0,0 @@
|
|||||||
sources:
|
|
||||||
"1.1.10":
|
|
||||||
url: "https://github.com/google/snappy/archive/1.1.10.tar.gz"
|
|
||||||
sha256: "49d831bffcc5f3d01482340fe5af59852ca2fe76c3e05df0e67203ebbe0f1d90"
|
|
||||||
"1.1.9":
|
|
||||||
url: "https://github.com/google/snappy/archive/1.1.9.tar.gz"
|
|
||||||
sha256: "75c1fbb3d618dd3a0483bff0e26d0a92b495bbe5059c8b4f1c962b478b6e06e7"
|
|
||||||
"1.1.8":
|
|
||||||
url: "https://github.com/google/snappy/archive/1.1.8.tar.gz"
|
|
||||||
sha256: "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f"
|
|
||||||
"1.1.7":
|
|
||||||
url: "https://github.com/google/snappy/archive/1.1.7.tar.gz"
|
|
||||||
sha256: "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4"
|
|
||||||
patches:
|
|
||||||
"1.1.10":
|
|
||||||
- patch_file: "patches/1.1.10-0001-fix-inlining-failure.patch"
|
|
||||||
patch_description: "disable inlining for compilation error"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/1.1.9-0002-no-Werror.patch"
|
|
||||||
patch_description: "disable 'warning as error' options"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/1.1.10-0003-fix-clobber-list-older-llvm.patch"
|
|
||||||
patch_description: "disable inline asm on apple-clang"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/1.1.9-0004-rtti-by-default.patch"
|
|
||||||
patch_description: "remove 'disable rtti'"
|
|
||||||
patch_type: "conan"
|
|
||||||
"1.1.9":
|
|
||||||
- patch_file: "patches/1.1.9-0001-fix-inlining-failure.patch"
|
|
||||||
patch_description: "disable inlining for compilation error"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/1.1.9-0002-no-Werror.patch"
|
|
||||||
patch_description: "disable 'warning as error' options"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/1.1.9-0003-fix-clobber-list-older-llvm.patch"
|
|
||||||
patch_description: "disable inline asm on apple-clang"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/1.1.9-0004-rtti-by-default.patch"
|
|
||||||
patch_description: "remove 'disable rtti'"
|
|
||||||
patch_type: "conan"
|
|
||||||
89
external/snappy/conanfile.py
vendored
89
external/snappy/conanfile.py
vendored
@@ -1,89 +0,0 @@
|
|||||||
from conan import ConanFile
|
|
||||||
from conan.tools.build import check_min_cppstd
|
|
||||||
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
|
|
||||||
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
|
|
||||||
from conan.tools.scm import Version
|
|
||||||
import os
|
|
||||||
|
|
||||||
required_conan_version = ">=1.54.0"
|
|
||||||
|
|
||||||
|
|
||||||
class SnappyConan(ConanFile):
|
|
||||||
name = "snappy"
|
|
||||||
description = "A fast compressor/decompressor"
|
|
||||||
topics = ("google", "compressor", "decompressor")
|
|
||||||
url = "https://github.com/conan-io/conan-center-index"
|
|
||||||
homepage = "https://github.com/google/snappy"
|
|
||||||
license = "BSD-3-Clause"
|
|
||||||
|
|
||||||
package_type = "library"
|
|
||||||
settings = "os", "arch", "compiler", "build_type"
|
|
||||||
options = {
|
|
||||||
"shared": [True, False],
|
|
||||||
"fPIC": [True, False],
|
|
||||||
}
|
|
||||||
default_options = {
|
|
||||||
"shared": False,
|
|
||||||
"fPIC": True,
|
|
||||||
}
|
|
||||||
|
|
||||||
def export_sources(self):
|
|
||||||
export_conandata_patches(self)
|
|
||||||
|
|
||||||
def config_options(self):
|
|
||||||
if self.settings.os == 'Windows':
|
|
||||||
del self.options.fPIC
|
|
||||||
|
|
||||||
def configure(self):
|
|
||||||
if self.options.shared:
|
|
||||||
self.options.rm_safe("fPIC")
|
|
||||||
|
|
||||||
def layout(self):
|
|
||||||
cmake_layout(self, src_folder="src")
|
|
||||||
|
|
||||||
def validate(self):
|
|
||||||
if self.settings.compiler.get_safe("cppstd"):
|
|
||||||
check_min_cppstd(self, 11)
|
|
||||||
|
|
||||||
def source(self):
|
|
||||||
get(self, **self.conan_data["sources"][self.version], strip_root=True)
|
|
||||||
|
|
||||||
def generate(self):
|
|
||||||
tc = CMakeToolchain(self)
|
|
||||||
tc.variables["SNAPPY_BUILD_TESTS"] = False
|
|
||||||
if Version(self.version) >= "1.1.8":
|
|
||||||
tc.variables["SNAPPY_FUZZING_BUILD"] = False
|
|
||||||
tc.variables["SNAPPY_REQUIRE_AVX"] = False
|
|
||||||
tc.variables["SNAPPY_REQUIRE_AVX2"] = False
|
|
||||||
tc.variables["SNAPPY_INSTALL"] = True
|
|
||||||
if Version(self.version) >= "1.1.9":
|
|
||||||
tc.variables["SNAPPY_BUILD_BENCHMARKS"] = False
|
|
||||||
tc.generate()
|
|
||||||
|
|
||||||
def build(self):
|
|
||||||
apply_conandata_patches(self)
|
|
||||||
cmake = CMake(self)
|
|
||||||
cmake.configure()
|
|
||||||
cmake.build()
|
|
||||||
|
|
||||||
def package(self):
|
|
||||||
copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
|
|
||||||
cmake = CMake(self)
|
|
||||||
cmake.install()
|
|
||||||
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
|
|
||||||
|
|
||||||
def package_info(self):
|
|
||||||
self.cpp_info.set_property("cmake_file_name", "Snappy")
|
|
||||||
self.cpp_info.set_property("cmake_target_name", "Snappy::snappy")
|
|
||||||
# TODO: back to global scope in conan v2 once cmake_find_package* generators removed
|
|
||||||
self.cpp_info.components["snappylib"].libs = ["snappy"]
|
|
||||||
if not self.options.shared:
|
|
||||||
if self.settings.os in ["Linux", "FreeBSD"]:
|
|
||||||
self.cpp_info.components["snappylib"].system_libs.append("m")
|
|
||||||
|
|
||||||
# TODO: to remove in conan v2 once cmake_find_package* generators removed
|
|
||||||
self.cpp_info.names["cmake_find_package"] = "Snappy"
|
|
||||||
self.cpp_info.names["cmake_find_package_multi"] = "Snappy"
|
|
||||||
self.cpp_info.components["snappylib"].names["cmake_find_package"] = "snappy"
|
|
||||||
self.cpp_info.components["snappylib"].names["cmake_find_package_multi"] = "snappy"
|
|
||||||
self.cpp_info.components["snappylib"].set_property("cmake_target_name", "Snappy::snappy")
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
|
|
||||||
index 1548ed7..3b4a9f3 100644
|
|
||||||
--- a/snappy-stubs-internal.h
|
|
||||||
+++ b/snappy-stubs-internal.h
|
|
||||||
@@ -100,7 +100,7 @@
|
|
||||||
|
|
||||||
// Inlining hints.
|
|
||||||
#if HAVE_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
-#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
|
|
||||||
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
#else
|
|
||||||
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
#endif // HAVE_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
diff --git a/snappy.cc b/snappy.cc
|
|
||||||
index d414718..e4efb59 100644
|
|
||||||
--- a/snappy.cc
|
|
||||||
+++ b/snappy.cc
|
|
||||||
@@ -1132,7 +1132,7 @@ inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
|
|
||||||
size_t literal_len = *tag >> 2;
|
|
||||||
size_t tag_type = *tag;
|
|
||||||
bool is_literal;
|
|
||||||
-#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
|
|
||||||
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) )
|
|
||||||
// TODO clang misses the fact that the (c & 3) already correctly
|
|
||||||
// sets the zero flag.
|
|
||||||
asm("and $3, %k[tag_type]\n\t"
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
Fixes the following error:
|
|
||||||
error: inlining failed in call to ‘always_inline’ ‘size_t snappy::AdvanceToNextTag(const uint8_t**, size_t*)’: function body can be overwritten at link time
|
|
||||||
|
|
||||||
--- snappy-stubs-internal.h
|
|
||||||
+++ snappy-stubs-internal.h
|
|
||||||
@@ -100,7 +100,7 @@
|
|
||||||
|
|
||||||
// Inlining hints.
|
|
||||||
#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
-#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
|
|
||||||
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
#else
|
|
||||||
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
|
||||||
#endif
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
--- CMakeLists.txt
|
|
||||||
+++ CMakeLists.txt
|
|
||||||
@@ -69,7 +69,7 @@
|
|
||||||
- # Use -Werror for clang only.
|
|
||||||
+if(0)
|
|
||||||
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
|
||||||
if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
|
|
||||||
endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
|
|
||||||
endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
|
||||||
-
|
|
||||||
+endif()
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
asm clobbers do not work for clang < 9 and apple-clang < 11 (found by SpaceIm)
|
|
||||||
--- snappy.cc
|
|
||||||
+++ snappy.cc
|
|
||||||
@@ -1026,7 +1026,7 @@
|
|
||||||
size_t literal_len = *tag >> 2;
|
|
||||||
size_t tag_type = *tag;
|
|
||||||
bool is_literal;
|
|
||||||
-#if defined(__GNUC__) && defined(__x86_64__)
|
|
||||||
+#if defined(__GNUC__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) )
|
|
||||||
// TODO clang misses the fact that the (c & 3) already correctly
|
|
||||||
// sets the zero flag.
|
|
||||||
asm("and $3, %k[tag_type]\n\t"
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
--- a/CMakeLists.txt
|
|
||||||
+++ b/CMakeLists.txt
|
|
||||||
@@ -53,8 +53,6 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
|
|
||||||
add_definitions(-D_HAS_EXCEPTIONS=0)
|
|
||||||
|
|
||||||
# Disable RTTI.
|
|
||||||
- string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
|
||||||
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
|
|
||||||
else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
|
|
||||||
# Use -Wall for clang and gcc.
|
|
||||||
if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
|
|
||||||
@@ -78,8 +76,6 @@ endif()
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
|
|
||||||
|
|
||||||
# Disable RTTI.
|
|
||||||
- string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
|
||||||
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
|
|
||||||
endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
|
|
||||||
|
|
||||||
# BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to make
|
|
||||||
12
external/soci/conandata.yml
vendored
12
external/soci/conandata.yml
vendored
@@ -1,12 +0,0 @@
|
|||||||
sources:
|
|
||||||
"4.0.3":
|
|
||||||
url: "https://github.com/SOCI/soci/archive/v4.0.3.tar.gz"
|
|
||||||
sha256: "4b1ff9c8545c5d802fbe06ee6cd2886630e5c03bf740e269bb625b45cf934928"
|
|
||||||
patches:
|
|
||||||
"4.0.3":
|
|
||||||
- patch_file: "patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch"
|
|
||||||
patch_description: "Generate relocatable libraries on MacOS"
|
|
||||||
patch_type: "portability"
|
|
||||||
- patch_file: "patches/0002-Fix-soci_backend.patch"
|
|
||||||
patch_description: "Fix variable names for dependencies"
|
|
||||||
patch_type: "conan"
|
|
||||||
212
external/soci/conanfile.py
vendored
212
external/soci/conanfile.py
vendored
@@ -1,212 +0,0 @@
|
|||||||
from conan import ConanFile
|
|
||||||
from conan.tools.build import check_min_cppstd
|
|
||||||
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
|
|
||||||
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
|
|
||||||
from conan.tools.microsoft import is_msvc
|
|
||||||
from conan.tools.scm import Version
|
|
||||||
from conan.errors import ConanInvalidConfiguration
|
|
||||||
import os
|
|
||||||
|
|
||||||
required_conan_version = ">=1.55.0"
|
|
||||||
|
|
||||||
|
|
||||||
class SociConan(ConanFile):
|
|
||||||
name = "soci"
|
|
||||||
homepage = "https://github.com/SOCI/soci"
|
|
||||||
url = "https://github.com/conan-io/conan-center-index"
|
|
||||||
description = "The C++ Database Access Library "
|
|
||||||
topics = ("mysql", "odbc", "postgresql", "sqlite3")
|
|
||||||
license = "BSL-1.0"
|
|
||||||
|
|
||||||
settings = "os", "arch", "compiler", "build_type"
|
|
||||||
options = {
|
|
||||||
"shared": [True, False],
|
|
||||||
"fPIC": [True, False],
|
|
||||||
"empty": [True, False],
|
|
||||||
"with_sqlite3": [True, False],
|
|
||||||
"with_db2": [True, False],
|
|
||||||
"with_odbc": [True, False],
|
|
||||||
"with_oracle": [True, False],
|
|
||||||
"with_firebird": [True, False],
|
|
||||||
"with_mysql": [True, False],
|
|
||||||
"with_postgresql": [True, False],
|
|
||||||
"with_boost": [True, False],
|
|
||||||
}
|
|
||||||
default_options = {
|
|
||||||
"shared": False,
|
|
||||||
"fPIC": True,
|
|
||||||
"empty": False,
|
|
||||||
"with_sqlite3": False,
|
|
||||||
"with_db2": False,
|
|
||||||
"with_odbc": False,
|
|
||||||
"with_oracle": False,
|
|
||||||
"with_firebird": False,
|
|
||||||
"with_mysql": False,
|
|
||||||
"with_postgresql": False,
|
|
||||||
"with_boost": False,
|
|
||||||
}
|
|
||||||
|
|
||||||
def export_sources(self):
|
|
||||||
export_conandata_patches(self)
|
|
||||||
|
|
||||||
def layout(self):
|
|
||||||
cmake_layout(self, src_folder="src")
|
|
||||||
|
|
||||||
def config_options(self):
|
|
||||||
if self.settings.os == "Windows":
|
|
||||||
self.options.rm_safe("fPIC")
|
|
||||||
|
|
||||||
def configure(self):
|
|
||||||
if self.options.shared:
|
|
||||||
self.options.rm_safe("fPIC")
|
|
||||||
|
|
||||||
def requirements(self):
|
|
||||||
if self.options.with_sqlite3:
|
|
||||||
self.requires("sqlite3/3.47.0")
|
|
||||||
if self.options.with_odbc and self.settings.os != "Windows":
|
|
||||||
self.requires("odbc/2.3.11")
|
|
||||||
if self.options.with_mysql:
|
|
||||||
self.requires("libmysqlclient/8.1.0")
|
|
||||||
if self.options.with_postgresql:
|
|
||||||
self.requires("libpq/15.5")
|
|
||||||
if self.options.with_boost:
|
|
||||||
self.requires("boost/1.83.0")
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _minimum_compilers_version(self):
|
|
||||||
return {
|
|
||||||
"Visual Studio": "14",
|
|
||||||
"gcc": "4.8",
|
|
||||||
"clang": "3.8",
|
|
||||||
"apple-clang": "8.0"
|
|
||||||
}
|
|
||||||
|
|
||||||
def validate(self):
|
|
||||||
if self.settings.compiler.get_safe("cppstd"):
|
|
||||||
check_min_cppstd(self, 11)
|
|
||||||
|
|
||||||
compiler = str(self.settings.compiler)
|
|
||||||
compiler_version = Version(self.settings.compiler.version.value)
|
|
||||||
if compiler not in self._minimum_compilers_version:
|
|
||||||
self.output.warning("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler))
|
|
||||||
elif compiler_version < self._minimum_compilers_version[compiler]:
|
|
||||||
raise ConanInvalidConfiguration("{} requires a {} version >= {}".format(self.name, compiler, compiler_version))
|
|
||||||
|
|
||||||
prefix = "Dependencies for"
|
|
||||||
message = "not configured in this conan package."
|
|
||||||
if self.options.with_db2:
|
|
||||||
# self.requires("db2/0.0.0") # TODO add support for db2
|
|
||||||
raise ConanInvalidConfiguration("{} DB2 {} ".format(prefix, message))
|
|
||||||
if self.options.with_oracle:
|
|
||||||
# self.requires("oracle_db/0.0.0") # TODO add support for oracle
|
|
||||||
raise ConanInvalidConfiguration("{} ORACLE {} ".format(prefix, message))
|
|
||||||
if self.options.with_firebird:
|
|
||||||
# self.requires("firebird/0.0.0") # TODO add support for firebird
|
|
||||||
raise ConanInvalidConfiguration("{} firebird {} ".format(prefix, message))
|
|
||||||
|
|
||||||
def source(self):
|
|
||||||
get(self, **self.conan_data["sources"][self.version], strip_root=True)
|
|
||||||
|
|
||||||
def generate(self):
|
|
||||||
tc = CMakeToolchain(self)
|
|
||||||
|
|
||||||
tc.variables["SOCI_SHARED"] = self.options.shared
|
|
||||||
tc.variables["SOCI_STATIC"] = not self.options.shared
|
|
||||||
tc.variables["SOCI_TESTS"] = False
|
|
||||||
tc.variables["SOCI_CXX11"] = True
|
|
||||||
tc.variables["SOCI_EMPTY"] = self.options.empty
|
|
||||||
tc.variables["WITH_SQLITE3"] = self.options.with_sqlite3
|
|
||||||
tc.variables["WITH_DB2"] = self.options.with_db2
|
|
||||||
tc.variables["WITH_ODBC"] = self.options.with_odbc
|
|
||||||
tc.variables["WITH_ORACLE"] = self.options.with_oracle
|
|
||||||
tc.variables["WITH_FIREBIRD"] = self.options.with_firebird
|
|
||||||
tc.variables["WITH_MYSQL"] = self.options.with_mysql
|
|
||||||
tc.variables["WITH_POSTGRESQL"] = self.options.with_postgresql
|
|
||||||
tc.variables["WITH_BOOST"] = self.options.with_boost
|
|
||||||
tc.generate()
|
|
||||||
|
|
||||||
deps = CMakeDeps(self)
|
|
||||||
deps.generate()
|
|
||||||
|
|
||||||
def build(self):
|
|
||||||
apply_conandata_patches(self)
|
|
||||||
cmake = CMake(self)
|
|
||||||
cmake.configure()
|
|
||||||
cmake.build()
|
|
||||||
|
|
||||||
def package(self):
|
|
||||||
copy(self, "LICENSE_1_0.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
|
|
||||||
|
|
||||||
cmake = CMake(self)
|
|
||||||
cmake.install()
|
|
||||||
|
|
||||||
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
|
|
||||||
|
|
||||||
def package_info(self):
|
|
||||||
self.cpp_info.set_property("cmake_file_name", "SOCI")
|
|
||||||
|
|
||||||
target_suffix = "" if self.options.shared else "_static"
|
|
||||||
lib_prefix = "lib" if is_msvc(self) and not self.options.shared else ""
|
|
||||||
version = Version(self.version)
|
|
||||||
lib_suffix = "_{}_{}".format(version.major, version.minor) if self.settings.os == "Windows" else ""
|
|
||||||
|
|
||||||
# soci_core
|
|
||||||
self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix))
|
|
||||||
self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)]
|
|
||||||
if self.options.with_boost:
|
|
||||||
self.cpp_info.components["soci_core"].requires.append("boost::boost")
|
|
||||||
|
|
||||||
# soci_empty
|
|
||||||
if self.options.empty:
|
|
||||||
self.cpp_info.components["soci_empty"].set_property("cmake_target_name", "SOCI::soci_empty{}".format(target_suffix))
|
|
||||||
self.cpp_info.components["soci_empty"].libs = ["{}soci_empty{}".format(lib_prefix, lib_suffix)]
|
|
||||||
self.cpp_info.components["soci_empty"].requires = ["soci_core"]
|
|
||||||
|
|
||||||
# soci_sqlite3
|
|
||||||
if self.options.with_sqlite3:
|
|
||||||
self.cpp_info.components["soci_sqlite3"].set_property("cmake_target_name", "SOCI::soci_sqlite3{}".format(target_suffix))
|
|
||||||
self.cpp_info.components["soci_sqlite3"].libs = ["{}soci_sqlite3{}".format(lib_prefix, lib_suffix)]
|
|
||||||
self.cpp_info.components["soci_sqlite3"].requires = ["soci_core", "sqlite3::sqlite3"]
|
|
||||||
|
|
||||||
# soci_odbc
|
|
||||||
if self.options.with_odbc:
|
|
||||||
self.cpp_info.components["soci_odbc"].set_property("cmake_target_name", "SOCI::soci_odbc{}".format(target_suffix))
|
|
||||||
self.cpp_info.components["soci_odbc"].libs = ["{}soci_odbc{}".format(lib_prefix, lib_suffix)]
|
|
||||||
self.cpp_info.components["soci_odbc"].requires = ["soci_core"]
|
|
||||||
if self.settings.os == "Windows":
|
|
||||||
self.cpp_info.components["soci_odbc"].system_libs.append("odbc32")
|
|
||||||
else:
|
|
||||||
self.cpp_info.components["soci_odbc"].requires.append("odbc::odbc")
|
|
||||||
|
|
||||||
# soci_mysql
|
|
||||||
if self.options.with_mysql:
|
|
||||||
self.cpp_info.components["soci_mysql"].set_property("cmake_target_name", "SOCI::soci_mysql{}".format(target_suffix))
|
|
||||||
self.cpp_info.components["soci_mysql"].libs = ["{}soci_mysql{}".format(lib_prefix, lib_suffix)]
|
|
||||||
self.cpp_info.components["soci_mysql"].requires = ["soci_core", "libmysqlclient::libmysqlclient"]
|
|
||||||
|
|
||||||
# soci_postgresql
|
|
||||||
if self.options.with_postgresql:
|
|
||||||
self.cpp_info.components["soci_postgresql"].set_property("cmake_target_name", "SOCI::soci_postgresql{}".format(target_suffix))
|
|
||||||
self.cpp_info.components["soci_postgresql"].libs = ["{}soci_postgresql{}".format(lib_prefix, lib_suffix)]
|
|
||||||
self.cpp_info.components["soci_postgresql"].requires = ["soci_core", "libpq::libpq"]
|
|
||||||
|
|
||||||
# TODO: to remove in conan v2 once cmake_find_package* generators removed
|
|
||||||
self.cpp_info.names["cmake_find_package"] = "SOCI"
|
|
||||||
self.cpp_info.names["cmake_find_package_multi"] = "SOCI"
|
|
||||||
self.cpp_info.components["soci_core"].names["cmake_find_package"] = "soci_core{}".format(target_suffix)
|
|
||||||
self.cpp_info.components["soci_core"].names["cmake_find_package_multi"] = "soci_core{}".format(target_suffix)
|
|
||||||
if self.options.empty:
|
|
||||||
self.cpp_info.components["soci_empty"].names["cmake_find_package"] = "soci_empty{}".format(target_suffix)
|
|
||||||
self.cpp_info.components["soci_empty"].names["cmake_find_package_multi"] = "soci_empty{}".format(target_suffix)
|
|
||||||
if self.options.with_sqlite3:
|
|
||||||
self.cpp_info.components["soci_sqlite3"].names["cmake_find_package"] = "soci_sqlite3{}".format(target_suffix)
|
|
||||||
self.cpp_info.components["soci_sqlite3"].names["cmake_find_package_multi"] = "soci_sqlite3{}".format(target_suffix)
|
|
||||||
if self.options.with_odbc:
|
|
||||||
self.cpp_info.components["soci_odbc"].names["cmake_find_package"] = "soci_odbc{}".format(target_suffix)
|
|
||||||
self.cpp_info.components["soci_odbc"].names["cmake_find_package_multi"] = "soci_odbc{}".format(target_suffix)
|
|
||||||
if self.options.with_mysql:
|
|
||||||
self.cpp_info.components["soci_mysql"].names["cmake_find_package"] = "soci_mysql{}".format(target_suffix)
|
|
||||||
self.cpp_info.components["soci_mysql"].names["cmake_find_package_multi"] = "soci_mysql{}".format(target_suffix)
|
|
||||||
if self.options.with_postgresql:
|
|
||||||
self.cpp_info.components["soci_postgresql"].names["cmake_find_package"] = "soci_postgresql{}".format(target_suffix)
|
|
||||||
self.cpp_info.components["soci_postgresql"].names["cmake_find_package_multi"] = "soci_postgresql{}".format(target_suffix)
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
From d491bf7b5040d314ffd0c6310ba01f78ff44c85e Mon Sep 17 00:00:00 2001
|
|
||||||
From: Rasmus Thomsen <rasmus.thomsen@dampsoft.de>
|
|
||||||
Date: Fri, 14 Apr 2023 09:16:29 +0200
|
|
||||||
Subject: [PATCH] Remove hardcoded INSTALL_NAME_DIR for relocatable libraries
|
|
||||||
on MacOS
|
|
||||||
|
|
||||||
---
|
|
||||||
cmake/SociBackend.cmake | 2 +-
|
|
||||||
src/core/CMakeLists.txt | 1 -
|
|
||||||
2 files changed, 1 insertion(+), 2 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake
|
|
||||||
index 5d4ef0df..39fe1f77 100644
|
|
||||||
--- a/cmake/SociBackend.cmake
|
|
||||||
+++ b/cmake/SociBackend.cmake
|
|
||||||
@@ -171,7 +171,7 @@ macro(soci_backend NAME)
|
|
||||||
set_target_properties(${THIS_BACKEND_TARGET}
|
|
||||||
PROPERTIES
|
|
||||||
SOVERSION ${${PROJECT_NAME}_SOVERSION}
|
|
||||||
- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib)
|
|
||||||
+ )
|
|
||||||
|
|
||||||
if(APPLE)
|
|
||||||
set_target_properties(${THIS_BACKEND_TARGET}
|
|
||||||
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
|
|
||||||
index 3e7deeae..f9eae564 100644
|
|
||||||
--- a/src/core/CMakeLists.txt
|
|
||||||
+++ b/src/core/CMakeLists.txt
|
|
||||||
@@ -59,7 +59,6 @@ if (SOCI_SHARED)
|
|
||||||
PROPERTIES
|
|
||||||
VERSION ${SOCI_VERSION}
|
|
||||||
SOVERSION ${SOCI_SOVERSION}
|
|
||||||
- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib
|
|
||||||
CLEAN_DIRECT_OUTPUT 1)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
--
|
|
||||||
2.25.1
|
|
||||||
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake
|
|
||||||
index 0a664667..3fa2ed95 100644
|
|
||||||
--- a/cmake/SociBackend.cmake
|
|
||||||
+++ b/cmake/SociBackend.cmake
|
|
||||||
@@ -31,14 +31,13 @@ macro(soci_backend_deps_found NAME DEPS SUCCESS)
|
|
||||||
if(NOT DEPEND_FOUND)
|
|
||||||
list(APPEND DEPS_NOT_FOUND ${dep})
|
|
||||||
else()
|
|
||||||
- string(TOUPPER "${dep}" DEPU)
|
|
||||||
- if( ${DEPU}_INCLUDE_DIR )
|
|
||||||
- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIR})
|
|
||||||
+ if( ${dep}_INCLUDE_DIR )
|
|
||||||
+ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIR})
|
|
||||||
endif()
|
|
||||||
- if( ${DEPU}_INCLUDE_DIRS )
|
|
||||||
- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIRS})
|
|
||||||
+ if( ${dep}_INCLUDE_DIRS )
|
|
||||||
+ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIRS})
|
|
||||||
endif()
|
|
||||||
- list(APPEND DEPS_LIBRARIES ${${DEPU}_LIBRARIES})
|
|
||||||
+ list(APPEND DEPS_LIBRARIES ${${dep}_LIBRARIES})
|
|
||||||
endif()
|
|
||||||
endforeach()
|
|
||||||
|
|
||||||
@@ -25,6 +25,7 @@
|
|||||||
|
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
namespace ripple {
|
namespace ripple {
|
||||||
|
|
||||||
|
|||||||
@@ -26,6 +26,7 @@
|
|||||||
#include <boost/beast/core/string.hpp>
|
#include <boost/beast/core/string.hpp>
|
||||||
#include <boost/filesystem.hpp>
|
#include <boost/filesystem.hpp>
|
||||||
|
|
||||||
|
#include <fstream>
|
||||||
#include <map>
|
#include <map>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
@@ -186,7 +187,10 @@ public:
|
|||||||
operator[](std::string const& name);
|
operator[](std::string const& name);
|
||||||
|
|
||||||
beast::Journal
|
beast::Journal
|
||||||
journal(std::string const& name);
|
journal(
|
||||||
|
std::string const& name,
|
||||||
|
std::optional<beast::Journal::JsonLogAttributes> attributes =
|
||||||
|
std::nullopt);
|
||||||
|
|
||||||
beast::severities::Severity
|
beast::severities::Severity
|
||||||
threshold() const;
|
threshold() const;
|
||||||
@@ -236,19 +240,19 @@ public:
|
|||||||
static LogSeverity
|
static LogSeverity
|
||||||
fromString(std::string const& s);
|
fromString(std::string const& s);
|
||||||
|
|
||||||
private:
|
|
||||||
enum {
|
|
||||||
// Maximum line length for log messages.
|
|
||||||
// If the message exceeds this length it will be truncated with elipses.
|
|
||||||
maximumMessageCharacters = 12 * 1024
|
|
||||||
};
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
format(
|
format(
|
||||||
std::string& output,
|
std::string& output,
|
||||||
std::string const& message,
|
std::string const& message,
|
||||||
beast::severities::Severity severity,
|
beast::severities::Severity severity,
|
||||||
std::string const& partition);
|
std::string const& partition);
|
||||||
|
|
||||||
|
private:
|
||||||
|
enum {
|
||||||
|
// Maximum line length for log messages.
|
||||||
|
// If the message exceeds this length it will be truncated with elipses.
|
||||||
|
maximumMessageCharacters = 12 * 1024
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
// Wraps a Journal::Stream to skip evaluation of
|
// Wraps a Journal::Stream to skip evaluation of
|
||||||
|
|||||||
@@ -4,37 +4,34 @@ Utility functions and classes.
|
|||||||
|
|
||||||
ripple/basic should contain no dependencies on other modules.
|
ripple/basic should contain no dependencies on other modules.
|
||||||
|
|
||||||
|
# Choosing a rippled container.
|
||||||
|
|
||||||
Choosing a rippled container.
|
- `std::vector`
|
||||||
=============================
|
- For ordered containers with most insertions or erases at the end.
|
||||||
|
|
||||||
* `std::vector`
|
- `std::deque`
|
||||||
* For ordered containers with most insertions or erases at the end.
|
- For ordered containers with most insertions or erases at the start or end.
|
||||||
|
|
||||||
* `std::deque`
|
- `std::list`
|
||||||
* For ordered containers with most insertions or erases at the start or end.
|
- For ordered containers with inserts and erases to the middle.
|
||||||
|
- For containers with iterators stable over insert and erase.
|
||||||
* `std::list`
|
- Generally slower and bigger than `std::vector` or `std::deque` except for
|
||||||
* For ordered containers with inserts and erases to the middle.
|
|
||||||
* For containers with iterators stable over insert and erase.
|
|
||||||
* Generally slower and bigger than `std::vector` or `std::deque` except for
|
|
||||||
those cases.
|
those cases.
|
||||||
|
|
||||||
* `std::set`
|
- `std::set`
|
||||||
* For sorted containers.
|
- For sorted containers.
|
||||||
|
|
||||||
* `ripple::hash_set`
|
- `ripple::hash_set`
|
||||||
* Where inserts and contains need to be O(1).
|
- Where inserts and contains need to be O(1).
|
||||||
* For "small" sets, `std::set` might be faster and smaller.
|
- For "small" sets, `std::set` might be faster and smaller.
|
||||||
|
|
||||||
* `ripple::hardened_hash_set`
|
- `ripple::hardened_hash_set`
|
||||||
* For data sets where the key could be manipulated by an attacker
|
- For data sets where the key could be manipulated by an attacker
|
||||||
in an attempt to mount an algorithmic complexity attack: see
|
in an attempt to mount an algorithmic complexity attack: see
|
||||||
http://en.wikipedia.org/wiki/Algorithmic_complexity_attack
|
http://en.wikipedia.org/wiki/Algorithmic_complexity_attack
|
||||||
|
|
||||||
|
|
||||||
The following container is deprecated
|
The following container is deprecated
|
||||||
|
|
||||||
* `std::unordered_set`
|
- `std::unordered_set`
|
||||||
* Use `ripple::hash_set` instead, which uses a better hashing algorithm.
|
- Use `ripple::hash_set` instead, which uses a better hashing algorithm.
|
||||||
* Or use `ripple::hardened_hash_set` to prevent algorithmic complexity attacks.
|
- Or use `ripple::hardened_hash_set` to prevent algorithmic complexity attacks.
|
||||||
|
|||||||
@@ -21,7 +21,6 @@
|
|||||||
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
|
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
|
||||||
|
|
||||||
#include <xrpl/basics/base_uint.h>
|
#include <xrpl/basics/base_uint.h>
|
||||||
#include <xrpl/basics/partitioned_unordered_map.h>
|
|
||||||
|
|
||||||
#include <ostream>
|
#include <ostream>
|
||||||
|
|
||||||
|
|||||||
@@ -29,7 +29,6 @@
|
|||||||
#include <array>
|
#include <array>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <optional>
|
#include <optional>
|
||||||
#include <sstream>
|
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
namespace ripple {
|
namespace ripple {
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user