mirror of
https://github.com/XRPLF/rippled.git
synced 2025-11-10 22:25:52 +00:00
Compare commits
4 Commits
Bronek/add
...
2.6.1-rc2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c46888f8f7 | ||
|
|
2ae65d2fdb | ||
|
|
8d01f35eb9 | ||
|
|
1020a32d76 |
@@ -1,20 +1,4 @@
|
|||||||
---
|
---
|
||||||
BreakBeforeBraces: Custom
|
|
||||||
BraceWrapping:
|
|
||||||
AfterClass: true
|
|
||||||
AfterControlStatement: true
|
|
||||||
AfterEnum: false
|
|
||||||
AfterFunction: true
|
|
||||||
AfterNamespace: false
|
|
||||||
AfterObjCDeclaration: true
|
|
||||||
AfterStruct: true
|
|
||||||
AfterUnion: true
|
|
||||||
BeforeCatch: true
|
|
||||||
BeforeElse: true
|
|
||||||
IndentBraces: false
|
|
||||||
KeepEmptyLinesAtTheStartOfBlocks: false
|
|
||||||
MaxEmptyLinesToKeep: 1
|
|
||||||
---
|
|
||||||
Language: Cpp
|
Language: Cpp
|
||||||
AccessModifierOffset: -4
|
AccessModifierOffset: -4
|
||||||
AlignAfterOpenBracket: AlwaysBreak
|
AlignAfterOpenBracket: AlwaysBreak
|
||||||
@@ -34,7 +18,20 @@ AlwaysBreakBeforeMultilineStrings: true
|
|||||||
AlwaysBreakTemplateDeclarations: true
|
AlwaysBreakTemplateDeclarations: true
|
||||||
BinPackArguments: false
|
BinPackArguments: false
|
||||||
BinPackParameters: false
|
BinPackParameters: false
|
||||||
|
BraceWrapping:
|
||||||
|
AfterClass: true
|
||||||
|
AfterControlStatement: true
|
||||||
|
AfterEnum: false
|
||||||
|
AfterFunction: true
|
||||||
|
AfterNamespace: false
|
||||||
|
AfterObjCDeclaration: true
|
||||||
|
AfterStruct: true
|
||||||
|
AfterUnion: true
|
||||||
|
BeforeCatch: true
|
||||||
|
BeforeElse: true
|
||||||
|
IndentBraces: false
|
||||||
BreakBeforeBinaryOperators: false
|
BreakBeforeBinaryOperators: false
|
||||||
|
BreakBeforeBraces: Custom
|
||||||
BreakBeforeTernaryOperators: true
|
BreakBeforeTernaryOperators: true
|
||||||
BreakConstructorInitializersBeforeComma: true
|
BreakConstructorInitializersBeforeComma: true
|
||||||
ColumnLimit: 80
|
ColumnLimit: 80
|
||||||
@@ -69,6 +66,8 @@ IndentFunctionDeclarationAfterType: false
|
|||||||
IndentRequiresClause: true
|
IndentRequiresClause: true
|
||||||
IndentWidth: 4
|
IndentWidth: 4
|
||||||
IndentWrappedFunctionNames: false
|
IndentWrappedFunctionNames: false
|
||||||
|
KeepEmptyLinesAtTheStartOfBlocks: false
|
||||||
|
MaxEmptyLinesToKeep: 1
|
||||||
NamespaceIndentation: None
|
NamespaceIndentation: None
|
||||||
ObjCSpaceAfterProperty: false
|
ObjCSpaceAfterProperty: false
|
||||||
ObjCSpaceBeforeProtocolList: false
|
ObjCSpaceBeforeProtocolList: false
|
||||||
@@ -97,7 +96,7 @@ TabWidth: 8
|
|||||||
UseTab: Never
|
UseTab: Never
|
||||||
QualifierAlignment: Right
|
QualifierAlignment: Right
|
||||||
---
|
---
|
||||||
Language: Proto
|
Language: JavaScript
|
||||||
BasedOnStyle: Google
|
---
|
||||||
ColumnLimit: 0
|
Language: Json
|
||||||
IndentWidth: 2
|
IndentWidth: 2
|
||||||
|
|||||||
@@ -12,5 +12,3 @@ fe9a5365b8a52d4acc42eb27369247e6f238a4f9
|
|||||||
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
|
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
|
||||||
552377c76f55b403a1c876df873a23d780fcc81c
|
552377c76f55b403a1c876df873a23d780fcc81c
|
||||||
97f0747e103f13e26e45b731731059b32f7679ac
|
97f0747e103f13e26e45b731731059b32f7679ac
|
||||||
b13370ac0d207217354f1fc1c29aef87769fb8a1
|
|
||||||
896b8c3b54a22b0497cb0d1ce95e1095f9a227ce
|
|
||||||
|
|||||||
62
.github/actions/build-deps/action.yml
vendored
62
.github/actions/build-deps/action.yml
vendored
@@ -1,62 +0,0 @@
|
|||||||
# This action installs and optionally uploads Conan dependencies to a remote
|
|
||||||
# repository. The dependencies will only be uploaded if the credentials are
|
|
||||||
# provided.
|
|
||||||
name: Build Conan dependencies
|
|
||||||
|
|
||||||
# Note that actions do not support 'type' and all inputs are strings, see
|
|
||||||
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
|
|
||||||
inputs:
|
|
||||||
build_dir:
|
|
||||||
description: "The directory where to build."
|
|
||||||
required: true
|
|
||||||
build_type:
|
|
||||||
description: 'The build type to use ("Debug", "Release").'
|
|
||||||
required: true
|
|
||||||
conan_remote_name:
|
|
||||||
description: "The name of the Conan remote to use."
|
|
||||||
required: true
|
|
||||||
conan_remote_url:
|
|
||||||
description: "The URL of the Conan endpoint to use."
|
|
||||||
required: true
|
|
||||||
conan_remote_username:
|
|
||||||
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
|
|
||||||
required: false
|
|
||||||
default: ""
|
|
||||||
conan_remote_password:
|
|
||||||
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
|
|
||||||
required: false
|
|
||||||
default: ""
|
|
||||||
force_build:
|
|
||||||
description: 'Force building of all dependencies ("true", "false").'
|
|
||||||
required: false
|
|
||||||
default: "false"
|
|
||||||
force_upload:
|
|
||||||
description: 'Force uploading of all dependencies ("true", "false").'
|
|
||||||
required: false
|
|
||||||
default: "false"
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: composite
|
|
||||||
steps:
|
|
||||||
- name: Install Conan dependencies
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
echo 'Installing dependencies.'
|
|
||||||
mkdir -p ${{ inputs.build_dir }}
|
|
||||||
cd ${{ inputs.build_dir }}
|
|
||||||
conan install \
|
|
||||||
--output-folder . \
|
|
||||||
--build ${{ inputs.force_build == 'true' && '"*"' || 'missing' }} \
|
|
||||||
--options:host '&:tests=True' \
|
|
||||||
--options:host '&:xrpld=True' \
|
|
||||||
--settings:all build_type=${{ inputs.build_type }} \
|
|
||||||
--format=json ..
|
|
||||||
- name: Upload Conan dependencies
|
|
||||||
if: ${{ inputs.conan_remote_username != '' && inputs.conan_remote_password != '' }}
|
|
||||||
shell: bash
|
|
||||||
working-directory: ${{ inputs.build_dir }}
|
|
||||||
run: |
|
|
||||||
echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
|
|
||||||
conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}"
|
|
||||||
echo 'Uploading dependencies.'
|
|
||||||
conan upload '*' --confirm --check ${{ inputs.force_upload == 'true' && '--force' || '' }} --remote=${{ inputs.conan_remote_name }}
|
|
||||||
95
.github/actions/build-test/action.yml
vendored
95
.github/actions/build-test/action.yml
vendored
@@ -1,95 +0,0 @@
|
|||||||
# This action build and tests the binary. The Conan dependencies must have
|
|
||||||
# already been installed (see the build-deps action).
|
|
||||||
name: Build and Test
|
|
||||||
|
|
||||||
# Note that actions do not support 'type' and all inputs are strings, see
|
|
||||||
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
|
|
||||||
inputs:
|
|
||||||
build_dir:
|
|
||||||
description: "The directory where to build."
|
|
||||||
required: true
|
|
||||||
build_only:
|
|
||||||
description: 'Whether to only build or to build and test the code ("true", "false").'
|
|
||||||
required: false
|
|
||||||
default: "false"
|
|
||||||
build_type:
|
|
||||||
description: 'The build type to use ("Debug", "Release").'
|
|
||||||
required: true
|
|
||||||
cmake_args:
|
|
||||||
description: "Additional arguments to pass to CMake."
|
|
||||||
required: false
|
|
||||||
default: ""
|
|
||||||
cmake_target:
|
|
||||||
description: "The CMake target to build."
|
|
||||||
required: true
|
|
||||||
codecov_token:
|
|
||||||
description: "The Codecov token to use for uploading coverage reports."
|
|
||||||
required: false
|
|
||||||
default: ""
|
|
||||||
os:
|
|
||||||
description: 'The operating system to use for the build ("linux", "macos", "windows").'
|
|
||||||
required: true
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: composite
|
|
||||||
steps:
|
|
||||||
- name: Configure CMake
|
|
||||||
shell: bash
|
|
||||||
working-directory: ${{ inputs.build_dir }}
|
|
||||||
run: |
|
|
||||||
echo 'Configuring CMake.'
|
|
||||||
cmake \
|
|
||||||
-G '${{ inputs.os == 'windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
|
|
||||||
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
|
|
||||||
-DCMAKE_BUILD_TYPE=${{ inputs.build_type }} \
|
|
||||||
${{ inputs.cmake_args }} \
|
|
||||||
..
|
|
||||||
- name: Build the binary
|
|
||||||
shell: bash
|
|
||||||
working-directory: ${{ inputs.build_dir }}
|
|
||||||
run: |
|
|
||||||
echo 'Building binary.'
|
|
||||||
cmake \
|
|
||||||
--build . \
|
|
||||||
--config ${{ inputs.build_type }} \
|
|
||||||
--parallel $(nproc) \
|
|
||||||
--target ${{ inputs.cmake_target }}
|
|
||||||
- name: Check linking
|
|
||||||
if: ${{ inputs.os == 'linux' }}
|
|
||||||
shell: bash
|
|
||||||
working-directory: ${{ inputs.build_dir }}
|
|
||||||
run: |
|
|
||||||
echo 'Checking linking.'
|
|
||||||
ldd ./rippled
|
|
||||||
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
|
|
||||||
echo 'The binary is statically linked.'
|
|
||||||
else
|
|
||||||
echo 'The binary is dynamically linked.'
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
- name: Verify voidstar
|
|
||||||
if: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
|
|
||||||
shell: bash
|
|
||||||
working-directory: ${{ inputs.build_dir }}
|
|
||||||
run: |
|
|
||||||
echo 'Verifying presence of instrumentation.'
|
|
||||||
./rippled --version | grep libvoidstar
|
|
||||||
- name: Test the binary
|
|
||||||
if: ${{ inputs.build_only == 'false' }}
|
|
||||||
shell: bash
|
|
||||||
working-directory: ${{ inputs.build_dir }}/${{ inputs.os == 'windows' && inputs.build_type || '' }}
|
|
||||||
run: |
|
|
||||||
echo 'Testing binary.'
|
|
||||||
./rippled --unittest --unittest-jobs $(nproc)
|
|
||||||
ctest -j $(nproc) --output-on-failure
|
|
||||||
- name: Upload coverage report
|
|
||||||
if: ${{ inputs.cmake_target == 'coverage' }}
|
|
||||||
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
|
|
||||||
with:
|
|
||||||
disable_search: true
|
|
||||||
disable_telem: true
|
|
||||||
fail_ci_if_error: true
|
|
||||||
files: ${{ inputs.build_dir }}/coverage.xml
|
|
||||||
plugins: noop
|
|
||||||
token: ${{ inputs.codecov_token }}
|
|
||||||
verbose: true
|
|
||||||
34
.github/actions/build/action.yml
vendored
Normal file
34
.github/actions/build/action.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
name: build
|
||||||
|
inputs:
|
||||||
|
generator:
|
||||||
|
default: null
|
||||||
|
configuration:
|
||||||
|
required: true
|
||||||
|
cmake-args:
|
||||||
|
default: null
|
||||||
|
cmake-target:
|
||||||
|
default: all
|
||||||
|
# An implicit input is the environment variable `build_dir`.
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: configure
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
cd ${build_dir}
|
||||||
|
cmake \
|
||||||
|
${{ inputs.generator && format('-G "{0}"', inputs.generator) || '' }} \
|
||||||
|
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
|
||||||
|
-DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \
|
||||||
|
-Dtests=TRUE \
|
||||||
|
-Dxrpld=TRUE \
|
||||||
|
${{ inputs.cmake-args }} \
|
||||||
|
..
|
||||||
|
- name: build
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
cmake \
|
||||||
|
--build ${build_dir} \
|
||||||
|
--config ${{ inputs.configuration }} \
|
||||||
|
--parallel ${NUM_PROCESSORS:-$(nproc)} \
|
||||||
|
--target ${{ inputs.cmake-target }}
|
||||||
38
.github/actions/dependencies/action.yml
vendored
Normal file
38
.github/actions/dependencies/action.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
name: dependencies
|
||||||
|
inputs:
|
||||||
|
configuration:
|
||||||
|
required: true
|
||||||
|
# Implicit inputs are the environment variables `build_dir`, CONAN_REMOTE_URL,
|
||||||
|
# CONAN_REMOTE_USERNAME, and CONAN_REMOTE_PASSWORD. The latter two are only
|
||||||
|
# used to upload newly built dependencies to the Conan remote.
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: add Conan remote
|
||||||
|
if: ${{ env.CONAN_REMOTE_URL != '' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
|
||||||
|
conan remote add --index 0 --force xrplf ${{ env.CONAN_REMOTE_URL }}
|
||||||
|
echo "Listing Conan remotes."
|
||||||
|
conan remote list
|
||||||
|
- name: install dependencies
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
mkdir -p ${{ env.build_dir }}
|
||||||
|
cd ${{ env.build_dir }}
|
||||||
|
conan install \
|
||||||
|
--output-folder . \
|
||||||
|
--build missing \
|
||||||
|
--options:host "&:tests=True" \
|
||||||
|
--options:host "&:xrpld=True" \
|
||||||
|
--settings:all build_type=${{ inputs.configuration }} \
|
||||||
|
..
|
||||||
|
- name: upload dependencies
|
||||||
|
if: ${{ env.CONAN_REMOTE_URL != '' && env.CONAN_REMOTE_USERNAME != '' && env.CONAN_REMOTE_PASSWORD != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "Logging into Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
|
||||||
|
conan remote login xrplf "${{ env.CONAN_REMOTE_USERNAME }}" --password "${{ env.CONAN_REMOTE_PASSWORD }}"
|
||||||
|
echo "Uploading dependencies."
|
||||||
|
conan upload '*' --confirm --check --remote xrplf
|
||||||
178
.github/scripts/strategy-matrix/generate.py
vendored
178
.github/scripts/strategy-matrix/generate.py
vendored
@@ -1,178 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
import argparse
|
|
||||||
import itertools
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
|
|
||||||
'''
|
|
||||||
Generate a strategy matrix for GitHub Actions CI.
|
|
||||||
|
|
||||||
On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and
|
|
||||||
Windows configurations, while upon merge into the develop, release, or master
|
|
||||||
branches, we will build all configurations, and test most of them.
|
|
||||||
|
|
||||||
We will further set additional CMake arguments as follows:
|
|
||||||
- All builds will have the `tests`, `werr`, and `xrpld` options.
|
|
||||||
- All builds will have the `wextra` option except for GCC 12 and Clang 16.
|
|
||||||
- All release builds will have the `assert` option.
|
|
||||||
- Certain Debian Bookworm configurations will change the reference fee, enable
|
|
||||||
codecov, and enable voidstar in PRs.
|
|
||||||
'''
|
|
||||||
def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict], build_type: list[str], cmake_args: list[str]) -> dict:
|
|
||||||
configurations = []
|
|
||||||
for architecture, os, build_type, cmake_args in itertools.product(architecture, os, build_type, cmake_args):
|
|
||||||
# The default CMake target is 'all' for Linux and MacOS and 'install'
|
|
||||||
# for Windows, but it can get overridden for certain configurations.
|
|
||||||
cmake_target = 'install' if os["distro_name"] == 'windows' else 'all'
|
|
||||||
|
|
||||||
# We build and test all configurations by default, except for Windows in
|
|
||||||
# Debug, because it is too slow, as well as when code coverage is
|
|
||||||
# enabled as that mode already runs the tests.
|
|
||||||
build_only = False
|
|
||||||
if os['distro_name'] == 'windows' and build_type == 'Debug':
|
|
||||||
build_only = True
|
|
||||||
|
|
||||||
# Only generate a subset of configurations in PRs.
|
|
||||||
if not all:
|
|
||||||
# Debian:
|
|
||||||
# - Bookworm using GCC 13: Release and Unity on linux/arm64, set
|
|
||||||
# the reference fee to 500.
|
|
||||||
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
|
|
||||||
# code coverage (which will be done below).
|
|
||||||
# - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
|
|
||||||
# enable voidstar.
|
|
||||||
# - Bookworm using Clang 17: Release and no Unity on linux/amd64,
|
|
||||||
# set the reference fee to 1000.
|
|
||||||
# - Bookworm using Clang 20: Debug and Unity on linux/amd64.
|
|
||||||
if os['distro_name'] == 'debian':
|
|
||||||
skip = True
|
|
||||||
if os['distro_version'] == 'bookworm':
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64':
|
|
||||||
cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}'
|
|
||||||
skip = False
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
skip = False
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-16' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64':
|
|
||||||
cmake_args = f'-Dvoidstar=ON {cmake_args}'
|
|
||||||
skip = False
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-17' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}'
|
|
||||||
skip = False
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
skip = False
|
|
||||||
if skip:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# RHEL:
|
|
||||||
# - 9.4 using GCC 12: Debug and Unity on linux/amd64.
|
|
||||||
# - 9.6 using Clang: Release and no Unity on linux/amd64.
|
|
||||||
if os['distro_name'] == 'rhel':
|
|
||||||
skip = True
|
|
||||||
if os['distro_version'] == '9.4':
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
skip = False
|
|
||||||
elif os['distro_version'] == '9.6':
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
skip = False
|
|
||||||
if skip:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Ubuntu:
|
|
||||||
# - Jammy using GCC 12: Debug and no Unity on linux/arm64.
|
|
||||||
# - Noble using GCC 14: Release and Unity on linux/amd64.
|
|
||||||
# - Noble using Clang 18: Debug and no Unity on linux/amd64.
|
|
||||||
# - Noble using Clang 19: Release and Unity on linux/arm64.
|
|
||||||
if os['distro_name'] == 'ubuntu':
|
|
||||||
skip = True
|
|
||||||
if os['distro_version'] == 'jammy':
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64':
|
|
||||||
skip = False
|
|
||||||
elif os['distro_version'] == 'noble':
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-14' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
skip = False
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-18' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
skip = False
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-19' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64':
|
|
||||||
skip = False
|
|
||||||
if skip:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# MacOS:
|
|
||||||
# - Debug and no Unity on macos/arm64.
|
|
||||||
if os['distro_name'] == 'macos' and not (build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'macos/arm64'):
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Windows:
|
|
||||||
# - Release and Unity on windows/amd64.
|
|
||||||
if os['distro_name'] == 'windows' and not (build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'windows/amd64'):
|
|
||||||
continue
|
|
||||||
|
|
||||||
|
|
||||||
# Additional CMake arguments.
|
|
||||||
cmake_args = f'{cmake_args} -Dtests=ON -Dwerr=ON -Dxrpld=ON'
|
|
||||||
if not f'{os['compiler_name']}-{os['compiler_version']}' in ['gcc-12', 'clang-16']:
|
|
||||||
cmake_args = f'{cmake_args} -Dwextra=ON'
|
|
||||||
if build_type == 'Release':
|
|
||||||
cmake_args = f'{cmake_args} -Dassert=ON'
|
|
||||||
|
|
||||||
# We skip all RHEL on arm64 due to a build failure that needs further
|
|
||||||
# investigation.
|
|
||||||
if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
|
|
||||||
continue
|
|
||||||
|
|
||||||
# We skip all clang-20 on arm64 due to boost 1.86 build error
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
|
|
||||||
# Unity on linux/amd64
|
|
||||||
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
|
|
||||||
cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}'
|
|
||||||
cmake_target = 'coverage'
|
|
||||||
build_only = True
|
|
||||||
|
|
||||||
# Generate a unique name for the configuration, e.g. macos-arm64-debug
|
|
||||||
# or debian-bookworm-gcc-12-amd64-release-unity.
|
|
||||||
config_name = os['distro_name']
|
|
||||||
if (n := os['distro_version']) != '':
|
|
||||||
config_name += f'-{n}'
|
|
||||||
if (n := os['compiler_name']) != '':
|
|
||||||
config_name += f'-{n}'
|
|
||||||
if (n := os['compiler_version']) != '':
|
|
||||||
config_name += f'-{n}'
|
|
||||||
config_name += f'-{architecture['platform'][architecture['platform'].find('/')+1:]}'
|
|
||||||
config_name += f'-{build_type.lower()}'
|
|
||||||
if '-Dunity=ON' in cmake_args:
|
|
||||||
config_name += '-unity'
|
|
||||||
|
|
||||||
# Add the configuration to the list, with the most unique fields first,
|
|
||||||
# so that they are easier to identify in the GitHub Actions UI, as long
|
|
||||||
# names get truncated.
|
|
||||||
configurations.append({
|
|
||||||
'config_name': config_name,
|
|
||||||
'cmake_args': cmake_args,
|
|
||||||
'cmake_target': cmake_target,
|
|
||||||
'build_only': 'true' if build_only else 'false',
|
|
||||||
'build_type': build_type,
|
|
||||||
'os': os,
|
|
||||||
'architecture': architecture,
|
|
||||||
})
|
|
||||||
|
|
||||||
return {'include': configurations}
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
parser = argparse.ArgumentParser()
|
|
||||||
parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true")
|
|
||||||
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=True, type=str)
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
# Load the JSON configuration file.
|
|
||||||
config = None
|
|
||||||
with open(args.config, 'r') as f:
|
|
||||||
config = json.load(f)
|
|
||||||
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
|
|
||||||
raise Exception('Invalid configuration file.')
|
|
||||||
|
|
||||||
# Generate the strategy matrix.
|
|
||||||
print(f'matrix={json.dumps(generate_strategy_matrix(args.all, config['architecture'], config['os'], config['build_type'], config['cmake_args']))}')
|
|
||||||
154
.github/scripts/strategy-matrix/linux.json
vendored
154
.github/scripts/strategy-matrix/linux.json
vendored
@@ -1,154 +0,0 @@
|
|||||||
{
|
|
||||||
"architecture": [
|
|
||||||
{
|
|
||||||
"platform": "linux/amd64",
|
|
||||||
"runner": ["self-hosted", "Linux", "X64", "heavy"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"platform": "linux/arm64",
|
|
||||||
"runner": ["self-hosted", "Linux", "ARM64", "heavy-arm64"]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"os": [
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "12"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "14"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "15"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "16"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "17"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "18"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "19"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "debian",
|
|
||||||
"distro_version": "bookworm",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "20"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.4",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "12"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.4",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.4",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "14"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.6",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.6",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "14"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.4",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "any"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "rhel",
|
|
||||||
"distro_version": "9.6",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "any"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "jammy",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "12"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "noble",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "noble",
|
|
||||||
"compiler_name": "gcc",
|
|
||||||
"compiler_version": "14"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "noble",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "16"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "noble",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "17"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "noble",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "18"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"distro_name": "ubuntu",
|
|
||||||
"distro_version": "noble",
|
|
||||||
"compiler_name": "clang",
|
|
||||||
"compiler_version": "19"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"build_type": ["Debug", "Release"],
|
|
||||||
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
|
|
||||||
}
|
|
||||||
21
.github/scripts/strategy-matrix/macos.json
vendored
21
.github/scripts/strategy-matrix/macos.json
vendored
@@ -1,21 +0,0 @@
|
|||||||
{
|
|
||||||
"architecture": [
|
|
||||||
{
|
|
||||||
"platform": "macos/arm64",
|
|
||||||
"runner": ["self-hosted", "macOS", "ARM64", "mac-runner-m1"]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"os": [
|
|
||||||
{
|
|
||||||
"distro_name": "macos",
|
|
||||||
"distro_version": "",
|
|
||||||
"compiler_name": "",
|
|
||||||
"compiler_version": ""
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"build_type": ["Debug", "Release"],
|
|
||||||
"cmake_args": [
|
|
||||||
"-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
|
|
||||||
"-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
18
.github/scripts/strategy-matrix/windows.json
vendored
18
.github/scripts/strategy-matrix/windows.json
vendored
@@ -1,18 +0,0 @@
|
|||||||
{
|
|
||||||
"architecture": [
|
|
||||||
{
|
|
||||||
"platform": "windows/amd64",
|
|
||||||
"runner": ["windows-latest"]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"os": [
|
|
||||||
{
|
|
||||||
"distro_name": "windows",
|
|
||||||
"distro_version": "",
|
|
||||||
"compiler_name": "",
|
|
||||||
"compiler_version": ""
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"build_type": ["Debug", "Release"],
|
|
||||||
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
|
|
||||||
}
|
|
||||||
163
.github/workflows/build-selected-commit.yml
vendored
163
.github/workflows/build-selected-commit.yml
vendored
@@ -1,163 +0,0 @@
|
|||||||
# This workflow builds the binary from the selected commit (not earlier than 2.5.0 release)
|
|
||||||
name: Build selected commit
|
|
||||||
|
|
||||||
# This workflow can only be triggered manually, by a project maintainer
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
commit:
|
|
||||||
description: "Commit to build from."
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
build_container:
|
|
||||||
description: "Build container image to use"
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
default: ghcr.io/xrplf/ci/debian-bullseye:gcc-12
|
|
||||||
strip_symbols:
|
|
||||||
description: "Strip debug symbols"
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
archive_archive:
|
|
||||||
description: "Archive rippled binary"
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
build_only:
|
|
||||||
description: "Only build, do not run unit tests"
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
build_type:
|
|
||||||
description: "Build type (Debug or Release)"
|
|
||||||
required: true
|
|
||||||
type: choice
|
|
||||||
default: Release
|
|
||||||
options:
|
|
||||||
- Debug
|
|
||||||
- Release
|
|
||||||
cmake_args:
|
|
||||||
description: "CMake args for build"
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
default: "-Dxrpld=ON -Dtests=ON -Dassert=OFF -Dunity=OFF"
|
|
||||||
dependencies_force_build:
|
|
||||||
description: "Force building of all dependencies."
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
|
|
||||||
env:
|
|
||||||
CONAN_REMOTE_NAME: xrplf
|
|
||||||
CONAN_REMOTE_URL: https://conan.ripplex.io
|
|
||||||
BUILD_DIR: .build
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ["self-hosted", "Linux", "X64", "heavy"]
|
|
||||||
container: ${{ inputs.build_container }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout this workflow
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
with:
|
|
||||||
sparse-checkout: |
|
|
||||||
.github
|
|
||||||
conan
|
|
||||||
- name: Move workflow files on a side
|
|
||||||
run: |
|
|
||||||
mkdir -p ${{ runner.temp }}
|
|
||||||
mv .github conan ${{ runner.temp }}
|
|
||||||
rm -rf .git
|
|
||||||
- name: Checkout the commit to build
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
with:
|
|
||||||
ref: ${{ inputs.commit }}
|
|
||||||
- name: Restore workflow files
|
|
||||||
run: |
|
|
||||||
rm -rf .github conan
|
|
||||||
mv ${{ runner.temp }}/.github .
|
|
||||||
mv ${{ runner.temp }}/conan .
|
|
||||||
- name: Prepare runner
|
|
||||||
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
|
|
||||||
with:
|
|
||||||
disable_ccache: true
|
|
||||||
- name: Check configuration
|
|
||||||
run: |
|
|
||||||
echo 'Checking path.'
|
|
||||||
echo ${PATH} | tr ':' '\n'
|
|
||||||
|
|
||||||
echo 'Checking environment variables.'
|
|
||||||
env | sort
|
|
||||||
|
|
||||||
echo 'Checking CMake version.'
|
|
||||||
cmake --version
|
|
||||||
|
|
||||||
echo 'Checking compiler version.'
|
|
||||||
${CC} --version
|
|
||||||
|
|
||||||
echo 'Checking Conan version.'
|
|
||||||
conan --version
|
|
||||||
|
|
||||||
echo 'Checking Ninja version.'
|
|
||||||
ninja --version
|
|
||||||
|
|
||||||
echo 'Checking nproc version.'
|
|
||||||
nproc --version
|
|
||||||
- name: Set up Conan configuration
|
|
||||||
run: |
|
|
||||||
echo 'Installing configuration.'
|
|
||||||
cat conan/global.conf >> $(conan config home)/global.conf
|
|
||||||
|
|
||||||
echo 'Conan configuration:'
|
|
||||||
conan config show '*'
|
|
||||||
- name: Set up Conan profile
|
|
||||||
run: |
|
|
||||||
echo 'Installing profile.'
|
|
||||||
conan config install conan/profiles/default -tf $(conan config home)/profiles/
|
|
||||||
|
|
||||||
echo 'Conan profile:'
|
|
||||||
conan profile show
|
|
||||||
- name: Set up Conan remote
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at ${{ env.CONAN_REMOTE_URL }}."
|
|
||||||
conan remote add --index 0 --force ${{ env.CONAN_REMOTE_NAME }} ${{ env.CONAN_REMOTE_URL }}
|
|
||||||
|
|
||||||
echo 'Listing Conan remotes.'
|
|
||||||
conan remote list
|
|
||||||
- name: Build dependencies
|
|
||||||
uses: ./.github/actions/build-deps
|
|
||||||
with:
|
|
||||||
build_dir: ${{ env.BUILD_DIR }}
|
|
||||||
build_type: ${{ inputs.build_type }}
|
|
||||||
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
|
|
||||||
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
|
|
||||||
force_build: ${{ inputs.dependencies_force_build }}
|
|
||||||
force_upload: false
|
|
||||||
- name: Build and test binary
|
|
||||||
uses: ./.github/actions/build-test
|
|
||||||
with:
|
|
||||||
build_dir: ${{ env.BUILD_DIR }}
|
|
||||||
build_only: ${{ inputs.build_only }}
|
|
||||||
build_type: ${{ inputs.build_type }}
|
|
||||||
cmake_args: ${{ inputs.cmake_args }}
|
|
||||||
cmake_target: "all"
|
|
||||||
os: "linux"
|
|
||||||
- name: Strip symbols
|
|
||||||
if: ${{ inputs.strip_symbols == 'true' }}
|
|
||||||
run: |
|
|
||||||
strip -D --strip-unneeded ${{ env.BUILD_DIR }}/rippled
|
|
||||||
${{ env.BUILD_DIR }}/rippled --version
|
|
||||||
- name: Move the binary
|
|
||||||
run: |
|
|
||||||
mv ${{ env.BUILD_DIR }}/rippled .
|
|
||||||
- name: Archive rippled binary
|
|
||||||
if: ${{ inputs.archive_archive == 'true' }}
|
|
||||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
|
||||||
with:
|
|
||||||
name: rippled
|
|
||||||
path: ./rippled
|
|
||||||
retention-days: 90
|
|
||||||
compression-level: 8
|
|
||||||
overwrite: true
|
|
||||||
199
.github/workflows/build-test.yml
vendored
199
.github/workflows/build-test.yml
vendored
@@ -1,199 +0,0 @@
|
|||||||
# This workflow builds and tests the binary for various configurations.
|
|
||||||
name: Build and test
|
|
||||||
|
|
||||||
# This workflow can only be triggered by other workflows. Note that the
|
|
||||||
# workflow_call event does not support the 'choice' input type, see
|
|
||||||
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
|
|
||||||
# so we use 'string' instead.
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
build_dir:
|
|
||||||
description: "The directory where to build."
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: ".build"
|
|
||||||
conan_remote_name:
|
|
||||||
description: "The name of the Conan remote to use."
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
conan_remote_url:
|
|
||||||
description: "The URL of the Conan endpoint to use."
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
dependencies_force_build:
|
|
||||||
description: "Force building of all dependencies."
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
dependencies_force_upload:
|
|
||||||
description: "Force uploading of all dependencies."
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
os:
|
|
||||||
description: 'The operating system to use for the build ("linux", "macos", "windows").'
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
strategy_matrix:
|
|
||||||
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
|
|
||||||
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
default: "minimal"
|
|
||||||
secrets:
|
|
||||||
codecov_token:
|
|
||||||
description: "The Codecov token to use for uploading coverage reports."
|
|
||||||
required: false
|
|
||||||
conan_remote_username:
|
|
||||||
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
|
|
||||||
required: false
|
|
||||||
conan_remote_password:
|
|
||||||
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
|
|
||||||
required: false
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
# Generate the strategy matrix to be used by the following job.
|
|
||||||
generate-matrix:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Set up Python
|
|
||||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
|
||||||
with:
|
|
||||||
python-version: 3.13
|
|
||||||
- name: Generate strategy matrix
|
|
||||||
working-directory: .github/scripts/strategy-matrix
|
|
||||||
id: generate
|
|
||||||
run: python generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}"
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.generate.outputs.matrix }}
|
|
||||||
|
|
||||||
# Build and test the binary.
|
|
||||||
build-test:
|
|
||||||
needs:
|
|
||||||
- generate-matrix
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
|
|
||||||
runs-on: ${{ matrix.architecture.runner }}
|
|
||||||
container: ${{ inputs.os == 'linux' && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
|
|
||||||
steps:
|
|
||||||
- name: Check strategy matrix
|
|
||||||
run: |
|
|
||||||
echo 'Operating system distro name: ${{ matrix.os.distro_name }}'
|
|
||||||
echo 'Operating system distro version: ${{ matrix.os.distro_version }}'
|
|
||||||
echo 'Operating system compiler name: ${{ matrix.os.compiler_name }}'
|
|
||||||
echo 'Operating system compiler version: ${{ matrix.os.compiler_version }}'
|
|
||||||
echo 'Architecture platform: ${{ matrix.architecture.platform }}'
|
|
||||||
echo 'Architecture runner: ${{ toJson(matrix.architecture.runner) }}'
|
|
||||||
echo 'Build type: ${{ matrix.build_type }}'
|
|
||||||
echo 'Build only: ${{ matrix.build_only }}'
|
|
||||||
echo 'CMake arguments: ${{ matrix.cmake_args }}'
|
|
||||||
echo 'CMake target: ${{ matrix.cmake_target }}'
|
|
||||||
echo 'Config name: ${{ matrix.config_name }}'
|
|
||||||
|
|
||||||
- name: Clean workspace (MacOS)
|
|
||||||
if: ${{ inputs.os == 'macos' }}
|
|
||||||
run: |
|
|
||||||
WORKSPACE=${{ github.workspace }}
|
|
||||||
echo "Cleaning workspace '${WORKSPACE}'."
|
|
||||||
if [ -z "${WORKSPACE}" ] || [ "${WORKSPACE}" = "/" ]; then
|
|
||||||
echo "Invalid working directory '${WORKSPACE}'."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
find "${WORKSPACE}" -depth 1 | xargs rm -rfv
|
|
||||||
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Prepare runner
|
|
||||||
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
|
|
||||||
|
|
||||||
- name: Check configuration (Windows)
|
|
||||||
if: ${{ inputs.os == 'windows' }}
|
|
||||||
run: |
|
|
||||||
echo 'Checking environment variables.'
|
|
||||||
set
|
|
||||||
|
|
||||||
echo 'Checking CMake version.'
|
|
||||||
cmake --version
|
|
||||||
|
|
||||||
echo 'Checking Conan version.'
|
|
||||||
conan --version
|
|
||||||
- name: Check configuration (Linux and MacOS)
|
|
||||||
if: ${{ inputs.os == 'linux' || inputs.os == 'macos' }}
|
|
||||||
run: |
|
|
||||||
echo 'Checking path.'
|
|
||||||
echo ${PATH} | tr ':' '\n'
|
|
||||||
|
|
||||||
echo 'Checking environment variables.'
|
|
||||||
env | sort
|
|
||||||
|
|
||||||
echo 'Checking CMake version.'
|
|
||||||
cmake --version
|
|
||||||
|
|
||||||
echo 'Checking compiler version.'
|
|
||||||
${{ inputs.os == 'linux' && '${CC}' || 'clang' }} --version
|
|
||||||
|
|
||||||
echo 'Checking Conan version.'
|
|
||||||
conan --version
|
|
||||||
|
|
||||||
echo 'Checking Ninja version.'
|
|
||||||
ninja --version
|
|
||||||
|
|
||||||
echo 'Checking nproc version.'
|
|
||||||
nproc --version
|
|
||||||
|
|
||||||
- name: Set up Conan configuration
|
|
||||||
run: |
|
|
||||||
echo 'Installing configuration.'
|
|
||||||
cat conan/global.conf ${{ inputs.os == 'linux' && '>>' || '>' }} $(conan config home)/global.conf
|
|
||||||
|
|
||||||
echo 'Conan configuration:'
|
|
||||||
conan config show '*'
|
|
||||||
- name: Set up Conan profile
|
|
||||||
run: |
|
|
||||||
echo 'Installing profile.'
|
|
||||||
conan config install conan/profiles/default -tf $(conan config home)/profiles/
|
|
||||||
|
|
||||||
echo 'Conan profile:'
|
|
||||||
conan profile show
|
|
||||||
- name: Set up Conan remote
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
|
|
||||||
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
|
|
||||||
|
|
||||||
echo 'Listing Conan remotes.'
|
|
||||||
conan remote list
|
|
||||||
|
|
||||||
- name: Build dependencies
|
|
||||||
uses: ./.github/actions/build-deps
|
|
||||||
with:
|
|
||||||
build_dir: ${{ inputs.build_dir }}
|
|
||||||
build_type: ${{ matrix.build_type }}
|
|
||||||
conan_remote_name: ${{ inputs.conan_remote_name }}
|
|
||||||
conan_remote_url: ${{ inputs.conan_remote_url }}
|
|
||||||
conan_remote_username: ${{ secrets.conan_remote_username }}
|
|
||||||
conan_remote_password: ${{ secrets.conan_remote_password }}
|
|
||||||
force_build: ${{ inputs.dependencies_force_build }}
|
|
||||||
force_upload: ${{ inputs.dependencies_force_upload }}
|
|
||||||
- name: Build and test binary
|
|
||||||
uses: ./.github/actions/build-test
|
|
||||||
with:
|
|
||||||
build_dir: ${{ inputs.build_dir }}
|
|
||||||
build_only: ${{ matrix.build_only }}
|
|
||||||
build_type: ${{ matrix.build_type }}
|
|
||||||
cmake_args: ${{ matrix.cmake_args }}
|
|
||||||
cmake_target: ${{ matrix.cmake_target }}
|
|
||||||
codecov_token: ${{ secrets.codecov_token }}
|
|
||||||
os: ${{ inputs.os }}
|
|
||||||
75
.github/workflows/check-format.yml
vendored
75
.github/workflows/check-format.yml
vendored
@@ -1,75 +0,0 @@
|
|||||||
# This workflow checks if the code is properly formatted.
|
|
||||||
name: Check format
|
|
||||||
|
|
||||||
# This workflow can only be triggered by other workflows.
|
|
||||||
on: workflow_call
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-format
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
pre-commit:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ghcr.io/xrplf/ci/tools-rippled-pre-commit
|
|
||||||
steps:
|
|
||||||
# The $GITHUB_WORKSPACE and ${{ github.workspace }} might not point to the
|
|
||||||
# same directory for jobs running in containers. The actions/checkout step
|
|
||||||
# is *supposed* to checkout into $GITHUB_WORKSPACE and then add it to
|
|
||||||
# safe.directory (see instructions at https://github.com/actions/checkout)
|
|
||||||
# but that is apparently not happening for some container images. We
|
|
||||||
# therefore preemptively add both directories to safe.directory. See also
|
|
||||||
# https://github.com/actions/runner/issues/2058 for more details.
|
|
||||||
- name: Configure git safe.directory
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory $GITHUB_WORKSPACE
|
|
||||||
git config --global --add safe.directory ${{ github.workspace }}
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Check configuration
|
|
||||||
run: |
|
|
||||||
echo 'Checking path.'
|
|
||||||
echo ${PATH} | tr ':' '\n'
|
|
||||||
|
|
||||||
echo 'Checking environment variables.'
|
|
||||||
env | sort
|
|
||||||
|
|
||||||
echo 'Checking pre-commit version.'
|
|
||||||
pre-commit --version
|
|
||||||
|
|
||||||
echo 'Checking clang-format version.'
|
|
||||||
clang-format --version
|
|
||||||
|
|
||||||
echo 'Checking NPM version.'
|
|
||||||
npm --version
|
|
||||||
|
|
||||||
echo 'Checking Node.js version.'
|
|
||||||
node --version
|
|
||||||
|
|
||||||
echo 'Checking prettier version.'
|
|
||||||
prettier --version
|
|
||||||
- name: Format code
|
|
||||||
run: pre-commit run --show-diff-on-failure --color=always --all-files
|
|
||||||
- name: Check for differences
|
|
||||||
env:
|
|
||||||
MESSAGE: |
|
|
||||||
One or more files did not conform to the formatting. Maybe you did
|
|
||||||
not run 'pre-commit' before committing, or your version of
|
|
||||||
'clang-format' or 'prettier' has an incompatibility with the ones
|
|
||||||
used here (see the "Check configuration" step above).
|
|
||||||
|
|
||||||
Run 'pre-commit run --all-files' in your repo, and then commit and
|
|
||||||
push the changes.
|
|
||||||
run: |
|
|
||||||
DIFF=$(git status --porcelain)
|
|
||||||
if [ -n "${DIFF}" ]; then
|
|
||||||
# Print the files that changed to give the contributor a hint about
|
|
||||||
# what to expect when running pre-commit on their own machine.
|
|
||||||
git status
|
|
||||||
echo "${MESSAGE}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
46
.github/workflows/check-levelization.yml
vendored
46
.github/workflows/check-levelization.yml
vendored
@@ -1,46 +0,0 @@
|
|||||||
# This workflow checks if the dependencies between the modules are correctly
|
|
||||||
# indexed.
|
|
||||||
name: Check levelization
|
|
||||||
|
|
||||||
# This workflow can only be triggered by other workflows.
|
|
||||||
on: workflow_call
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-levelization
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
levelization:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Check levelization
|
|
||||||
run: .github/scripts/levelization/generate.sh
|
|
||||||
- name: Check for differences
|
|
||||||
env:
|
|
||||||
MESSAGE: |
|
|
||||||
|
|
||||||
The dependency relationships between the modules in rippled have
|
|
||||||
changed, which may be an improvement or a regression.
|
|
||||||
|
|
||||||
A rule of thumb is that if your changes caused something to be
|
|
||||||
removed from loops.txt, it's probably an improvement, while if
|
|
||||||
something was added, it's probably a regression.
|
|
||||||
|
|
||||||
Run '.github/scripts/levelization/generate.sh' in your repo, commit
|
|
||||||
and push the changes. See .github/scripts/levelization/README.md for
|
|
||||||
more info.
|
|
||||||
run: |
|
|
||||||
DIFF=$(git status --porcelain)
|
|
||||||
if [ -n "${DIFF}" ]; then
|
|
||||||
# Print the differences to give the contributor a hint about what to
|
|
||||||
# expect when running levelization on their own machine.
|
|
||||||
git diff
|
|
||||||
echo "${MESSAGE}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
62
.github/workflows/check-missing-commits.yml
vendored
62
.github/workflows/check-missing-commits.yml
vendored
@@ -1,62 +0,0 @@
|
|||||||
# This workflow checks that all commits in the "master" branch are also in the
|
|
||||||
# "release" and "develop" branches, and that all commits in the "release" branch
|
|
||||||
# are also in the "develop" branch.
|
|
||||||
name: Check for missing commits
|
|
||||||
|
|
||||||
# This workflow can only be triggered by other workflows.
|
|
||||||
on: workflow_call
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-missing-commits
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Check for missing commits
|
|
||||||
env:
|
|
||||||
MESSAGE: |
|
|
||||||
|
|
||||||
If you are reading this, then the commits indicated above are missing
|
|
||||||
from the "develop" and/or "release" branch. Do a reverse-merge as soon
|
|
||||||
as possible. See CONTRIBUTING.md for instructions.
|
|
||||||
run: |
|
|
||||||
set -o pipefail
|
|
||||||
# Branches are ordered by how "canonical" they are. Every commit in one
|
|
||||||
# branch should be in all the branches behind it.
|
|
||||||
order=(master release develop)
|
|
||||||
branches=()
|
|
||||||
for branch in "${order[@]}"; do
|
|
||||||
# Check that the branches exist so that this job will work on forked
|
|
||||||
# repos, which don't necessarily have master and release branches.
|
|
||||||
echo "Checking if ${branch} exists."
|
|
||||||
if git ls-remote --exit-code --heads origin \
|
|
||||||
refs/heads/${branch} > /dev/null; then
|
|
||||||
branches+=(origin/${branch})
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
prior=()
|
|
||||||
for branch in "${branches[@]}"; do
|
|
||||||
if [[ ${#prior[@]} -ne 0 ]]; then
|
|
||||||
echo "Checking ${prior[@]} for commits missing from ${branch}."
|
|
||||||
git log --oneline --no-merges "${prior[@]}" \
|
|
||||||
^$branch | tee -a "missing-commits.txt"
|
|
||||||
echo
|
|
||||||
fi
|
|
||||||
prior+=("${branch}")
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then
|
|
||||||
echo "${MESSAGE}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
64
.github/workflows/clang-format.yml
vendored
Normal file
64
.github/workflows/clang-format.yml
vendored
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
name: clang-format
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
pull_request:
|
||||||
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
container: ghcr.io/xrplf/ci/tools-rippled-clang-format
|
||||||
|
steps:
|
||||||
|
# For jobs running in containers, $GITHUB_WORKSPACE and ${{ github.workspace }} might not be the
|
||||||
|
# same directory. The actions/checkout step is *supposed* to checkout into $GITHUB_WORKSPACE and
|
||||||
|
# then add it to safe.directory (see instructions at https://github.com/actions/checkout)
|
||||||
|
# but that's apparently not happening for some container images. We can't be sure what is actually
|
||||||
|
# happening, so let's pre-emptively add both directories to safe.directory. There's a
|
||||||
|
# Github issue opened in 2022 and not resolved in 2025 https://github.com/actions/runner/issues/2058 ¯\_(ツ)_/¯
|
||||||
|
- run: |
|
||||||
|
git config --global --add safe.directory $GITHUB_WORKSPACE
|
||||||
|
git config --global --add safe.directory ${{ github.workspace }}
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Format first-party sources
|
||||||
|
run: |
|
||||||
|
clang-format --version
|
||||||
|
find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format -i {} +
|
||||||
|
- name: Check for differences
|
||||||
|
id: assert
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
git diff --exit-code | tee "clang-format.patch"
|
||||||
|
- name: Upload patch
|
||||||
|
if: failure() && steps.assert.outcome == 'failure'
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
continue-on-error: true
|
||||||
|
with:
|
||||||
|
name: clang-format.patch
|
||||||
|
if-no-files-found: ignore
|
||||||
|
path: clang-format.patch
|
||||||
|
- name: What happened?
|
||||||
|
if: failure() && steps.assert.outcome == 'failure'
|
||||||
|
env:
|
||||||
|
PREAMBLE: |
|
||||||
|
If you are reading this, you are looking at a failed Github Actions
|
||||||
|
job. That means you pushed one or more files that did not conform
|
||||||
|
to the formatting specified in .clang-format. That may be because
|
||||||
|
you neglected to run 'git clang-format' or 'clang-format' before
|
||||||
|
committing, or that your version of clang-format has an
|
||||||
|
incompatibility with the one on this
|
||||||
|
machine, which is:
|
||||||
|
SUGGESTION: |
|
||||||
|
|
||||||
|
To fix it, you can do one of two things:
|
||||||
|
1. Download and apply the patch generated as an artifact of this
|
||||||
|
job to your repo, commit, and push.
|
||||||
|
2. Run 'git-clang-format --extensions cpp,h,hpp,ipp develop'
|
||||||
|
in your repo, commit, and push.
|
||||||
|
run: |
|
||||||
|
echo "${PREAMBLE}"
|
||||||
|
clang-format --version
|
||||||
|
echo "${SUGGESTION}"
|
||||||
|
exit 1
|
||||||
37
.github/workflows/doxygen.yml
vendored
Normal file
37
.github/workflows/doxygen.yml
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
name: Build and publish Doxygen documentation
|
||||||
|
# To test this workflow, push your changes to your fork's `develop` branch.
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- develop
|
||||||
|
- doxygen
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
documentation:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
||||||
|
steps:
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
cmake --version
|
||||||
|
doxygen --version
|
||||||
|
env | sort
|
||||||
|
- name: build
|
||||||
|
run: |
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake -Donly_docs=TRUE ..
|
||||||
|
cmake --build . --target docs --parallel $(nproc)
|
||||||
|
- name: publish
|
||||||
|
uses: peaceiris/actions-gh-pages@v3
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
publish_dir: build/docs/html
|
||||||
53
.github/workflows/levelization.yml
vendored
Normal file
53
.github/workflows/levelization.yml
vendored
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
name: levelization
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
pull_request:
|
||||||
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
CLANG_VERSION: 10
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Check levelization
|
||||||
|
run: Builds/levelization/levelization.sh
|
||||||
|
- name: Check for differences
|
||||||
|
id: assert
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
git diff --exit-code | tee "levelization.patch"
|
||||||
|
- name: Upload patch
|
||||||
|
if: failure() && steps.assert.outcome == 'failure'
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
continue-on-error: true
|
||||||
|
with:
|
||||||
|
name: levelization.patch
|
||||||
|
if-no-files-found: ignore
|
||||||
|
path: levelization.patch
|
||||||
|
- name: What happened?
|
||||||
|
if: failure() && steps.assert.outcome == 'failure'
|
||||||
|
env:
|
||||||
|
MESSAGE: |
|
||||||
|
If you are reading this, you are looking at a failed Github
|
||||||
|
Actions job. That means you changed the dependency relationships
|
||||||
|
between the modules in rippled. That may be an improvement or a
|
||||||
|
regression. This check doesn't judge.
|
||||||
|
|
||||||
|
A rule of thumb, though, is that if your changes caused
|
||||||
|
something to be removed from loops.txt, that's probably an
|
||||||
|
improvement. If something was added, it's probably a regression.
|
||||||
|
|
||||||
|
To fix it, you can do one of two things:
|
||||||
|
1. Download and apply the patch generated as an artifact of this
|
||||||
|
job to your repo, commit, and push.
|
||||||
|
2. Run './Builds/levelization/levelization.sh' in your repo,
|
||||||
|
commit, and push.
|
||||||
|
|
||||||
|
See Builds/levelization/README.md for more info.
|
||||||
|
run: |
|
||||||
|
echo "${MESSAGE}"
|
||||||
|
exit 1
|
||||||
91
.github/workflows/libxrpl.yml
vendored
Normal file
91
.github/workflows/libxrpl.yml
vendored
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
name: Check libXRPL compatibility with Clio
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- "src/libxrpl/protocol/BuildInfo.cpp"
|
||||||
|
- ".github/workflows/libxrpl.yml"
|
||||||
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
publish:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
|
name: Publish libXRPL
|
||||||
|
outputs:
|
||||||
|
outcome: ${{ steps.upload.outputs.outcome }}
|
||||||
|
version: ${{ steps.version.outputs.version }}
|
||||||
|
channel: ${{ steps.channel.outputs.channel }}
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
|
||||||
|
steps:
|
||||||
|
- name: Wait for essential checks to succeed
|
||||||
|
uses: lewagon/wait-on-check-action@v1.3.4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||||
|
running-workflow-name: wait-for-check-regexp
|
||||||
|
check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
wait-interval: 10
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Generate channel
|
||||||
|
id: channel
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT}
|
||||||
|
- name: Export new package
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
conan export . ${{ steps.channel.outputs.channel }}
|
||||||
|
- name: Add Conan remote
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
|
||||||
|
conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force
|
||||||
|
echo "Listing Conan remotes."
|
||||||
|
conan remote list
|
||||||
|
- name: Parse new version
|
||||||
|
id: version
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
|
||||||
|
| awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
|
||||||
|
- name: Try to authenticate to Conan remote
|
||||||
|
id: remote
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
# `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
|
||||||
|
# https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
|
||||||
|
# https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
|
||||||
|
# https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
|
||||||
|
echo outcome=$(conan user --remote xrplf --password >&2 \
|
||||||
|
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
|
||||||
|
- name: Upload new package
|
||||||
|
id: upload
|
||||||
|
if: (steps.remote.outputs.outcome == 'success')
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}"
|
||||||
|
echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote ripple --confirm >&2 \
|
||||||
|
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
|
||||||
|
notify_clio:
|
||||||
|
name: Notify Clio
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: publish
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
|
||||||
|
steps:
|
||||||
|
- name: Notify Clio about new version
|
||||||
|
if: (needs.publish.outputs.outcome == 'success')
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
|
||||||
|
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
|
||||||
|
-F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" \
|
||||||
|
-F "client_payload[pr]=${{ github.event.pull_request.number }}"
|
||||||
112
.github/workflows/macos.yml
vendored
Normal file
112
.github/workflows/macos.yml
vendored
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
name: macos
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
|
push:
|
||||||
|
# If the branches list is ever changed, be sure to change it on all
|
||||||
|
# build/test jobs (nix, macos, windows, instrumentation)
|
||||||
|
branches:
|
||||||
|
# Always build the package branches
|
||||||
|
- develop
|
||||||
|
- release
|
||||||
|
- master
|
||||||
|
# Branches that opt-in to running
|
||||||
|
- "ci/**"
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
# This part of Conan configuration is specific to this workflow only; we do not want
|
||||||
|
# to pollute conan/profiles directory with settings which might not work for others
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
# This part of the Conan configuration is specific to this workflow only; we
|
||||||
|
# do not want to pollute the 'conan/profiles' directory with settings that
|
||||||
|
# might not work for other workflows.
|
||||||
|
CONAN_GLOBAL_CONF: |
|
||||||
|
core.download:parallel={{os.cpu_count()}}
|
||||||
|
core.upload:parallel={{os.cpu_count()}}
|
||||||
|
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
|
||||||
|
tools.build:verbosity=verbose
|
||||||
|
tools.compilation:verbosity=verbose
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- macos
|
||||||
|
generator:
|
||||||
|
- Ninja
|
||||||
|
configuration:
|
||||||
|
- Release
|
||||||
|
runs-on: [self-hosted, macOS, mac-runner-m1]
|
||||||
|
env:
|
||||||
|
# The `build` action requires these variables.
|
||||||
|
build_dir: .build
|
||||||
|
NUM_PROCESSORS: 12
|
||||||
|
steps:
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: install Conan
|
||||||
|
run: |
|
||||||
|
brew install conan
|
||||||
|
- name: install Ninja
|
||||||
|
if: matrix.generator == 'Ninja'
|
||||||
|
run: brew install ninja
|
||||||
|
- name: install python
|
||||||
|
run: |
|
||||||
|
if which python > /dev/null 2>&1; then
|
||||||
|
echo "Python executable exists"
|
||||||
|
else
|
||||||
|
brew install python@3.13
|
||||||
|
ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python
|
||||||
|
fi
|
||||||
|
- name: install cmake
|
||||||
|
run: |
|
||||||
|
if which cmake > /dev/null 2>&1; then
|
||||||
|
echo "cmake executable exists"
|
||||||
|
else
|
||||||
|
brew install cmake
|
||||||
|
fi
|
||||||
|
- name: install nproc
|
||||||
|
run: |
|
||||||
|
brew install coreutils
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
env | sort
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
python --version
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
nproc --version
|
||||||
|
echo -n "nproc returns: "
|
||||||
|
nproc
|
||||||
|
system_profiler SPHardwareDataType
|
||||||
|
sysctl -n hw.logicalcpu
|
||||||
|
clang --version
|
||||||
|
- name: configure Conan
|
||||||
|
run: |
|
||||||
|
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
|
||||||
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
|
conan profile show
|
||||||
|
- name: build dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
- name: build
|
||||||
|
uses: ./.github/actions/build
|
||||||
|
with:
|
||||||
|
generator: ${{ matrix.generator }}
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
||||||
|
- name: test
|
||||||
|
run: |
|
||||||
|
n=$(nproc)
|
||||||
|
echo "Using $n test jobs"
|
||||||
|
|
||||||
|
cd ${build_dir}
|
||||||
|
./rippled --unittest --unittest-jobs $n
|
||||||
|
ctest -j $n --output-on-failure
|
||||||
60
.github/workflows/missing-commits.yml
vendored
Normal file
60
.github/workflows/missing-commits.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
name: missing-commits
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
# Only check that the branches are up to date when updating the
|
||||||
|
# relevant branches.
|
||||||
|
- develop
|
||||||
|
- release
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
up_to_date:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Check for missing commits
|
||||||
|
id: commits
|
||||||
|
env:
|
||||||
|
SUGGESTION: |
|
||||||
|
|
||||||
|
If you are reading this, then the commits indicated above are
|
||||||
|
missing from "develop" and/or "release". Do a reverse-merge
|
||||||
|
as soon as possible. See CONTRIBUTING.md for instructions.
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
# Branches ordered by how "canonical" they are. Every commit in
|
||||||
|
# one branch should be in all the branches behind it
|
||||||
|
order=( master release develop )
|
||||||
|
branches=()
|
||||||
|
for branch in "${order[@]}"
|
||||||
|
do
|
||||||
|
# Check that the branches exist so that this job will work on
|
||||||
|
# forked repos, which don't necessarily have master and
|
||||||
|
# release branches.
|
||||||
|
if git ls-remote --exit-code --heads origin \
|
||||||
|
refs/heads/${branch} > /dev/null
|
||||||
|
then
|
||||||
|
branches+=( origin/${branch} )
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
prior=()
|
||||||
|
for branch in "${branches[@]}"
|
||||||
|
do
|
||||||
|
if [[ ${#prior[@]} -ne 0 ]]
|
||||||
|
then
|
||||||
|
echo "Checking ${prior[@]} for commits missing from ${branch}"
|
||||||
|
git log --oneline --no-merges "${prior[@]}" \
|
||||||
|
^$branch | tee -a "missing-commits.txt"
|
||||||
|
echo
|
||||||
|
fi
|
||||||
|
prior+=( "${branch}" )
|
||||||
|
done
|
||||||
|
if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]]
|
||||||
|
then
|
||||||
|
echo "${SUGGESTION}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
422
.github/workflows/nix.yml
vendored
Normal file
422
.github/workflows/nix.yml
vendored
Normal file
@@ -0,0 +1,422 @@
|
|||||||
|
name: nix
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
|
push:
|
||||||
|
# If the branches list is ever changed, be sure to change it on all
|
||||||
|
# build/test jobs (nix, macos, windows)
|
||||||
|
branches:
|
||||||
|
# Always build the package branches
|
||||||
|
- develop
|
||||||
|
- release
|
||||||
|
- master
|
||||||
|
# Branches that opt-in to running
|
||||||
|
- "ci/**"
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
# This part of the Conan configuration is specific to this workflow only; we
|
||||||
|
# do not want to pollute the 'conan/profiles' directory with settings that
|
||||||
|
# might not work for other workflows.
|
||||||
|
CONAN_GLOBAL_CONF: |
|
||||||
|
core.download:parallel={{ os.cpu_count() }}
|
||||||
|
core.upload:parallel={{ os.cpu_count() }}
|
||||||
|
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
|
||||||
|
tools.build:verbosity=verbose
|
||||||
|
tools.compilation:verbosity=verbose
|
||||||
|
|
||||||
|
# This workflow has multiple job matrixes.
|
||||||
|
# They can be considered phases because most of the matrices ("test",
|
||||||
|
# "coverage", "conan", ) depend on the first ("dependencies").
|
||||||
|
#
|
||||||
|
# The first phase has a job in the matrix for each combination of
|
||||||
|
# variables that affects dependency ABI:
|
||||||
|
# platform, compiler, and configuration.
|
||||||
|
# It creates a GitHub artifact holding the Conan profile,
|
||||||
|
# and builds and caches binaries for all the dependencies.
|
||||||
|
# If an Artifactory remote is configured, they are cached there.
|
||||||
|
# If not, they are added to the GitHub artifact.
|
||||||
|
# GitHub's "cache" action has a size limit (10 GB) that is too small
|
||||||
|
# to hold the binaries if they are built locally.
|
||||||
|
# We must use the "{upload,download}-artifact" actions instead.
|
||||||
|
#
|
||||||
|
# The remaining phases have a job in the matrix for each test
|
||||||
|
# configuration. They install dependency binaries from the cache,
|
||||||
|
# whichever was used, and build and test rippled.
|
||||||
|
#
|
||||||
|
# "instrumentation" is independent, but is included here because it also
|
||||||
|
# builds on linux in the same "on:" conditions.
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dependencies:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- linux
|
||||||
|
compiler:
|
||||||
|
- gcc
|
||||||
|
- clang
|
||||||
|
configuration:
|
||||||
|
- Debug
|
||||||
|
- Release
|
||||||
|
include:
|
||||||
|
- compiler: gcc
|
||||||
|
compiler_version: 12
|
||||||
|
distro: ubuntu
|
||||||
|
codename: jammy
|
||||||
|
- compiler: clang
|
||||||
|
compiler_version: 16
|
||||||
|
distro: debian
|
||||||
|
codename: bookworm
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }}
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
steps:
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
lsb_release -a || true
|
||||||
|
${{ matrix.compiler }}-${{ matrix.compiler_version }} --version
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
env | sort
|
||||||
|
- name: configure Conan
|
||||||
|
run: |
|
||||||
|
echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf
|
||||||
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
|
conan profile show
|
||||||
|
- name: archive profile
|
||||||
|
# Create this archive before dependencies are added to the local cache.
|
||||||
|
run: tar -czf conan.tar.gz -C ${CONAN_HOME} .
|
||||||
|
- name: build dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
- name: upload archive
|
||||||
|
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
|
path: conan.tar.gz
|
||||||
|
if-no-files-found: error
|
||||||
|
|
||||||
|
test:
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- linux
|
||||||
|
compiler:
|
||||||
|
- gcc
|
||||||
|
- clang
|
||||||
|
configuration:
|
||||||
|
- Debug
|
||||||
|
- Release
|
||||||
|
include:
|
||||||
|
- compiler: gcc
|
||||||
|
compiler_version: 12
|
||||||
|
distro: ubuntu
|
||||||
|
codename: jammy
|
||||||
|
- compiler: clang
|
||||||
|
compiler_version: 16
|
||||||
|
distro: debian
|
||||||
|
codename: bookworm
|
||||||
|
cmake-args:
|
||||||
|
-
|
||||||
|
- "-Dunity=ON"
|
||||||
|
needs: dependencies
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }}
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
steps:
|
||||||
|
- name: download cache
|
||||||
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
|
- name: extract cache
|
||||||
|
run: |
|
||||||
|
mkdir -p ${CONAN_HOME}
|
||||||
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
env | sort
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
- name: build
|
||||||
|
uses: ./.github/actions/build
|
||||||
|
with:
|
||||||
|
generator: Ninja
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
||||||
|
- name: check linking
|
||||||
|
run: |
|
||||||
|
cd ${build_dir}
|
||||||
|
ldd ./rippled
|
||||||
|
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
|
||||||
|
echo 'The binary is statically linked.'
|
||||||
|
else
|
||||||
|
echo 'The binary is dynamically linked.'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
- name: test
|
||||||
|
run: |
|
||||||
|
cd ${build_dir}
|
||||||
|
./rippled --unittest --unittest-jobs $(nproc)
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
|
|
||||||
|
reference-fee-test:
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- linux
|
||||||
|
compiler:
|
||||||
|
- gcc
|
||||||
|
configuration:
|
||||||
|
- Debug
|
||||||
|
cmake-args:
|
||||||
|
- "-DUNIT_TEST_REFERENCE_FEE=200"
|
||||||
|
- "-DUNIT_TEST_REFERENCE_FEE=1000"
|
||||||
|
needs: dependencies
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
steps:
|
||||||
|
- name: download cache
|
||||||
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
|
- name: extract cache
|
||||||
|
run: |
|
||||||
|
mkdir -p ${CONAN_HOME}
|
||||||
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
env | sort
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
- name: build
|
||||||
|
uses: ./.github/actions/build
|
||||||
|
with:
|
||||||
|
generator: Ninja
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
|
||||||
|
- name: test
|
||||||
|
run: |
|
||||||
|
cd ${build_dir}
|
||||||
|
./rippled --unittest --unittest-jobs $(nproc)
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
platform:
|
||||||
|
- linux
|
||||||
|
compiler:
|
||||||
|
- gcc
|
||||||
|
configuration:
|
||||||
|
- Debug
|
||||||
|
needs: dependencies
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
steps:
|
||||||
|
- name: download cache
|
||||||
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
|
||||||
|
- name: extract cache
|
||||||
|
run: |
|
||||||
|
mkdir -p ${CONAN_HOME}
|
||||||
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
gcovr --version
|
||||||
|
env | sort
|
||||||
|
ls ${CONAN_HOME}
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
- name: build
|
||||||
|
uses: ./.github/actions/build
|
||||||
|
with:
|
||||||
|
generator: Ninja
|
||||||
|
configuration: ${{ matrix.configuration }}
|
||||||
|
cmake-args: >-
|
||||||
|
-Dassert=TRUE
|
||||||
|
-Dwerr=TRUE
|
||||||
|
-Dcoverage=ON
|
||||||
|
-Dcoverage_format=xml
|
||||||
|
-DCODE_COVERAGE_VERBOSE=ON
|
||||||
|
-DCMAKE_CXX_FLAGS="-O0"
|
||||||
|
-DCMAKE_C_FLAGS="-O0"
|
||||||
|
cmake-target: coverage
|
||||||
|
- name: move coverage report
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
mv "${build_dir}/coverage.xml" ./
|
||||||
|
- name: archive coverage report
|
||||||
|
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||||
|
with:
|
||||||
|
name: coverage.xml
|
||||||
|
path: coverage.xml
|
||||||
|
retention-days: 30
|
||||||
|
- name: upload coverage report
|
||||||
|
uses: wandalen/wretry.action@v1.4.10
|
||||||
|
with:
|
||||||
|
action: codecov/codecov-action@v4.5.0
|
||||||
|
with: |
|
||||||
|
files: coverage.xml
|
||||||
|
fail_ci_if_error: true
|
||||||
|
disable_search: true
|
||||||
|
verbose: true
|
||||||
|
plugin: noop
|
||||||
|
token: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
attempt_limit: 5
|
||||||
|
attempt_delay: 210000 # in milliseconds
|
||||||
|
|
||||||
|
conan:
|
||||||
|
needs: dependencies
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container:
|
||||||
|
image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
platform: linux
|
||||||
|
compiler: gcc
|
||||||
|
compiler_version: 12
|
||||||
|
configuration: Release
|
||||||
|
steps:
|
||||||
|
- name: download cache
|
||||||
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
|
with:
|
||||||
|
name: ${{ env.platform }}-${{ env.compiler }}-${{ env.configuration }}
|
||||||
|
- name: extract cache
|
||||||
|
run: |
|
||||||
|
mkdir -p ${CONAN_HOME}
|
||||||
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
env | sort
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ env.configuration }}
|
||||||
|
- name: export
|
||||||
|
run: |
|
||||||
|
conan export . --version head
|
||||||
|
- name: build
|
||||||
|
run: |
|
||||||
|
cd tests/conan
|
||||||
|
mkdir ${build_dir} && cd ${build_dir}
|
||||||
|
conan install .. \
|
||||||
|
--settings:all build_type=${configuration} \
|
||||||
|
--output-folder . \
|
||||||
|
--build missing
|
||||||
|
cmake .. \
|
||||||
|
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \
|
||||||
|
-DCMAKE_BUILD_TYPE=${configuration}
|
||||||
|
cmake --build .
|
||||||
|
./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
|
||||||
|
|
||||||
|
instrumentation-build:
|
||||||
|
needs: dependencies
|
||||||
|
runs-on: [self-hosted, heavy]
|
||||||
|
container: ghcr.io/xrplf/ci/debian-bookworm:clang-16
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
steps:
|
||||||
|
- name: download cache
|
||||||
|
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
|
||||||
|
with:
|
||||||
|
name: linux-clang-Debug
|
||||||
|
|
||||||
|
- name: extract cache
|
||||||
|
run: |
|
||||||
|
mkdir -p ${CONAN_HOME}
|
||||||
|
tar -xzf conan.tar.gz -C ${CONAN_HOME}
|
||||||
|
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
echo ${PATH} | tr ':' '\n'
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
env | sort
|
||||||
|
ls ${CONAN_HOME}
|
||||||
|
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
|
||||||
|
- name: dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: Debug
|
||||||
|
|
||||||
|
- name: prepare environment
|
||||||
|
run: |
|
||||||
|
mkdir -p ${build_dir}
|
||||||
|
echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV
|
||||||
|
echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: build with instrumentation
|
||||||
|
run: |
|
||||||
|
cd ${BUILD_DIR}
|
||||||
|
cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \
|
||||||
|
-Dvoidstar=ON \
|
||||||
|
-Dtests=ON \
|
||||||
|
-Dxrpld=ON \
|
||||||
|
-DCMAKE_BUILD_TYPE=Debug \
|
||||||
|
-DSECP256K1_BUILD_BENCHMARK=OFF \
|
||||||
|
-DSECP256K1_BUILD_TESTS=OFF \
|
||||||
|
-DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \
|
||||||
|
-DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake
|
||||||
|
cmake --build . --parallel $(nproc)
|
||||||
|
|
||||||
|
- name: verify instrumentation enabled
|
||||||
|
run: |
|
||||||
|
cd ${BUILD_DIR}
|
||||||
|
./rippled --version | grep libvoidstar
|
||||||
|
|
||||||
|
- name: run unit tests
|
||||||
|
run: |
|
||||||
|
cd ${BUILD_DIR}
|
||||||
|
./rippled -u --unittest-jobs $(( $(nproc)/4 ))
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
80
.github/workflows/notify-clio.yml
vendored
80
.github/workflows/notify-clio.yml
vendored
@@ -1,80 +0,0 @@
|
|||||||
# This workflow exports the built libxrpl package to the Conan remote on a
|
|
||||||
# a channel named after the pull request, and notifies the Clio repository about
|
|
||||||
# the new version so it can check for compatibility.
|
|
||||||
name: Notify Clio
|
|
||||||
|
|
||||||
# This workflow can only be triggered by other workflows.
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
conan_remote_name:
|
|
||||||
description: "The name of the Conan remote to use."
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
conan_remote_url:
|
|
||||||
description: "The URL of the Conan endpoint to use."
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
secrets:
|
|
||||||
clio_notify_token:
|
|
||||||
description: "The GitHub token to notify Clio about new versions."
|
|
||||||
required: true
|
|
||||||
conan_remote_username:
|
|
||||||
description: "The username for logging into the Conan remote."
|
|
||||||
required: true
|
|
||||||
conan_remote_password:
|
|
||||||
description: "The password for logging into the Conan remote."
|
|
||||||
required: true
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-clio
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
upload:
|
|
||||||
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Generate outputs
|
|
||||||
id: generate
|
|
||||||
run: |
|
|
||||||
echo 'Generating user and channel.'
|
|
||||||
echo "user=clio" >> "${GITHUB_OUTPUT}"
|
|
||||||
echo "channel=pr_${{ github.event.pull_request.number }}" >> "${GITHUB_OUTPUT}"
|
|
||||||
echo 'Extracting version.'
|
|
||||||
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
|
|
||||||
- name: Add Conan remote
|
|
||||||
run: |
|
|
||||||
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
|
|
||||||
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
|
|
||||||
echo 'Listing Conan remotes.'
|
|
||||||
conan remote list
|
|
||||||
- name: Log into Conan remote
|
|
||||||
run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
|
|
||||||
- name: Upload package
|
|
||||||
run: |
|
|
||||||
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
|
|
||||||
conan upload --confirm --check --remote=${{ inputs.conan_remote_name }} xrpl/${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}
|
|
||||||
outputs:
|
|
||||||
channel: ${{ steps.generate.outputs.channel }}
|
|
||||||
version: ${{ steps.generate.outputs.version }}
|
|
||||||
|
|
||||||
notify:
|
|
||||||
needs: upload
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ secrets.clio_notify_token }}
|
|
||||||
steps:
|
|
||||||
- name: Notify Clio
|
|
||||||
run: |
|
|
||||||
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
|
|
||||||
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
|
|
||||||
-F "client_payload[version]=${{ needs.upload.outputs.version }}@${{ needs.upload.outputs.user }}/${{ needs.upload.outputs.channel }}" \
|
|
||||||
-F "client_payload[pr]=${{ github.event.pull_request.number }}"
|
|
||||||
154
.github/workflows/on-pr.yml
vendored
154
.github/workflows/on-pr.yml
vendored
@@ -1,154 +0,0 @@
|
|||||||
# This workflow runs all workflows to check, build and test the project on
|
|
||||||
# various Linux flavors, as well as on MacOS and Windows, on every push to a
|
|
||||||
# user branch. However, it will not run if the pull request is a draft unless it
|
|
||||||
# has the 'DraftRunCI' label.
|
|
||||||
name: PR
|
|
||||||
|
|
||||||
on:
|
|
||||||
merge_group:
|
|
||||||
types:
|
|
||||||
- checks_requested
|
|
||||||
pull_request:
|
|
||||||
types:
|
|
||||||
- opened
|
|
||||||
- reopened
|
|
||||||
- synchronize
|
|
||||||
- ready_for_review
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
env:
|
|
||||||
CONAN_REMOTE_NAME: xrplf
|
|
||||||
CONAN_REMOTE_URL: https://conan.ripplex.io
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
# This job determines whether the rest of the workflow should run. It runs
|
|
||||||
# when the PR is not a draft (which should also cover merge-group) or
|
|
||||||
# has the 'DraftRunCI' label.
|
|
||||||
should-run:
|
|
||||||
if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Determine changed files
|
|
||||||
# This step checks whether any files have changed that should
|
|
||||||
# cause the next jobs to run. We do it this way rather than
|
|
||||||
# using `paths` in the `on:` section, because all required
|
|
||||||
# checks must pass, even for changes that do not modify anything
|
|
||||||
# that affects those checks. We would therefore like to make the
|
|
||||||
# checks required only if the job runs, but GitHub does not
|
|
||||||
# support that directly. By always executing the workflow on new
|
|
||||||
# commits and by using the changed-files action below, we ensure
|
|
||||||
# that Github considers any skipped jobs to have passed, and in
|
|
||||||
# turn the required checks as well.
|
|
||||||
id: changes
|
|
||||||
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
|
|
||||||
with:
|
|
||||||
files: |
|
|
||||||
# These paths are unique to `on-pr.yml`.
|
|
||||||
.github/scripts/levelization/**
|
|
||||||
.github/workflows/check-format.yml
|
|
||||||
.github/workflows/check-levelization.yml
|
|
||||||
.github/workflows/notify-clio.yml
|
|
||||||
.github/workflows/on-pr.yml
|
|
||||||
.clang-format
|
|
||||||
.pre-commit-config.yaml
|
|
||||||
|
|
||||||
# Keep the paths below in sync with those in `on-trigger.yml`.
|
|
||||||
.github/actions/build-deps/**
|
|
||||||
.github/actions/build-test/**
|
|
||||||
.github/scripts/strategy-matrix/**
|
|
||||||
.github/workflows/build-test.yml
|
|
||||||
.codecov.yml
|
|
||||||
cmake/**
|
|
||||||
conan/**
|
|
||||||
external/**
|
|
||||||
include/**
|
|
||||||
src/**
|
|
||||||
tests/**
|
|
||||||
CMakeLists.txt
|
|
||||||
conanfile.py
|
|
||||||
- name: Check whether to run
|
|
||||||
# This step determines whether the rest of the workflow should
|
|
||||||
# run. The rest of the workflow will run if this job runs AND at
|
|
||||||
# least one of:
|
|
||||||
# * Any of the files checked in the `changes` step were modified
|
|
||||||
# * The PR is NOT a draft and is labeled "Ready to merge"
|
|
||||||
# * The workflow is running from the merge queue
|
|
||||||
id: go
|
|
||||||
env:
|
|
||||||
FILES: ${{ steps.changes.outputs.any_changed }}
|
|
||||||
DRAFT: ${{ github.event.pull_request.draft }}
|
|
||||||
READY: ${{ contains(github.event.pull_request.labels.*.name, 'Ready to merge') }}
|
|
||||||
MERGE: ${{ github.event_name == 'merge_group' }}
|
|
||||||
run: |
|
|
||||||
echo "go=${{ (env.DRAFT != 'true' && env.READY == 'true') || env.FILES == 'true' || env.MERGE == 'true' }}" >> "${GITHUB_OUTPUT}"
|
|
||||||
cat "${GITHUB_OUTPUT}"
|
|
||||||
outputs:
|
|
||||||
go: ${{ steps.go.outputs.go == 'true' }}
|
|
||||||
|
|
||||||
check-format:
|
|
||||||
needs: should-run
|
|
||||||
if: needs.should-run.outputs.go == 'true'
|
|
||||||
uses: ./.github/workflows/check-format.yml
|
|
||||||
|
|
||||||
check-levelization:
|
|
||||||
needs: should-run
|
|
||||||
if: needs.should-run.outputs.go == 'true'
|
|
||||||
uses: ./.github/workflows/check-levelization.yml
|
|
||||||
|
|
||||||
# This job works around the limitation that GitHub Actions does not support
|
|
||||||
# using environment variables as inputs for reusable workflows.
|
|
||||||
generate-outputs:
|
|
||||||
needs: should-run
|
|
||||||
if: needs.should-run.outputs.go == 'true'
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: No-op
|
|
||||||
run: true
|
|
||||||
outputs:
|
|
||||||
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
|
|
||||||
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
|
|
||||||
|
|
||||||
build-test:
|
|
||||||
needs: generate-outputs
|
|
||||||
uses: ./.github/workflows/build-test.yml
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
os: [linux, macos, windows]
|
|
||||||
with:
|
|
||||||
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
|
|
||||||
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
|
|
||||||
os: ${{ matrix.os }}
|
|
||||||
secrets:
|
|
||||||
codecov_token: ${{ secrets.CODECOV_TOKEN }}
|
|
||||||
|
|
||||||
notify-clio:
|
|
||||||
needs:
|
|
||||||
- generate-outputs
|
|
||||||
- build-test
|
|
||||||
uses: ./.github/workflows/notify-clio.yml
|
|
||||||
with:
|
|
||||||
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
|
|
||||||
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
|
|
||||||
secrets:
|
|
||||||
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
|
|
||||||
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
|
||||||
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
|
||||||
|
|
||||||
passed:
|
|
||||||
needs:
|
|
||||||
- build-test
|
|
||||||
- check-format
|
|
||||||
- check-levelization
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: No-op
|
|
||||||
run: true
|
|
||||||
118
.github/workflows/on-trigger.yml
vendored
118
.github/workflows/on-trigger.yml
vendored
@@ -1,118 +0,0 @@
|
|||||||
# This workflow runs all workflows to build the dependencies required for the
|
|
||||||
# project on various Linux flavors, as well as on MacOS and Windows, on a
|
|
||||||
# scheduled basis, on merge into the 'develop', 'release', or 'master' branches,
|
|
||||||
# or manually. The missing commits check is only run when the code is merged
|
|
||||||
# into the 'develop' or 'release' branches, and the documentation is built when
|
|
||||||
# the code is merged into the 'develop' branch.
|
|
||||||
name: Trigger
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- develop
|
|
||||||
- release
|
|
||||||
- master
|
|
||||||
paths:
|
|
||||||
# These paths are unique to `on-trigger.yml`.
|
|
||||||
- ".github/workflows/check-missing-commits.yml"
|
|
||||||
- ".github/workflows/on-trigger.yml"
|
|
||||||
- ".github/workflows/publish-docs.yml"
|
|
||||||
|
|
||||||
# Keep the paths below in sync with those in `on-pr.yml`.
|
|
||||||
- ".github/actions/build-deps/**"
|
|
||||||
- ".github/actions/build-test/**"
|
|
||||||
- ".github/scripts/strategy-matrix/**"
|
|
||||||
- ".github/workflows/build-test.yml"
|
|
||||||
- ".codecov.yml"
|
|
||||||
- "cmake/**"
|
|
||||||
- "conan/**"
|
|
||||||
- "external/**"
|
|
||||||
- "include/**"
|
|
||||||
- "src/**"
|
|
||||||
- "tests/**"
|
|
||||||
- "CMakeLists.txt"
|
|
||||||
- "conanfile.py"
|
|
||||||
|
|
||||||
# Run at 06:32 UTC on every day of the week from Monday through Friday. This
|
|
||||||
# will force all dependencies to be rebuilt, which is useful to verify that
|
|
||||||
# all dependencies can be built successfully. Only the dependencies that
|
|
||||||
# are actually missing from the remote will be uploaded.
|
|
||||||
schedule:
|
|
||||||
- cron: "32 6 * * 1-5"
|
|
||||||
|
|
||||||
# Run when manually triggered via the GitHub UI or API. If `force_upload` is
|
|
||||||
# true, then the dependencies that were missing (`force_rebuild` is false) or
|
|
||||||
# rebuilt (`force_rebuild` is true) will be uploaded, overwriting existing
|
|
||||||
# dependencies if needed.
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
dependencies_force_build:
|
|
||||||
description: "Force building of all dependencies."
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
dependencies_force_upload:
|
|
||||||
description: "Force uploading of all dependencies."
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
env:
|
|
||||||
CONAN_REMOTE_NAME: xrplf
|
|
||||||
CONAN_REMOTE_URL: https://conan.ripplex.io
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check-missing-commits:
|
|
||||||
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
|
|
||||||
uses: ./.github/workflows/check-missing-commits.yml
|
|
||||||
|
|
||||||
# This job works around the limitation that GitHub Actions does not support
|
|
||||||
# using environment variables as inputs for reusable workflows. It also sets
|
|
||||||
# outputs that depend on the event that triggered the workflow.
|
|
||||||
generate-outputs:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Check inputs and set outputs
|
|
||||||
id: generate
|
|
||||||
run: |
|
|
||||||
if [[ '${{ github.event_name }}' == 'push' ]]; then
|
|
||||||
echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}"
|
|
||||||
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
|
|
||||||
elif [[ '${{ github.event_name }}' == 'schedule' ]]; then
|
|
||||||
echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}"
|
|
||||||
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
|
|
||||||
else
|
|
||||||
echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' >> "${GITHUB_OUTPUT}"
|
|
||||||
echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' >> "${GITHUB_OUTPUT}"
|
|
||||||
fi
|
|
||||||
outputs:
|
|
||||||
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
|
|
||||||
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
|
|
||||||
dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }}
|
|
||||||
dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }}
|
|
||||||
|
|
||||||
build-test:
|
|
||||||
needs: generate-outputs
|
|
||||||
uses: ./.github/workflows/build-test.yml
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
os: [linux, macos, windows]
|
|
||||||
with:
|
|
||||||
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
|
|
||||||
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
|
|
||||||
dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }}
|
|
||||||
dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }}
|
|
||||||
os: ${{ matrix.os }}
|
|
||||||
strategy_matrix: "all"
|
|
||||||
secrets:
|
|
||||||
codecov_token: ${{ secrets.CODECOV_TOKEN }}
|
|
||||||
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
|
||||||
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
|
||||||
60
.github/workflows/publish-docs.yml
vendored
60
.github/workflows/publish-docs.yml
vendored
@@ -1,60 +0,0 @@
|
|||||||
# This workflow builds the documentation for the repository, and publishes it to
|
|
||||||
# GitHub Pages when changes are merged into the default branch.
|
|
||||||
name: Build and publish documentation
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
paths:
|
|
||||||
- ".github/workflows/publish-docs.yml"
|
|
||||||
- "*.md"
|
|
||||||
- "**/*.md"
|
|
||||||
- "docs/**"
|
|
||||||
- "include/**"
|
|
||||||
- "src/libxrpl/**"
|
|
||||||
- "src/xrpld/**"
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
env:
|
|
||||||
BUILD_DIR: .build
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
publish:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ghcr.io/xrplf/ci/tools-rippled-documentation
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
|
|
||||||
- name: Check configuration
|
|
||||||
run: |
|
|
||||||
echo 'Checking path.'
|
|
||||||
echo ${PATH} | tr ':' '\n'
|
|
||||||
|
|
||||||
echo 'Checking environment variables.'
|
|
||||||
env | sort
|
|
||||||
|
|
||||||
echo 'Checking CMake version.'
|
|
||||||
cmake --version
|
|
||||||
|
|
||||||
echo 'Checking Doxygen version.'
|
|
||||||
doxygen --version
|
|
||||||
- name: Build documentation
|
|
||||||
run: |
|
|
||||||
mkdir -p ${{ env.BUILD_DIR }}
|
|
||||||
cd ${{ env.BUILD_DIR }}
|
|
||||||
cmake -Donly_docs=ON ..
|
|
||||||
cmake --build . --target docs --parallel $(nproc)
|
|
||||||
- name: Publish documentation
|
|
||||||
if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
|
|
||||||
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
|
|
||||||
with:
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
publish_dir: ${{ env.BUILD_DIR }}/docs/html
|
|
||||||
106
.github/workflows/windows.yml
vendored
Normal file
106
.github/workflows/windows.yml
vendored
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
name: windows
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
types: [opened, reopened, synchronize, ready_for_review]
|
||||||
|
push:
|
||||||
|
# If the branches list is ever changed, be sure to change it on all
|
||||||
|
# build/test jobs (nix, macos, windows, instrumentation)
|
||||||
|
branches:
|
||||||
|
# Always build the package branches
|
||||||
|
- develop
|
||||||
|
- release
|
||||||
|
- master
|
||||||
|
# Branches that opt-in to running
|
||||||
|
- "ci/**"
|
||||||
|
|
||||||
|
# https://docs.github.com/en/actions/using-jobs/using-concurrency
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
env:
|
||||||
|
CONAN_REMOTE_URL: https://conan.ripplex.io
|
||||||
|
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
|
||||||
|
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
|
||||||
|
# This part of the Conan configuration is specific to this workflow only; we
|
||||||
|
# do not want to pollute the 'conan/profiles' directory with settings that
|
||||||
|
# might not work for other workflows.
|
||||||
|
CONAN_GLOBAL_CONF: |
|
||||||
|
core.download:parallel={{os.cpu_count()}}
|
||||||
|
core.upload:parallel={{os.cpu_count()}}
|
||||||
|
tools.build:jobs=24
|
||||||
|
tools.build:verbosity=verbose
|
||||||
|
tools.compilation:verbosity=verbose
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
version:
|
||||||
|
- generator: Visual Studio 17 2022
|
||||||
|
runs-on: windows-2022
|
||||||
|
configuration:
|
||||||
|
- type: Release
|
||||||
|
tests: true
|
||||||
|
- type: Debug
|
||||||
|
# Skip running unit tests on debug builds, because they
|
||||||
|
# take an unreasonable amount of time
|
||||||
|
tests: false
|
||||||
|
runtime: d
|
||||||
|
runs-on: ${{ matrix.version.runs-on }}
|
||||||
|
env:
|
||||||
|
build_dir: .build
|
||||||
|
steps:
|
||||||
|
- name: checkout
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||||
|
- name: choose Python
|
||||||
|
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065
|
||||||
|
with:
|
||||||
|
python-version: 3.13
|
||||||
|
- name: learn Python cache directory
|
||||||
|
id: pip-cache
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT}
|
||||||
|
- name: restore Python cache directory
|
||||||
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684
|
||||||
|
with:
|
||||||
|
path: ${{ steps.pip-cache.outputs.dir }}
|
||||||
|
key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }}
|
||||||
|
- name: install Conan
|
||||||
|
run: pip install wheel conan
|
||||||
|
- name: check environment
|
||||||
|
run: |
|
||||||
|
dir env:
|
||||||
|
$env:PATH -split ';'
|
||||||
|
python --version
|
||||||
|
conan --version
|
||||||
|
cmake --version
|
||||||
|
- name: configure Conan
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
|
||||||
|
conan config install conan/profiles/ -tf $(conan config home)/profiles/
|
||||||
|
conan profile show
|
||||||
|
- name: build dependencies
|
||||||
|
uses: ./.github/actions/dependencies
|
||||||
|
with:
|
||||||
|
configuration: ${{ matrix.configuration.type }}
|
||||||
|
- name: build
|
||||||
|
uses: ./.github/actions/build
|
||||||
|
with:
|
||||||
|
generator: "${{ matrix.version.generator }}"
|
||||||
|
configuration: ${{ matrix.configuration.type }}
|
||||||
|
# Hard code for now. Move to the matrix if varied options are needed
|
||||||
|
cmake-args: "-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON"
|
||||||
|
cmake-target: install
|
||||||
|
- name: test
|
||||||
|
shell: bash
|
||||||
|
if: ${{ matrix.configuration.tests }}
|
||||||
|
run: |
|
||||||
|
cd ${build_dir}/${{ matrix.configuration.type }}
|
||||||
|
./rippled --unittest --unittest-jobs $(nproc)
|
||||||
|
ctest -j $(nproc) --output-on-failure
|
||||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -37,9 +37,10 @@ Release/*.*
|
|||||||
*.gcov
|
*.gcov
|
||||||
|
|
||||||
# Levelization checking
|
# Levelization checking
|
||||||
.github/scripts/levelization/results/*
|
Builds/levelization/results/rawincludes.txt
|
||||||
!.github/scripts/levelization/results/loops.txt
|
Builds/levelization/results/paths.txt
|
||||||
!.github/scripts/levelization/results/ordering.txt
|
Builds/levelization/results/includes/
|
||||||
|
Builds/levelization/results/includedby/
|
||||||
|
|
||||||
# Ignore tmp directory.
|
# Ignore tmp directory.
|
||||||
tmp
|
tmp
|
||||||
@@ -110,4 +111,4 @@ bld.rippled/
|
|||||||
.vscode
|
.vscode
|
||||||
|
|
||||||
# Suggested in-tree build directory
|
# Suggested in-tree build directory
|
||||||
/.build*/
|
/.build/
|
||||||
|
|||||||
@@ -1,64 +1,6 @@
|
|||||||
# To run pre-commit hooks, first install pre-commit:
|
# .pre-commit-config.yaml
|
||||||
# - `pip install pre-commit==${PRE_COMMIT_VERSION}`
|
|
||||||
# - `pip install pre-commit-hooks==${PRE_COMMIT_HOOKS_VERSION}`
|
|
||||||
#
|
|
||||||
# Depending on your system, you can use `brew install` or `apt install` as well
|
|
||||||
# for installing the pre-commit package, but `pip` is needed to install the
|
|
||||||
# hooks; you can also use `pipx` if you prefer.
|
|
||||||
# Next, install the required formatters:
|
|
||||||
# - `pip install clang-format==${CLANG_VERSION}`
|
|
||||||
# - `npm install prettier@${PRETTIER_VERSION}`
|
|
||||||
#
|
|
||||||
# See https://github.com/XRPLF/ci/blob/main/.github/workflows/tools-rippled.yml
|
|
||||||
# for the versions used in the CI pipeline. You will need to have the exact same
|
|
||||||
# versions of the tools installed on your system to produce the same results as
|
|
||||||
# the pipeline.
|
|
||||||
#
|
|
||||||
# Then, run the following command to install the git hook scripts:
|
|
||||||
# - `pre-commit install`
|
|
||||||
# You can run all configured hooks against all files with:
|
|
||||||
# - `pre-commit run --all-files`
|
|
||||||
# To manually run a specific hook, use:
|
|
||||||
# - `pre-commit run <hook_id> --all-files`
|
|
||||||
# To run the hooks against only the files changed in the current commit, use:
|
|
||||||
# - `pre-commit run`
|
|
||||||
repos:
|
repos:
|
||||||
- repo: local
|
- repo: https://github.com/pre-commit/mirrors-clang-format
|
||||||
|
rev: v18.1.8
|
||||||
hooks:
|
hooks:
|
||||||
- id: clang-format
|
- id: clang-format
|
||||||
name: clang-format
|
|
||||||
language: system
|
|
||||||
entry: clang-format -i
|
|
||||||
files: '\.(cpp|hpp|h|ipp|proto)$'
|
|
||||||
- id: trailing-whitespace
|
|
||||||
name: trailing-whitespace
|
|
||||||
entry: trailing-whitespace-fixer
|
|
||||||
language: system
|
|
||||||
types: [text]
|
|
||||||
- id: end-of-file
|
|
||||||
name: end-of-file
|
|
||||||
entry: end-of-file-fixer
|
|
||||||
language: system
|
|
||||||
types: [text]
|
|
||||||
- id: mixed-line-ending
|
|
||||||
name: mixed-line-ending
|
|
||||||
entry: mixed-line-ending
|
|
||||||
language: system
|
|
||||||
types: [text]
|
|
||||||
- id: check-merge-conflict
|
|
||||||
name: check-merge-conflict
|
|
||||||
entry: check-merge-conflict --assume-in-merge
|
|
||||||
language: system
|
|
||||||
types: [text]
|
|
||||||
- repo: local
|
|
||||||
hooks:
|
|
||||||
- id: prettier
|
|
||||||
name: prettier
|
|
||||||
language: system
|
|
||||||
entry: prettier --ignore-unknown --write
|
|
||||||
|
|
||||||
exclude: |
|
|
||||||
(?x)^(
|
|
||||||
external/.*|
|
|
||||||
.github/scripts/levelization/results/.*\.txt
|
|
||||||
)$
|
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
external
|
|
||||||
@@ -50,7 +50,7 @@ that `test` code should _never_ be included in `ripple` code.)
|
|||||||
|
|
||||||
## Validation
|
## Validation
|
||||||
|
|
||||||
The [levelization](generate.sh) script takes no parameters,
|
The [levelization.sh](levelization.sh) script takes no parameters,
|
||||||
reads no environment variables, and can be run from any directory,
|
reads no environment variables, and can be run from any directory,
|
||||||
as long as it is in the expected location in the rippled repo.
|
as long as it is in the expected location in the rippled repo.
|
||||||
It can be run at any time from within a checked out repo, and will
|
It can be run at any time from within a checked out repo, and will
|
||||||
@@ -72,15 +72,15 @@ It generates many files of [results](results):
|
|||||||
desired as described above. In a perfect repo, this file will be
|
desired as described above. In a perfect repo, this file will be
|
||||||
empty.
|
empty.
|
||||||
This file is committed to the repo, and is used by the [levelization
|
This file is committed to the repo, and is used by the [levelization
|
||||||
Github workflow](../../workflows/check-levelization.yml) to validate
|
Github workflow](../../.github/workflows/levelization.yml) to validate
|
||||||
that nothing changed.
|
that nothing changed.
|
||||||
- [`ordering.txt`](results/ordering.txt): A list showing relationships
|
- [`ordering.txt`](results/ordering.txt): A list showing relationships
|
||||||
between modules where there are no loops as they actually exist, as
|
between modules where there are no loops as they actually exist, as
|
||||||
opposed to how they are desired as described above.
|
opposed to how they are desired as described above.
|
||||||
This file is committed to the repo, and is used by the [levelization
|
This file is committed to the repo, and is used by the [levelization
|
||||||
Github workflow](../../workflows/check-levelization.yml) to validate
|
Github workflow](../../.github/workflows/levelization.yml) to validate
|
||||||
that nothing changed.
|
that nothing changed.
|
||||||
- [`levelization.yml`](../../workflows/check-levelization.yml)
|
- [`levelization.yml`](../../.github/workflows/levelization.yml)
|
||||||
Github Actions workflow to test that levelization loops haven't
|
Github Actions workflow to test that levelization loops haven't
|
||||||
changed. Unfortunately, if changes are detected, it can't tell if
|
changed. Unfortunately, if changes are detected, it can't tell if
|
||||||
they are improvements or not, so if you have resolved any issues or
|
they are improvements or not, so if you have resolved any issues or
|
||||||
@@ -111,4 +111,4 @@ get those details locally.
|
|||||||
1. Run `levelization.sh`
|
1. Run `levelization.sh`
|
||||||
2. Grep the modules in `paths.txt`.
|
2. Grep the modules in `paths.txt`.
|
||||||
- For example, if a cycle is found `A ~= B`, simply `grep -w
|
- For example, if a cycle is found `A ~= B`, simply `grep -w
|
||||||
A .github/scripts/levelization/results/paths.txt | grep -w B`
|
A Builds/levelization/results/paths.txt | grep -w B`
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# Usage: generate.sh
|
# Usage: levelization.sh
|
||||||
# This script takes no parameters, reads no environment variables,
|
# This script takes no parameters, reads no environment variables,
|
||||||
# and can be run from any directory, as long as it is in the expected
|
# and can be run from any directory, as long as it is in the expected
|
||||||
# location in the repo.
|
# location in the repo.
|
||||||
@@ -19,7 +19,7 @@ export LANG=C
|
|||||||
rm -rfv results
|
rm -rfv results
|
||||||
mkdir results
|
mkdir results
|
||||||
includes="$( pwd )/results/rawincludes.txt"
|
includes="$( pwd )/results/rawincludes.txt"
|
||||||
pushd ../../..
|
pushd ../..
|
||||||
echo Raw includes:
|
echo Raw includes:
|
||||||
grep -r '^[ ]*#include.*/.*\.h' include src | \
|
grep -r '^[ ]*#include.*/.*\.h' include src | \
|
||||||
grep -v boost | tee ${includes}
|
grep -v boost | tee ${includes}
|
||||||
@@ -10,6 +10,9 @@ Loop: xrpld.app xrpld.core
|
|||||||
Loop: xrpld.app xrpld.ledger
|
Loop: xrpld.app xrpld.ledger
|
||||||
xrpld.app > xrpld.ledger
|
xrpld.app > xrpld.ledger
|
||||||
|
|
||||||
|
Loop: xrpld.app xrpld.net
|
||||||
|
xrpld.app > xrpld.net
|
||||||
|
|
||||||
Loop: xrpld.app xrpld.overlay
|
Loop: xrpld.app xrpld.overlay
|
||||||
xrpld.overlay > xrpld.app
|
xrpld.overlay > xrpld.app
|
||||||
|
|
||||||
@@ -22,9 +25,15 @@ Loop: xrpld.app xrpld.rpc
|
|||||||
Loop: xrpld.app xrpld.shamap
|
Loop: xrpld.app xrpld.shamap
|
||||||
xrpld.app > xrpld.shamap
|
xrpld.app > xrpld.shamap
|
||||||
|
|
||||||
|
Loop: xrpld.core xrpld.net
|
||||||
|
xrpld.net > xrpld.core
|
||||||
|
|
||||||
Loop: xrpld.core xrpld.perflog
|
Loop: xrpld.core xrpld.perflog
|
||||||
xrpld.perflog == xrpld.core
|
xrpld.perflog == xrpld.core
|
||||||
|
|
||||||
|
Loop: xrpld.net xrpld.rpc
|
||||||
|
xrpld.rpc ~= xrpld.net
|
||||||
|
|
||||||
Loop: xrpld.overlay xrpld.rpc
|
Loop: xrpld.overlay xrpld.rpc
|
||||||
xrpld.rpc ~= xrpld.overlay
|
xrpld.rpc ~= xrpld.overlay
|
||||||
|
|
||||||
@@ -2,8 +2,6 @@ libxrpl.basics > xrpl.basics
|
|||||||
libxrpl.crypto > xrpl.basics
|
libxrpl.crypto > xrpl.basics
|
||||||
libxrpl.json > xrpl.basics
|
libxrpl.json > xrpl.basics
|
||||||
libxrpl.json > xrpl.json
|
libxrpl.json > xrpl.json
|
||||||
libxrpl.net > xrpl.basics
|
|
||||||
libxrpl.net > xrpl.net
|
|
||||||
libxrpl.protocol > xrpl.basics
|
libxrpl.protocol > xrpl.basics
|
||||||
libxrpl.protocol > xrpl.json
|
libxrpl.protocol > xrpl.json
|
||||||
libxrpl.protocol > xrpl.protocol
|
libxrpl.protocol > xrpl.protocol
|
||||||
@@ -64,9 +62,9 @@ test.jtx > xrpl.basics
|
|||||||
test.jtx > xrpld.app
|
test.jtx > xrpld.app
|
||||||
test.jtx > xrpld.core
|
test.jtx > xrpld.core
|
||||||
test.jtx > xrpld.ledger
|
test.jtx > xrpld.ledger
|
||||||
|
test.jtx > xrpld.net
|
||||||
test.jtx > xrpld.rpc
|
test.jtx > xrpld.rpc
|
||||||
test.jtx > xrpl.json
|
test.jtx > xrpl.json
|
||||||
test.jtx > xrpl.net
|
|
||||||
test.jtx > xrpl.protocol
|
test.jtx > xrpl.protocol
|
||||||
test.jtx > xrpl.resource
|
test.jtx > xrpl.resource
|
||||||
test.jtx > xrpl.server
|
test.jtx > xrpl.server
|
||||||
@@ -111,6 +109,7 @@ test.rpc > test.toplevel
|
|||||||
test.rpc > xrpl.basics
|
test.rpc > xrpl.basics
|
||||||
test.rpc > xrpld.app
|
test.rpc > xrpld.app
|
||||||
test.rpc > xrpld.core
|
test.rpc > xrpld.core
|
||||||
|
test.rpc > xrpld.net
|
||||||
test.rpc > xrpld.overlay
|
test.rpc > xrpld.overlay
|
||||||
test.rpc > xrpld.rpc
|
test.rpc > xrpld.rpc
|
||||||
test.rpc > xrpl.json
|
test.rpc > xrpl.json
|
||||||
@@ -135,7 +134,6 @@ test.toplevel > xrpl.json
|
|||||||
test.unit_test > xrpl.basics
|
test.unit_test > xrpl.basics
|
||||||
tests.libxrpl > xrpl.basics
|
tests.libxrpl > xrpl.basics
|
||||||
xrpl.json > xrpl.basics
|
xrpl.json > xrpl.basics
|
||||||
xrpl.net > xrpl.basics
|
|
||||||
xrpl.protocol > xrpl.basics
|
xrpl.protocol > xrpl.basics
|
||||||
xrpl.protocol > xrpl.json
|
xrpl.protocol > xrpl.json
|
||||||
xrpl.resource > xrpl.basics
|
xrpl.resource > xrpl.basics
|
||||||
@@ -151,7 +149,6 @@ xrpld.app > xrpld.consensus
|
|||||||
xrpld.app > xrpld.nodestore
|
xrpld.app > xrpld.nodestore
|
||||||
xrpld.app > xrpld.perflog
|
xrpld.app > xrpld.perflog
|
||||||
xrpld.app > xrpl.json
|
xrpld.app > xrpl.json
|
||||||
xrpld.app > xrpl.net
|
|
||||||
xrpld.app > xrpl.protocol
|
xrpld.app > xrpl.protocol
|
||||||
xrpld.app > xrpl.resource
|
xrpld.app > xrpl.resource
|
||||||
xrpld.conditions > xrpl.basics
|
xrpld.conditions > xrpl.basics
|
||||||
@@ -161,11 +158,14 @@ xrpld.consensus > xrpl.json
|
|||||||
xrpld.consensus > xrpl.protocol
|
xrpld.consensus > xrpl.protocol
|
||||||
xrpld.core > xrpl.basics
|
xrpld.core > xrpl.basics
|
||||||
xrpld.core > xrpl.json
|
xrpld.core > xrpl.json
|
||||||
xrpld.core > xrpl.net
|
|
||||||
xrpld.core > xrpl.protocol
|
xrpld.core > xrpl.protocol
|
||||||
xrpld.ledger > xrpl.basics
|
xrpld.ledger > xrpl.basics
|
||||||
xrpld.ledger > xrpl.json
|
xrpld.ledger > xrpl.json
|
||||||
xrpld.ledger > xrpl.protocol
|
xrpld.ledger > xrpl.protocol
|
||||||
|
xrpld.net > xrpl.basics
|
||||||
|
xrpld.net > xrpl.json
|
||||||
|
xrpld.net > xrpl.protocol
|
||||||
|
xrpld.net > xrpl.resource
|
||||||
xrpld.nodestore > xrpl.basics
|
xrpld.nodestore > xrpl.basics
|
||||||
xrpld.nodestore > xrpld.core
|
xrpld.nodestore > xrpld.core
|
||||||
xrpld.nodestore > xrpld.unity
|
xrpld.nodestore > xrpld.unity
|
||||||
@@ -189,7 +189,6 @@ xrpld.rpc > xrpld.core
|
|||||||
xrpld.rpc > xrpld.ledger
|
xrpld.rpc > xrpld.ledger
|
||||||
xrpld.rpc > xrpld.nodestore
|
xrpld.rpc > xrpld.nodestore
|
||||||
xrpld.rpc > xrpl.json
|
xrpld.rpc > xrpl.json
|
||||||
xrpld.rpc > xrpl.net
|
|
||||||
xrpld.rpc > xrpl.protocol
|
xrpld.rpc > xrpl.protocol
|
||||||
xrpld.rpc > xrpl.resource
|
xrpld.rpc > xrpl.resource
|
||||||
xrpld.rpc > xrpl.server
|
xrpld.rpc > xrpl.server
|
||||||
@@ -81,7 +81,7 @@ If you create new source files, they must be organized as follows:
|
|||||||
|
|
||||||
The source must be formatted according to the style guide below.
|
The source must be formatted according to the style guide below.
|
||||||
|
|
||||||
Header includes must be [levelized](.github/scripts/levelization).
|
Header includes must be [levelized](./Builds/levelization).
|
||||||
|
|
||||||
Changes should be usually squashed down into a single commit.
|
Changes should be usually squashed down into a single commit.
|
||||||
Some larger or more complicated change sets make more sense,
|
Some larger or more complicated change sets make more sense,
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ If you are interested in running an **API Server** (including a **Full History S
|
|||||||
Here are some good places to start learning the source code:
|
Here are some good places to start learning the source code:
|
||||||
|
|
||||||
- Read the markdown files in the source tree: `src/ripple/**/*.md`.
|
- Read the markdown files in the source tree: `src/ripple/**/*.md`.
|
||||||
- Read [the levelization document](.github/scripts/levelization) to get an idea of the internal dependency graph.
|
- Read [the levelization document](./Builds/levelization) to get an idea of the internal dependency graph.
|
||||||
- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component.
|
- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component.
|
||||||
|
|
||||||
### Repository Contents
|
### Repository Contents
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ then
|
|||||||
name=$( basename $0 )
|
name=$( basename $0 )
|
||||||
cat <<- USAGE
|
cat <<- USAGE
|
||||||
Usage: $name <username>
|
Usage: $name <username>
|
||||||
|
|
||||||
Where <username> is the Github username of the upstream repo. e.g. XRPLF
|
Where <username> is the Github username of the upstream repo. e.g. XRPLF
|
||||||
USAGE
|
USAGE
|
||||||
exit 0
|
exit 0
|
||||||
@@ -83,3 +83,4 @@ fi
|
|||||||
_run git fetch --jobs=$(nproc) upstreams
|
_run git fetch --jobs=$(nproc) upstreams
|
||||||
|
|
||||||
exit 0
|
exit 0
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ then
|
|||||||
name=$( basename $0 )
|
name=$( basename $0 )
|
||||||
cat <<- USAGE
|
cat <<- USAGE
|
||||||
Usage: $name workbranch base/branch user/branch [user/branch [...]]
|
Usage: $name workbranch base/branch user/branch [user/branch [...]]
|
||||||
|
|
||||||
* workbranch will be created locally from base/branch
|
* workbranch will be created locally from base/branch
|
||||||
* base/branch and user/branch may be specified as user:branch to allow
|
* base/branch and user/branch may be specified as user:branch to allow
|
||||||
easy copying from Github PRs
|
easy copying from Github PRs
|
||||||
@@ -66,3 +66,4 @@ git push $push HEAD:$b
|
|||||||
git fetch $repo
|
git fetch $repo
|
||||||
-------------------------------------------------------------------
|
-------------------------------------------------------------------
|
||||||
PUSH
|
PUSH
|
||||||
|
|
||||||
|
|||||||
@@ -396,8 +396,8 @@
|
|||||||
# true - enables compression
|
# true - enables compression
|
||||||
# false - disables compression [default].
|
# false - disables compression [default].
|
||||||
#
|
#
|
||||||
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
|
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
|
||||||
# at a cost of greater CPU usage. If you enable link compression,
|
# at a cost of greater CPU usage. If you enable link compression,
|
||||||
# the server automatically compresses communications with peer servers
|
# the server automatically compresses communications with peer servers
|
||||||
# that also have link compression enabled.
|
# that also have link compression enabled.
|
||||||
# https://xrpl.org/enable-link-compression.html
|
# https://xrpl.org/enable-link-compression.html
|
||||||
@@ -1011,7 +1011,7 @@
|
|||||||
# that rippled is still in sync with the network,
|
# that rippled is still in sync with the network,
|
||||||
# and that the validated ledger is less than
|
# and that the validated ledger is less than
|
||||||
# 'age_threshold_seconds' old. If not, then continue
|
# 'age_threshold_seconds' old. If not, then continue
|
||||||
# sleeping for this number of seconds and
|
# sleeping for this number of seconds and
|
||||||
# checking until healthy.
|
# checking until healthy.
|
||||||
# Default is 5.
|
# Default is 5.
|
||||||
#
|
#
|
||||||
@@ -1113,7 +1113,7 @@
|
|||||||
# page_size Valid values: integer (MUST be power of 2 between 512 and 65536)
|
# page_size Valid values: integer (MUST be power of 2 between 512 and 65536)
|
||||||
# The default is 4096 bytes. This setting determines
|
# The default is 4096 bytes. This setting determines
|
||||||
# the size of a page in the transaction.db file.
|
# the size of a page in the transaction.db file.
|
||||||
# See https://www.sqlite.org/pragma.html#pragma_page_size
|
# See https://www.sqlite.org/pragma.html#pragma_page_size
|
||||||
# for more details about the available options.
|
# for more details about the available options.
|
||||||
#
|
#
|
||||||
# journal_size_limit Valid values: integer
|
# journal_size_limit Valid values: integer
|
||||||
|
|||||||
@@ -101,9 +101,6 @@
|
|||||||
# 2025-05-12, Jingchen Wu
|
# 2025-05-12, Jingchen Wu
|
||||||
# - add -fprofile-update=atomic to ensure atomic profile generation
|
# - add -fprofile-update=atomic to ensure atomic profile generation
|
||||||
#
|
#
|
||||||
# 2025-08-28, Bronek Kozicki
|
|
||||||
# - fix "At least one COMMAND must be given" CMake warning from policy CMP0175
|
|
||||||
#
|
|
||||||
# USAGE:
|
# USAGE:
|
||||||
#
|
#
|
||||||
# 1. Copy this file into your cmake modules path.
|
# 1. Copy this file into your cmake modules path.
|
||||||
@@ -449,7 +446,7 @@ function(setup_target_for_coverage_gcovr)
|
|||||||
|
|
||||||
# Show info where to find the report
|
# Show info where to find the report
|
||||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
|
||||||
COMMAND echo
|
COMMAND ;
|
||||||
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
|
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
|
||||||
)
|
)
|
||||||
endfunction() # setup_target_for_coverage_gcovr
|
endfunction() # setup_target_for_coverage_gcovr
|
||||||
|
|||||||
@@ -16,16 +16,13 @@ set(CMAKE_CXX_EXTENSIONS OFF)
|
|||||||
target_compile_definitions (common
|
target_compile_definitions (common
|
||||||
INTERFACE
|
INTERFACE
|
||||||
$<$<CONFIG:Debug>:DEBUG _DEBUG>
|
$<$<CONFIG:Debug>:DEBUG _DEBUG>
|
||||||
#[===[
|
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
|
||||||
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
|
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
|
||||||
explicitly except for the special case of (profile ON) and (assert OFF).
|
# defined, so no need to add it explicitly except for
|
||||||
Presumably this is because we don't want profile builds asserting unless
|
# this special case of (profile ON) and (assert OFF)
|
||||||
asserts were specifically requested.
|
# -- presumably this is because we don't want profile
|
||||||
]===]
|
# builds asserting unless asserts were specifically
|
||||||
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
|
# requested
|
||||||
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
|
|
||||||
OPENSSL_SUPPRESS_DEPRECATED
|
|
||||||
)
|
|
||||||
|
|
||||||
if (MSVC)
|
if (MSVC)
|
||||||
# remove existing exception flag since we set it to -EHa
|
# remove existing exception flag since we set it to -EHa
|
||||||
|
|||||||
@@ -99,15 +99,6 @@ target_link_libraries(xrpl.libxrpl.protocol PUBLIC
|
|||||||
add_module(xrpl resource)
|
add_module(xrpl resource)
|
||||||
target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
|
target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
|
||||||
|
|
||||||
# Level 06
|
|
||||||
add_module(xrpl net)
|
|
||||||
target_link_libraries(xrpl.libxrpl.net PUBLIC
|
|
||||||
xrpl.libxrpl.basics
|
|
||||||
xrpl.libxrpl.json
|
|
||||||
xrpl.libxrpl.protocol
|
|
||||||
xrpl.libxrpl.resource
|
|
||||||
)
|
|
||||||
|
|
||||||
add_module(xrpl server)
|
add_module(xrpl server)
|
||||||
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
|
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
|
||||||
|
|
||||||
@@ -130,7 +121,6 @@ target_link_modules(xrpl PUBLIC
|
|||||||
protocol
|
protocol
|
||||||
resource
|
resource
|
||||||
server
|
server
|
||||||
net
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# All headers in libxrpl are in modules.
|
# All headers in libxrpl are in modules.
|
||||||
|
|||||||
@@ -19,7 +19,6 @@ install (
|
|||||||
xrpl.libxrpl.protocol
|
xrpl.libxrpl.protocol
|
||||||
xrpl.libxrpl.resource
|
xrpl.libxrpl.resource
|
||||||
xrpl.libxrpl.server
|
xrpl.libxrpl.server
|
||||||
xrpl.libxrpl.net
|
|
||||||
xrpl.libxrpl
|
xrpl.libxrpl
|
||||||
antithesis-sdk-cpp
|
antithesis-sdk-cpp
|
||||||
EXPORT RippleExports
|
EXPORT RippleExports
|
||||||
|
|||||||
@@ -118,7 +118,7 @@ option(beast_no_unit_test_inline
|
|||||||
"Prevents unit test definitions from being inserted into global table"
|
"Prevents unit test definitions from being inserted into global table"
|
||||||
OFF)
|
OFF)
|
||||||
option(single_io_service_thread
|
option(single_io_service_thread
|
||||||
"Restricts the number of threads calling io_context::run to one. \
|
"Restricts the number of threads calling io_service::run to one. \
|
||||||
This can be useful when debugging."
|
This can be useful when debugging."
|
||||||
OFF)
|
OFF)
|
||||||
option(boost_show_deprecated
|
option(boost_show_deprecated
|
||||||
|
|||||||
@@ -14,6 +14,12 @@ find_package(Boost 1.82 REQUIRED
|
|||||||
|
|
||||||
add_library(ripple_boost INTERFACE)
|
add_library(ripple_boost INTERFACE)
|
||||||
add_library(Ripple::boost ALIAS ripple_boost)
|
add_library(Ripple::boost ALIAS ripple_boost)
|
||||||
|
if(XCODE)
|
||||||
|
target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||||
|
target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
|
||||||
|
else()
|
||||||
|
target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||||
|
endif()
|
||||||
|
|
||||||
target_link_libraries(ripple_boost
|
target_link_libraries(ripple_boost
|
||||||
INTERFACE
|
INTERFACE
|
||||||
@@ -24,7 +30,6 @@ target_link_libraries(ripple_boost
|
|||||||
Boost::date_time
|
Boost::date_time
|
||||||
Boost::filesystem
|
Boost::filesystem
|
||||||
Boost::json
|
Boost::json
|
||||||
Boost::process
|
|
||||||
Boost::program_options
|
Boost::program_options
|
||||||
Boost::regex
|
Boost::regex
|
||||||
Boost::system
|
Boost::system
|
||||||
|
|||||||
@@ -1,9 +0,0 @@
|
|||||||
# Global configuration for Conan. This is used to set the number of parallel
|
|
||||||
# downloads, uploads, and build jobs. The verbosity is set to verbose to
|
|
||||||
# provide more information during the build process.
|
|
||||||
core:non_interactive=True
|
|
||||||
core.download:parallel={{ os.cpu_count() }}
|
|
||||||
core.upload:parallel={{ os.cpu_count() }}
|
|
||||||
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
|
|
||||||
tools.build:verbosity=verbose
|
|
||||||
tools.compilation:verbosity=verbose
|
|
||||||
@@ -26,6 +26,9 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
|
|||||||
{% if compiler == "apple-clang" and compiler_version >= 17 %}
|
{% if compiler == "apple-clang" and compiler_version >= 17 %}
|
||||||
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
|
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
{% if compiler == "clang" and compiler_version == 16 %}
|
||||||
|
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
|
||||||
|
{% endif %}
|
||||||
{% if compiler == "gcc" and compiler_version < 13 %}
|
{% if compiler == "gcc" and compiler_version < 13 %}
|
||||||
tools.build:cxxflags=['-Wno-restrict']
|
tools.build:cxxflags=['-Wno-restrict']
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ class Xrpl(ConanFile):
|
|||||||
'grpc/1.50.1',
|
'grpc/1.50.1',
|
||||||
'libarchive/3.8.1',
|
'libarchive/3.8.1',
|
||||||
'nudb/2.0.9',
|
'nudb/2.0.9',
|
||||||
'openssl/3.5.2',
|
'openssl/1.1.1w',
|
||||||
'soci/4.0.3',
|
'soci/4.0.3',
|
||||||
'zlib/1.3.1',
|
'zlib/1.3.1',
|
||||||
]
|
]
|
||||||
@@ -100,13 +100,11 @@ class Xrpl(ConanFile):
|
|||||||
def configure(self):
|
def configure(self):
|
||||||
if self.settings.compiler == 'apple-clang':
|
if self.settings.compiler == 'apple-clang':
|
||||||
self.options['boost'].visibility = 'global'
|
self.options['boost'].visibility = 'global'
|
||||||
if self.settings.compiler in ['clang', 'gcc']:
|
|
||||||
self.options['boost'].without_cobalt = True
|
|
||||||
|
|
||||||
def requirements(self):
|
def requirements(self):
|
||||||
# Conan 2 requires transitive headers to be specified
|
# Conan 2 requires transitive headers to be specified
|
||||||
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
|
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
|
||||||
self.requires('boost/1.88.0', force=True, **transitive_headers_opt)
|
self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
|
||||||
self.requires('date/3.0.4', **transitive_headers_opt)
|
self.requires('date/3.0.4', **transitive_headers_opt)
|
||||||
self.requires('lz4/1.10.0', force=True)
|
self.requires('lz4/1.10.0', force=True)
|
||||||
self.requires('protobuf/3.21.12', force=True)
|
self.requires('protobuf/3.21.12', force=True)
|
||||||
@@ -177,7 +175,6 @@ class Xrpl(ConanFile):
|
|||||||
'boost::filesystem',
|
'boost::filesystem',
|
||||||
'boost::json',
|
'boost::json',
|
||||||
'boost::program_options',
|
'boost::program_options',
|
||||||
'boost::process',
|
|
||||||
'boost::regex',
|
'boost::regex',
|
||||||
'boost::system',
|
'boost::system',
|
||||||
'boost::thread',
|
'boost::thread',
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ skinparam roundcorner 20
|
|||||||
skinparam maxmessagesize 160
|
skinparam maxmessagesize 160
|
||||||
|
|
||||||
actor "Rippled Start" as RS
|
actor "Rippled Start" as RS
|
||||||
participant "Timer" as T
|
participant "Timer" as T
|
||||||
participant "NetworkOPs" as NOP
|
participant "NetworkOPs" as NOP
|
||||||
participant "ValidatorList" as VL #lightgreen
|
participant "ValidatorList" as VL #lightgreen
|
||||||
participant "Consensus" as GC
|
participant "Consensus" as GC
|
||||||
participant "ConsensusAdaptor" as CA #lightgreen
|
participant "ConsensusAdaptor" as CA #lightgreen
|
||||||
@@ -20,7 +20,7 @@ VL -> NOP
|
|||||||
NOP -> VL: update trusted validators
|
NOP -> VL: update trusted validators
|
||||||
activate VL
|
activate VL
|
||||||
VL -> VL: re-calculate quorum
|
VL -> VL: re-calculate quorum
|
||||||
hnote over VL#lightgreen: ignore negative listed validators\nwhen calculate quorum
|
hnote over VL#lightgreen: ignore negative listed validators\nwhen calculate quorum
|
||||||
VL -> NOP
|
VL -> NOP
|
||||||
deactivate VL
|
deactivate VL
|
||||||
NOP -> GC: start round
|
NOP -> GC: start round
|
||||||
@@ -36,14 +36,14 @@ activate GC
|
|||||||
end
|
end
|
||||||
|
|
||||||
alt phase == OPEN
|
alt phase == OPEN
|
||||||
alt should close ledger
|
alt should close ledger
|
||||||
GC -> GC: phase = ESTABLISH
|
GC -> GC: phase = ESTABLISH
|
||||||
GC -> CA: onClose
|
GC -> CA: onClose
|
||||||
activate CA
|
activate CA
|
||||||
alt sqn%256==0
|
alt sqn%256==0
|
||||||
CA -[#green]> RM: <font color=green>getValidations
|
CA -[#green]> RM: <font color=green>getValidations
|
||||||
CA -[#green]> CA: <font color=green>create UNLModify Tx
|
CA -[#green]> CA: <font color=green>create UNLModify Tx
|
||||||
hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
|
hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
|
||||||
end
|
end
|
||||||
CA -> GC
|
CA -> GC
|
||||||
GC -> CA: propose
|
GC -> CA: propose
|
||||||
@@ -61,14 +61,14 @@ else phase == ESTABLISH
|
|||||||
CA -> CA : build LCL
|
CA -> CA : build LCL
|
||||||
hnote over CA #lightgreen: copy negative UNL from parent ledger
|
hnote over CA #lightgreen: copy negative UNL from parent ledger
|
||||||
alt sqn%256==0
|
alt sqn%256==0
|
||||||
CA -[#green]> CA: <font color=green>Adjust negative UNL
|
CA -[#green]> CA: <font color=green>Adjust negative UNL
|
||||||
CA -[#green]> CA: <font color=green>apply UNLModify Tx
|
CA -[#green]> CA: <font color=green>apply UNLModify Tx
|
||||||
end
|
end
|
||||||
CA -> CA : validate and send validation message
|
CA -> CA : validate and send validation message
|
||||||
activate NOP
|
activate NOP
|
||||||
CA -> NOP : end consensus and\n<b>begin next consensus round
|
CA -> NOP : end consensus and\n<b>begin next consensus round
|
||||||
deactivate NOP
|
deactivate NOP
|
||||||
deactivate CA
|
deactivate CA
|
||||||
hnote over RM: receive validations
|
hnote over RM: receive validations
|
||||||
end
|
end
|
||||||
else phase == ACCEPTED
|
else phase == ACCEPTED
|
||||||
@@ -76,4 +76,4 @@ else phase == ACCEPTED
|
|||||||
end
|
end
|
||||||
deactivate GC
|
deactivate GC
|
||||||
|
|
||||||
@enduml
|
@enduml
|
||||||
@@ -4,7 +4,7 @@ class TimeoutCounter {
|
|||||||
#app_ : Application&
|
#app_ : Application&
|
||||||
}
|
}
|
||||||
|
|
||||||
TimeoutCounter o-- "1" Application
|
TimeoutCounter o-- "1" Application
|
||||||
': app_
|
': app_
|
||||||
|
|
||||||
Stoppable <.. Application
|
Stoppable <.. Application
|
||||||
@@ -14,13 +14,13 @@ class Application {
|
|||||||
-m_inboundLedgers : uptr<InboundLedgers>
|
-m_inboundLedgers : uptr<InboundLedgers>
|
||||||
}
|
}
|
||||||
|
|
||||||
Application *-- "1" LedgerReplayer
|
Application *-- "1" LedgerReplayer
|
||||||
': m_ledgerReplayer
|
': m_ledgerReplayer
|
||||||
Application *-- "1" InboundLedgers
|
Application *-- "1" InboundLedgers
|
||||||
': m_inboundLedgers
|
': m_inboundLedgers
|
||||||
|
|
||||||
Stoppable <.. InboundLedgers
|
Stoppable <.. InboundLedgers
|
||||||
Application "1" --o InboundLedgers
|
Application "1" --o InboundLedgers
|
||||||
': app_
|
': app_
|
||||||
|
|
||||||
class InboundLedgers {
|
class InboundLedgers {
|
||||||
@@ -28,9 +28,9 @@ class InboundLedgers {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Stoppable <.. LedgerReplayer
|
Stoppable <.. LedgerReplayer
|
||||||
InboundLedgers "1" --o LedgerReplayer
|
InboundLedgers "1" --o LedgerReplayer
|
||||||
': inboundLedgers_
|
': inboundLedgers_
|
||||||
Application "1" --o LedgerReplayer
|
Application "1" --o LedgerReplayer
|
||||||
': app_
|
': app_
|
||||||
|
|
||||||
class LedgerReplayer {
|
class LedgerReplayer {
|
||||||
@@ -42,17 +42,17 @@ class LedgerReplayer {
|
|||||||
-skipLists_ : hash_map<u256, wptr<SkipListAcquire>>
|
-skipLists_ : hash_map<u256, wptr<SkipListAcquire>>
|
||||||
}
|
}
|
||||||
|
|
||||||
LedgerReplayer *-- LedgerReplayTask
|
LedgerReplayer *-- LedgerReplayTask
|
||||||
': tasks_
|
': tasks_
|
||||||
LedgerReplayer o-- LedgerDeltaAcquire
|
LedgerReplayer o-- LedgerDeltaAcquire
|
||||||
': deltas_
|
': deltas_
|
||||||
LedgerReplayer o-- SkipListAcquire
|
LedgerReplayer o-- SkipListAcquire
|
||||||
': skipLists_
|
': skipLists_
|
||||||
|
|
||||||
TimeoutCounter <.. LedgerReplayTask
|
TimeoutCounter <.. LedgerReplayTask
|
||||||
InboundLedgers "1" --o LedgerReplayTask
|
InboundLedgers "1" --o LedgerReplayTask
|
||||||
': inboundLedgers_
|
': inboundLedgers_
|
||||||
LedgerReplayer "1" --o LedgerReplayTask
|
LedgerReplayer "1" --o LedgerReplayTask
|
||||||
': replayer_
|
': replayer_
|
||||||
|
|
||||||
class LedgerReplayTask {
|
class LedgerReplayTask {
|
||||||
@@ -63,15 +63,15 @@ class LedgerReplayTask {
|
|||||||
+addDelta(sptr<LedgerDeltaAcquire>)
|
+addDelta(sptr<LedgerDeltaAcquire>)
|
||||||
}
|
}
|
||||||
|
|
||||||
LedgerReplayTask *-- "1" SkipListAcquire
|
LedgerReplayTask *-- "1" SkipListAcquire
|
||||||
': skipListAcquirer_
|
': skipListAcquirer_
|
||||||
LedgerReplayTask *-- LedgerDeltaAcquire
|
LedgerReplayTask *-- LedgerDeltaAcquire
|
||||||
': deltas_
|
': deltas_
|
||||||
|
|
||||||
TimeoutCounter <.. SkipListAcquire
|
TimeoutCounter <.. SkipListAcquire
|
||||||
InboundLedgers "1" --o SkipListAcquire
|
InboundLedgers "1" --o SkipListAcquire
|
||||||
': inboundLedgers_
|
': inboundLedgers_
|
||||||
LedgerReplayer "1" --o SkipListAcquire
|
LedgerReplayer "1" --o SkipListAcquire
|
||||||
': replayer_
|
': replayer_
|
||||||
LedgerReplayTask --o SkipListAcquire : implicit via callback
|
LedgerReplayTask --o SkipListAcquire : implicit via callback
|
||||||
|
|
||||||
@@ -83,9 +83,9 @@ class SkipListAcquire {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TimeoutCounter <.. LedgerDeltaAcquire
|
TimeoutCounter <.. LedgerDeltaAcquire
|
||||||
InboundLedgers "1" --o LedgerDeltaAcquire
|
InboundLedgers "1" --o LedgerDeltaAcquire
|
||||||
': inboundLedgers_
|
': inboundLedgers_
|
||||||
LedgerReplayer "1" --o LedgerDeltaAcquire
|
LedgerReplayer "1" --o LedgerDeltaAcquire
|
||||||
': replayer_
|
': replayer_
|
||||||
LedgerReplayTask --o LedgerDeltaAcquire : implicit via callback
|
LedgerReplayTask --o LedgerDeltaAcquire : implicit via callback
|
||||||
|
|
||||||
@@ -95,4 +95,4 @@ class LedgerDeltaAcquire {
|
|||||||
-replayer_ : LedgerReplayer&
|
-replayer_ : LedgerReplayer&
|
||||||
-dataReadyCallbacks_ : vector<callback>
|
-dataReadyCallbacks_ : vector<callback>
|
||||||
}
|
}
|
||||||
@enduml
|
@enduml
|
||||||
@@ -38,7 +38,7 @@ deactivate lr
|
|||||||
loop
|
loop
|
||||||
lr -> lda : make_shared(ledgerId, ledgerSeq)
|
lr -> lda : make_shared(ledgerId, ledgerSeq)
|
||||||
return delta
|
return delta
|
||||||
lr -> lrt : addDelta(delta)
|
lr -> lrt : addDelta(delta)
|
||||||
lrt -> lda : addDataCallback(callback)
|
lrt -> lda : addDataCallback(callback)
|
||||||
return
|
return
|
||||||
return
|
return
|
||||||
@@ -62,7 +62,7 @@ deactivate peer
|
|||||||
lr -> lda : processData(ledgerHeader, txns)
|
lr -> lda : processData(ledgerHeader, txns)
|
||||||
lda -> lda : notify()
|
lda -> lda : notify()
|
||||||
note over lda: call the callbacks added by\naddDataCallback(callback).
|
note over lda: call the callbacks added by\naddDataCallback(callback).
|
||||||
lda -> lrt : callback(ledgerId)
|
lda -> lrt : callback(ledgerId)
|
||||||
lrt -> lrt : deltaReady(ledgerId)
|
lrt -> lrt : deltaReady(ledgerId)
|
||||||
lrt -> lrt : tryAdvance()
|
lrt -> lrt : tryAdvance()
|
||||||
loop as long as child can be built
|
loop as long as child can be built
|
||||||
@@ -82,4 +82,4 @@ deactivate peer
|
|||||||
deactivate peer
|
deactivate peer
|
||||||
|
|
||||||
|
|
||||||
@enduml
|
@enduml
|
||||||
3
external/README.md
vendored
3
external/README.md
vendored
@@ -1,6 +1,7 @@
|
|||||||
# External Conan recipes
|
# External Conan recipes
|
||||||
|
|
||||||
The subdirectories in this directory contain external libraries used by rippled.
|
The subdirectories in this directory contain copies of external libraries used
|
||||||
|
by rippled.
|
||||||
|
|
||||||
| Folder | Upstream | Description |
|
| Folder | Upstream | Description |
|
||||||
| :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- |
|
| :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- |
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
---
|
---
|
||||||
DisableFormat: true
|
DisableFormat: true
|
||||||
SortIncludes: Never
|
SortIncludes: false
|
||||||
97
external/ed25519-donna/README.md
vendored
97
external/ed25519-donna/README.md
vendored
@@ -1,12 +1,12 @@
|
|||||||
[ed25519](http://ed25519.cr.yp.to/) is an
|
[ed25519](http://ed25519.cr.yp.to/) is an
|
||||||
[Elliptic Curve Digital Signature Algortithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
|
[Elliptic Curve Digital Signature Algortithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
|
||||||
developed by [Dan Bernstein](http://cr.yp.to/djb.html),
|
developed by [Dan Bernstein](http://cr.yp.to/djb.html),
|
||||||
[Niels Duif](http://www.nielsduif.nl/),
|
[Niels Duif](http://www.nielsduif.nl/),
|
||||||
[Tanja Lange](http://hyperelliptic.org/tanja),
|
[Tanja Lange](http://hyperelliptic.org/tanja),
|
||||||
[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
|
[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
|
||||||
and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/).
|
and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/).
|
||||||
|
|
||||||
This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
|
This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
|
||||||
of course constant time in regard to secret data.
|
of course constant time in regard to secret data.
|
||||||
|
|
||||||
#### Performance
|
#### Performance
|
||||||
@@ -52,35 +52,35 @@ are made.
|
|||||||
|
|
||||||
#### Compilation
|
#### Compilation
|
||||||
|
|
||||||
No configuration is needed **if you are compiling against OpenSSL**.
|
No configuration is needed **if you are compiling against OpenSSL**.
|
||||||
|
|
||||||
##### Hash Options
|
##### Hash Options
|
||||||
|
|
||||||
If you are not compiling aginst OpenSSL, you will need a hash function.
|
If you are not compiling aginst OpenSSL, you will need a hash function.
|
||||||
|
|
||||||
To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
|
To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
|
||||||
This should never be used except to verify the code works when OpenSSL is not available.
|
This should never be used except to verify the code works when OpenSSL is not available.
|
||||||
|
|
||||||
To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
|
To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
|
||||||
custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement
|
custom hash implementation in ed25519-hash-custom.h. The hash must have a 512bit digest and implement
|
||||||
|
|
||||||
struct ed25519_hash_context;
|
struct ed25519_hash_context;
|
||||||
|
|
||||||
void ed25519_hash_init(ed25519_hash_context *ctx);
|
void ed25519_hash_init(ed25519_hash_context *ctx);
|
||||||
void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
|
void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
|
||||||
void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
|
void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
|
||||||
void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
|
void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
|
||||||
|
|
||||||
##### Random Options
|
##### Random Options
|
||||||
|
|
||||||
If you are not compiling aginst OpenSSL, you will need a random function for batch verification.
|
If you are not compiling aginst OpenSSL, you will need a random function for batch verification.
|
||||||
|
|
||||||
To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
|
To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
|
||||||
custom hash implementation in ed25519-randombytes-custom.h. The random function must implement:
|
custom hash implementation in ed25519-randombytes-custom.h. The random function must implement:
|
||||||
|
|
||||||
void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
|
void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
|
||||||
|
|
||||||
Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
|
Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
|
||||||
variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29)
|
variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29)
|
||||||
|
|
||||||
##### Minor options
|
##### Minor options
|
||||||
@@ -91,80 +91,79 @@ Use `-DED25519_FORCE_32BIT` to force the use of 32 bit routines even when compil
|
|||||||
|
|
||||||
##### 32-bit
|
##### 32-bit
|
||||||
|
|
||||||
gcc ed25519.c -m32 -O3 -c
|
gcc ed25519.c -m32 -O3 -c
|
||||||
|
|
||||||
##### 64-bit
|
##### 64-bit
|
||||||
|
|
||||||
gcc ed25519.c -m64 -O3 -c
|
gcc ed25519.c -m64 -O3 -c
|
||||||
|
|
||||||
##### SSE2
|
##### SSE2
|
||||||
|
|
||||||
gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
|
gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
|
||||||
gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
|
gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
|
||||||
|
|
||||||
clang and icc are also supported
|
clang and icc are also supported
|
||||||
|
|
||||||
|
|
||||||
#### Usage
|
#### Usage
|
||||||
|
|
||||||
To use the code, link against `ed25519.o -mbits` and:
|
To use the code, link against `ed25519.o -mbits` and:
|
||||||
|
|
||||||
#include "ed25519.h"
|
#include "ed25519.h"
|
||||||
|
|
||||||
Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error).
|
Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error).
|
||||||
|
|
||||||
To generate a private key, simply generate 32 bytes from a secure
|
To generate a private key, simply generate 32 bytes from a secure
|
||||||
cryptographic source:
|
cryptographic source:
|
||||||
|
|
||||||
ed25519_secret_key sk;
|
ed25519_secret_key sk;
|
||||||
randombytes(sk, sizeof(ed25519_secret_key));
|
randombytes(sk, sizeof(ed25519_secret_key));
|
||||||
|
|
||||||
To generate a public key:
|
To generate a public key:
|
||||||
|
|
||||||
ed25519_public_key pk;
|
ed25519_public_key pk;
|
||||||
ed25519_publickey(sk, pk);
|
ed25519_publickey(sk, pk);
|
||||||
|
|
||||||
To sign a message:
|
To sign a message:
|
||||||
|
|
||||||
ed25519_signature sig;
|
ed25519_signature sig;
|
||||||
ed25519_sign(message, message_len, sk, pk, signature);
|
ed25519_sign(message, message_len, sk, pk, signature);
|
||||||
|
|
||||||
To verify a signature:
|
To verify a signature:
|
||||||
|
|
||||||
int valid = ed25519_sign_open(message, message_len, pk, signature) == 0;
|
int valid = ed25519_sign_open(message, message_len, pk, signature) == 0;
|
||||||
|
|
||||||
To batch verify signatures:
|
To batch verify signatures:
|
||||||
|
|
||||||
const unsigned char *mp[num] = {message1, message2..}
|
const unsigned char *mp[num] = {message1, message2..}
|
||||||
size_t ml[num] = {message_len1, message_len2..}
|
size_t ml[num] = {message_len1, message_len2..}
|
||||||
const unsigned char *pkp[num] = {pk1, pk2..}
|
const unsigned char *pkp[num] = {pk1, pk2..}
|
||||||
const unsigned char *sigp[num] = {signature1, signature2..}
|
const unsigned char *sigp[num] = {signature1, signature2..}
|
||||||
int valid[num]
|
int valid[num]
|
||||||
|
|
||||||
/* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
|
/* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
|
||||||
int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
|
int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
|
||||||
|
|
||||||
**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
|
**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
|
||||||
`ed25519-randombytes.h`, to generate random scalars for the verification code.
|
`ed25519-randombytes.h`, to generate random scalars for the verification code.
|
||||||
The default implementation now uses OpenSSLs `RAND_bytes`.
|
The default implementation now uses OpenSSLs `RAND_bytes`.
|
||||||
|
|
||||||
Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are
|
Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are
|
||||||
not appended to messages, and there is no need for padding in front of messages.
|
not appended to messages, and there is no need for padding in front of messages.
|
||||||
Additionally, the secret key does not contain a copy of the public key, so it is
|
Additionally, the secret key does not contain a copy of the public key, so it is
|
||||||
32 bytes instead of 64 bytes, and the public key must be provided to the signing
|
32 bytes instead of 64 bytes, and the public key must be provided to the signing
|
||||||
function.
|
function.
|
||||||
|
|
||||||
##### Curve25519
|
##### Curve25519
|
||||||
|
|
||||||
Curve25519 public keys can be generated thanks to
|
Curve25519 public keys can be generated thanks to
|
||||||
[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
|
[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
|
||||||
leveraging Ed25519's precomputed basepoint scalar multiplication.
|
leveraging Ed25519's precomputed basepoint scalar multiplication.
|
||||||
|
|
||||||
curved25519_key sk, pk;
|
curved25519_key sk, pk;
|
||||||
randombytes(sk, sizeof(curved25519_key));
|
randombytes(sk, sizeof(curved25519_key));
|
||||||
curved25519_scalarmult_basepoint(pk, sk);
|
curved25519_scalarmult_basepoint(pk, sk);
|
||||||
|
|
||||||
Note the name is curved25519, a combination of curve and ed25519, to prevent
|
Note the name is curved25519, a combination of curve and ed25519, to prevent
|
||||||
name clashes. Performance is slightly faster than short message ed25519
|
name clashes. Performance is slightly faster than short message ed25519
|
||||||
signing due to both using the same code for the scalar multiply.
|
signing due to both using the same code for the scalar multiply.
|
||||||
|
|
||||||
@@ -180,4 +179,4 @@ with extreme values to ensure they function correctly. SSE2 is now supported.
|
|||||||
|
|
||||||
#### Papers
|
#### Papers
|
||||||
|
|
||||||
[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)
|
[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)
|
||||||
|
|||||||
99
external/ed25519-donna/fuzz/README.md
vendored
99
external/ed25519-donna/fuzz/README.md
vendored
@@ -1,78 +1,78 @@
|
|||||||
This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of
|
This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of
|
||||||
[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
|
[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
|
||||||
[ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10).
|
[ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10).
|
||||||
|
|
||||||
Curve25519 tests that generating a public key from a secret key
|
Curve25519 tests that generating a public key from a secret key
|
||||||
|
|
||||||
# Building
|
# Building
|
||||||
|
|
||||||
## *nix + PHP
|
## \*nix + PHP
|
||||||
|
|
||||||
`php build-nix.php (required parameters) (optional parameters)`
|
`php build-nix.php (required parameters) (optional parameters)`
|
||||||
|
|
||||||
Required parameters:
|
Required parameters:
|
||||||
|
|
||||||
* `--function=[curve25519,ed25519]`
|
- `--function=[curve25519,ed25519]`
|
||||||
* `--bits=[32,64]`
|
- `--bits=[32,64]`
|
||||||
|
|
||||||
Optional parameters:
|
Optional parameters:
|
||||||
|
|
||||||
* `--with-sse2`
|
- `--with-sse2`
|
||||||
|
|
||||||
Also fuzz against ed25519-donna-sse2
|
Also fuzz against ed25519-donna-sse2
|
||||||
* `--with-openssl`
|
|
||||||
|
|
||||||
Build with OpenSSL's SHA-512.
|
- `--with-openssl`
|
||||||
|
|
||||||
Default: Reference SHA-512 implementation (slow!)
|
Build with OpenSSL's SHA-512.
|
||||||
|
|
||||||
* `--compiler=[gcc,clang,icc]`
|
Default: Reference SHA-512 implementation (slow!)
|
||||||
|
|
||||||
Default: gcc
|
- `--compiler=[gcc,clang,icc]`
|
||||||
|
|
||||||
* `--no-asm`
|
Default: gcc
|
||||||
|
|
||||||
Do not use platform specific assembler
|
- `--no-asm`
|
||||||
|
|
||||||
|
Do not use platform specific assembler
|
||||||
|
|
||||||
example:
|
example:
|
||||||
|
|
||||||
php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
|
php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
|
||||||
|
|
||||||
## Windows
|
## Windows
|
||||||
|
|
||||||
Create a project with access to the ed25519 files.
|
Create a project with access to the ed25519 files.
|
||||||
|
|
||||||
If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects
|
If you are not using OpenSSL, add the `ED25519_REFHASH` define to the projects
|
||||||
"Properties/Preprocessor/Preprocessor Definitions" option
|
"Properties/Preprocessor/Preprocessor Definitions" option
|
||||||
|
|
||||||
Add the following files to the project:
|
Add the following files to the project:
|
||||||
|
|
||||||
* `fuzz/curve25519-ref10.c`
|
- `fuzz/curve25519-ref10.c`
|
||||||
* `fuzz/ed25519-ref10.c`
|
- `fuzz/ed25519-ref10.c`
|
||||||
* `fuzz/ed25519-donna.c`
|
- `fuzz/ed25519-donna.c`
|
||||||
* `fuzz/ed25519-donna-sse2.c` (optional)
|
- `fuzz/ed25519-donna-sse2.c` (optional)
|
||||||
* `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
|
- `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
|
||||||
|
|
||||||
If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
|
If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
|
||||||
its "Properties/Preprocessor/Preprocessor Definitions" option.
|
its "Properties/Preprocessor/Preprocessor Definitions" option.
|
||||||
|
|
||||||
# Running
|
# Running
|
||||||
|
|
||||||
If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
|
If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
|
||||||
and a 64bit progress count (every 0x20000 passes):
|
and a 64bit progress count (every 0x20000 passes):
|
||||||
|
|
||||||
fuzzing: ref10 curved25519 curved25519-sse2
|
fuzzing: ref10 curved25519 curved25519-sse2
|
||||||
|
|
||||||
................................ [0000000000020000]
|
................................ [0000000000020000]
|
||||||
................................ [0000000000040000]
|
................................ [0000000000040000]
|
||||||
................................ [0000000000060000]
|
................................ [0000000000060000]
|
||||||
................................ [0000000000080000]
|
................................ [0000000000080000]
|
||||||
................................ [00000000000a0000]
|
................................ [00000000000a0000]
|
||||||
................................ [00000000000c0000]
|
................................ [00000000000c0000]
|
||||||
|
|
||||||
If any of the implementations do not agree with the ref10 implementation, the program will dump
|
If any of the implementations do not agree with the ref10 implementation, the program will dump
|
||||||
the random data that was used, the data generated by the ref10 implementation, and diffs of the
|
the random data that was used, the data generated by the ref10 implementation, and diffs of the
|
||||||
ed25519-donna data against the ref10 data.
|
ed25519-donna data against the ref10 data.
|
||||||
|
|
||||||
## Example errors
|
## Example errors
|
||||||
@@ -83,21 +83,21 @@ These are example error dumps (with intentionally introduced errors).
|
|||||||
|
|
||||||
Random data:
|
Random data:
|
||||||
|
|
||||||
* sk, or Secret Key
|
- sk, or Secret Key
|
||||||
* m, or Message
|
- m, or Message
|
||||||
|
|
||||||
Generated data:
|
Generated data:
|
||||||
|
|
||||||
* pk, or Public Key
|
- pk, or Public Key
|
||||||
* sig, or Signature
|
- sig, or Signature
|
||||||
* valid, or if the signature of the message is valid with the public key
|
- valid, or if the signature of the message is valid with the public key
|
||||||
|
|
||||||
Dump:
|
Dump:
|
||||||
|
|
||||||
sk:
|
sk:
|
||||||
0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef,
|
0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef,
|
||||||
0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c,
|
0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c,
|
||||||
|
|
||||||
m:
|
m:
|
||||||
0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91,
|
0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91,
|
||||||
0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7,
|
0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7,
|
||||||
@@ -107,67 +107,66 @@ Dump:
|
|||||||
0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89,
|
0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89,
|
||||||
0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b,
|
0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b,
|
||||||
0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a,
|
0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a,
|
||||||
|
|
||||||
ref10:
|
ref10:
|
||||||
pk:
|
pk:
|
||||||
0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04,
|
0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04,
|
||||||
0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04,
|
0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04,
|
||||||
|
|
||||||
sig:
|
sig:
|
||||||
0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96,
|
0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96,
|
||||||
0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5,
|
0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5,
|
||||||
0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f,
|
0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f,
|
||||||
0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02,
|
0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02,
|
||||||
|
|
||||||
valid: yes
|
valid: yes
|
||||||
|
|
||||||
ed25519-donna:
|
ed25519-donna:
|
||||||
pk diff:
|
pk diff:
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
|
|
||||||
sig diff:
|
sig diff:
|
||||||
0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5,
|
0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5,
|
||||||
0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab,
|
0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab,
|
||||||
0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97,
|
0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97,
|
||||||
0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08,
|
0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08,
|
||||||
|
|
||||||
valid: no
|
valid: no
|
||||||
|
|
||||||
In this case, the generated public key matches, but the generated signature is completely
|
In this case, the generated public key matches, but the generated signature is completely
|
||||||
different and does not validate.
|
different and does not validate.
|
||||||
|
|
||||||
### Curve25519
|
### Curve25519
|
||||||
|
|
||||||
Random data:
|
Random data:
|
||||||
|
|
||||||
* sk, or Secret Key
|
- sk, or Secret Key
|
||||||
|
|
||||||
Generated data:
|
Generated data:
|
||||||
|
|
||||||
* pk, or Public Key
|
- pk, or Public Key
|
||||||
|
|
||||||
Dump:
|
Dump:
|
||||||
|
|
||||||
sk:
|
sk:
|
||||||
0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e,
|
0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e,
|
||||||
0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70,
|
0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70,
|
||||||
|
|
||||||
|
|
||||||
ref10:
|
ref10:
|
||||||
0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95,
|
0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95,
|
||||||
0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62,
|
0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62,
|
||||||
|
|
||||||
|
|
||||||
curved25519 diff:
|
curved25519 diff:
|
||||||
0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43,
|
0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43,
|
||||||
0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35,
|
0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35,
|
||||||
|
|
||||||
|
|
||||||
curved25519-sse2 diff:
|
curved25519-sse2 diff:
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
|
||||||
|
|
||||||
|
In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
|
||||||
In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
|
implementation.
|
||||||
implementation.
|
|
||||||
|
|||||||
144
external/secp256k1/CHANGELOG.md
vendored
144
external/secp256k1/CHANGELOG.md
vendored
@@ -8,153 +8,189 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
## [0.6.0] - 2024-11-04
|
## [0.6.0] - 2024-11-04
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See:
|
|
||||||
- Header file `include/secp256k1_musig.h` which defines the new API.
|
- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See:
|
||||||
- Document `doc/musig.md` for further notes on API usage.
|
- Header file `include/secp256k1_musig.h` which defines the new API.
|
||||||
- Usage example `examples/musig.c`.
|
- Document `doc/musig.md` for further notes on API usage.
|
||||||
- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command.
|
- Usage example `examples/musig.c`.
|
||||||
|
- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal.
|
|
||||||
- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++).
|
- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal.
|
||||||
- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility.
|
- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++).
|
||||||
|
- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility.
|
||||||
|
|
||||||
#### Removed
|
#### Removed
|
||||||
- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API.
|
|
||||||
|
- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed.
|
The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed.
|
||||||
Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x.
|
Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x.
|
||||||
|
|
||||||
## [0.5.1] - 2024-08-01
|
## [0.5.1] - 2024-08-01
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added usage example for an ElligatorSwift key exchange.
|
|
||||||
|
- Added usage example for an ElligatorSwift key exchange.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
|
||||||
- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly.
|
- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
||||||
|
- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly.
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed compilation when the extrakeys module is disabled.
|
|
||||||
|
- Fixed compilation when the extrakeys module is disabled.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x.
|
The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x.
|
||||||
|
|
||||||
## [0.5.0] - 2024-05-06
|
## [0.5.0] - 2024-05-06
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order.
|
|
||||||
|
- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations.
|
|
||||||
- The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations.
|
||||||
- This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB).
|
- The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
|
||||||
|
- This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB).
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is backward compatible with versions 0.4.x and 0.3.x.
|
The ABI is backward compatible with versions 0.4.x and 0.3.x.
|
||||||
|
|
||||||
## [0.4.1] - 2023-12-21
|
## [0.4.1] - 2023-12-21
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
|
|
||||||
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
|
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
|
||||||
|
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is backward compatible with versions 0.4.0 and 0.3.x.
|
The ABI is backward compatible with versions 0.4.0 and 0.3.x.
|
||||||
|
|
||||||
## [0.4.0] - 2023-09-04
|
## [0.4.0] - 2023-09-04
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
|
|
||||||
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
|
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
|
||||||
- Header file `include/secp256k1_ellswift.h` which defines the new API.
|
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
|
||||||
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
|
- Header file `include/secp256k1_ellswift.h` which defines the new API.
|
||||||
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
|
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
|
||||||
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
|
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
|
||||||
|
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
|
|
||||||
|
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
|
|
||||||
|
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug.
|
This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug.
|
||||||
|
|
||||||
## [0.3.2] - 2023-05-13
|
## [0.3.2] - 2023-05-13
|
||||||
|
|
||||||
We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`.
|
We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`.
|
||||||
|
|
||||||
#### Security
|
#### Security
|
||||||
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
|
|
||||||
|
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
|
|
||||||
|
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Various improvements and changes to CMake builds. CMake builds remain experimental.
|
|
||||||
- Made API versioning consistent with GNU Autotools builds.
|
- Various improvements and changes to CMake builds. CMake builds remain experimental.
|
||||||
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
|
- Made API versioning consistent with GNU Autotools builds.
|
||||||
- Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts.
|
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
|
||||||
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
|
- Added `SECP256K1_INSTALL` variable for the controlling whether to install the build artefacts.
|
||||||
|
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is compatible with versions 0.3.0 and 0.3.1.
|
The ABI is compatible with versions 0.3.0 and 0.3.1.
|
||||||
|
|
||||||
## [0.3.1] - 2023-04-10
|
## [0.3.1] - 2023-04-10
|
||||||
|
|
||||||
We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`.
|
We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`.
|
||||||
|
|
||||||
#### Security
|
#### Security
|
||||||
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
|
|
||||||
|
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
|
|
||||||
|
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
|
|
||||||
|
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
The ABI is compatible with version 0.3.0.
|
The ABI is compatible with version 0.3.0.
|
||||||
|
|
||||||
## [0.3.0] - 2023-03-08
|
## [0.3.0] - 2023-03-08
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
|
|
||||||
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
|
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
|
||||||
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
|
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
|
||||||
|
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
|
||||||
|
|
||||||
#### Fixed
|
#### Fixed
|
||||||
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
|
|
||||||
|
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
|
|
||||||
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
|
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
|
||||||
|
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
|
||||||
|
|
||||||
#### Removed
|
#### Removed
|
||||||
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
|
|
||||||
|
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is *not* compatible with previous versions.
|
|
||||||
|
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is _not_ compatible with previous versions.
|
||||||
|
|
||||||
## [0.2.0] - 2022-12-12
|
## [0.2.0] - 2022-12-12
|
||||||
|
|
||||||
#### Added
|
#### Added
|
||||||
- Added usage examples for common use cases in a new `examples/` directory.
|
|
||||||
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
|
- Added usage examples for common use cases in a new `examples/` directory.
|
||||||
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
|
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
|
||||||
|
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
|
|
||||||
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
|
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
|
||||||
|
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
|
||||||
|
|
||||||
#### Deprecated
|
#### Deprecated
|
||||||
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
|
|
||||||
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
|
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
|
||||||
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
|
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
|
||||||
|
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
|
||||||
|
|
||||||
#### ABI Compatibility
|
#### ABI Compatibility
|
||||||
|
|
||||||
Since this is the first release, we do not compare application binary interfaces.
|
Since this is the first release, we do not compare application binary interfaces.
|
||||||
However, there are earlier unreleased versions of libsecp256k1 that are *not* ABI compatible with this version.
|
However, there are earlier unreleased versions of libsecp256k1 that are _not_ ABI compatible with this version.
|
||||||
|
|
||||||
## [0.1.0] - 2013-03-05 to 2021-12-25
|
## [0.1.0] - 2013-03-05 to 2021-12-25
|
||||||
|
|
||||||
|
|||||||
6
external/secp256k1/CMakePresets.json
vendored
6
external/secp256k1/CMakePresets.json
vendored
@@ -1,5 +1,9 @@
|
|||||||
{
|
{
|
||||||
"cmakeMinimumRequired": {"major": 3, "minor": 21, "patch": 0},
|
"cmakeMinimumRequired": {
|
||||||
|
"major": 3,
|
||||||
|
"minor": 21,
|
||||||
|
"patch": 0
|
||||||
|
},
|
||||||
"version": 3,
|
"version": 3,
|
||||||
"configurePresets": [
|
"configurePresets": [
|
||||||
{
|
{
|
||||||
|
|||||||
74
external/secp256k1/CONTRIBUTING.md
vendored
74
external/secp256k1/CONTRIBUTING.md
vendored
@@ -12,15 +12,15 @@ The libsecp256k1 project welcomes contributions in the form of new functionality
|
|||||||
It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable.
|
It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable.
|
||||||
Contributors are recommended to provide the following in addition to the new code:
|
Contributors are recommended to provide the following in addition to the new code:
|
||||||
|
|
||||||
* **Specification:**
|
- **Specification:**
|
||||||
A specification can help significantly in reviewing the new code as it provides documentation and context.
|
A specification can help significantly in reviewing the new code as it provides documentation and context.
|
||||||
It may justify various design decisions, give a motivation and outline security goals.
|
It may justify various design decisions, give a motivation and outline security goals.
|
||||||
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
|
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
|
||||||
* **Security Arguments:**
|
- **Security Arguments:**
|
||||||
In addition to defining the security goals, it should be argued that the new functionality meets these goals.
|
In addition to defining the security goals, it should be argued that the new functionality meets these goals.
|
||||||
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
|
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
|
||||||
* **Relevance Arguments:**
|
- **Relevance Arguments:**
|
||||||
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
|
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
|
||||||
|
|
||||||
These are not the only factors taken into account when considering to add new functionality.
|
These are not the only factors taken into account when considering to add new functionality.
|
||||||
The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
|
The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
|
||||||
@@ -44,36 +44,36 @@ The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Co
|
|||||||
|
|
||||||
In addition, libsecp256k1 tries to maintain the following coding conventions:
|
In addition, libsecp256k1 tries to maintain the following coding conventions:
|
||||||
|
|
||||||
* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
|
- No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
|
||||||
* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
|
- The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
|
||||||
* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
|
- Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
|
||||||
* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
|
- Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
|
||||||
* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
|
- Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
|
||||||
* As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)).
|
- As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)).
|
||||||
|
|
||||||
#### Style conventions
|
#### Style conventions
|
||||||
|
|
||||||
* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
|
- Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
|
||||||
* New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
|
- New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
|
||||||
* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
|
- The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
|
||||||
```C
|
```C
|
||||||
void secp256k_foo(void) {
|
void secp256k_foo(void) {
|
||||||
unsigned int x; /* declaration */
|
unsigned int x; /* declaration */
|
||||||
int y = 2*x; /* declaration */
|
int y = 2*x; /* declaration */
|
||||||
x = 17; /* statement */
|
x = 17; /* statement */
|
||||||
{
|
{
|
||||||
int a, b; /* declaration */
|
int a, b; /* declaration */
|
||||||
a = x + y; /* statement */
|
a = x + y; /* statement */
|
||||||
secp256k_bar(x, &b); /* statement */
|
secp256k_bar(x, &b); /* statement */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
* Use `unsigned int` instead of just `unsigned`.
|
- Use `unsigned int` instead of just `unsigned`.
|
||||||
* Use `void *ptr` instead of `void* ptr`.
|
- Use `void *ptr` instead of `void* ptr`.
|
||||||
* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
|
- Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
|
||||||
* User-facing comment lines in headers should be limited to 80 chars if possible.
|
- User-facing comment lines in headers should be limited to 80 chars if possible.
|
||||||
* All identifiers in file scope should start with `secp256k1_`.
|
- All identifiers in file scope should start with `secp256k1_`.
|
||||||
* Avoid trailing whitespace.
|
- Avoid trailing whitespace.
|
||||||
|
|
||||||
### Tests
|
### Tests
|
||||||
|
|
||||||
@@ -101,7 +101,7 @@ To create a HTML report with coloured and annotated source code:
|
|||||||
#### Exhaustive tests
|
#### Exhaustive tests
|
||||||
|
|
||||||
There are tests of several functions in which a small group replaces secp256k1.
|
There are tests of several functions in which a small group replaces secp256k1.
|
||||||
These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
|
These tests are _exhaustive_ since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
|
||||||
|
|
||||||
### Benchmarks
|
### Benchmarks
|
||||||
|
|
||||||
|
|||||||
130
external/secp256k1/README.md
vendored
130
external/secp256k1/README.md
vendored
@@ -1,5 +1,4 @@
|
|||||||
libsecp256k1
|
# libsecp256k1
|
||||||
============
|
|
||||||
|
|
||||||

|

|
||||||
[](https://web.libera.chat/#secp256k1)
|
[](https://web.libera.chat/#secp256k1)
|
||||||
@@ -9,60 +8,59 @@ High-performance high-assurance C library for digital signatures and other crypt
|
|||||||
This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.
|
This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.
|
||||||
|
|
||||||
Features:
|
Features:
|
||||||
* secp256k1 ECDSA signing/verification and key generation.
|
|
||||||
* Additive and multiplicative tweaking of secret/public keys.
|
|
||||||
* Serialization/parsing of secret keys, public keys, signatures.
|
|
||||||
* Constant time, constant memory access signing and public key generation.
|
|
||||||
* Derandomized ECDSA (via RFC6979 or with a caller provided function.)
|
|
||||||
* Very efficient implementation.
|
|
||||||
* Suitable for embedded systems.
|
|
||||||
* No runtime dependencies.
|
|
||||||
* Optional module for public key recovery.
|
|
||||||
* Optional module for ECDH key exchange.
|
|
||||||
* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
|
|
||||||
* Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
|
|
||||||
* Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki).
|
|
||||||
|
|
||||||
Implementation details
|
- secp256k1 ECDSA signing/verification and key generation.
|
||||||
----------------------
|
- Additive and multiplicative tweaking of secret/public keys.
|
||||||
|
- Serialization/parsing of secret keys, public keys, signatures.
|
||||||
|
- Constant time, constant memory access signing and public key generation.
|
||||||
|
- Derandomized ECDSA (via RFC6979 or with a caller provided function.)
|
||||||
|
- Very efficient implementation.
|
||||||
|
- Suitable for embedded systems.
|
||||||
|
- No runtime dependencies.
|
||||||
|
- Optional module for public key recovery.
|
||||||
|
- Optional module for ECDH key exchange.
|
||||||
|
- Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
|
||||||
|
- Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
|
||||||
|
- Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki).
|
||||||
|
|
||||||
* General
|
## Implementation details
|
||||||
* No runtime heap allocation.
|
|
||||||
* Extensive testing infrastructure.
|
|
||||||
* Structured to facilitate review and analysis.
|
|
||||||
* Intended to be portable to any system with a C89 compiler and uint64_t support.
|
|
||||||
* No use of floating types.
|
|
||||||
* Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
|
|
||||||
* Field operations
|
|
||||||
* Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
|
|
||||||
* Using 5 52-bit limbs
|
|
||||||
* Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
|
|
||||||
* This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
|
|
||||||
* Scalar operations
|
|
||||||
* Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
|
|
||||||
* Using 4 64-bit limbs (relying on __int128 support in the compiler).
|
|
||||||
* Using 8 32-bit limbs.
|
|
||||||
* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
|
|
||||||
* Group operations
|
|
||||||
* Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
|
|
||||||
* Use addition between points in Jacobian and affine coordinates where possible.
|
|
||||||
* Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
|
|
||||||
* Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
|
|
||||||
* Point multiplication for verification (a*P + b*G).
|
|
||||||
* Use wNAF notation for point multiplicands.
|
|
||||||
* Use a much larger window for multiples of G, using precomputed multiples.
|
|
||||||
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
|
|
||||||
* Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
|
|
||||||
* Point multiplication for signing
|
|
||||||
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
|
|
||||||
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
|
|
||||||
* Access the table with branch-free conditional moves so memory access is uniform.
|
|
||||||
* No data-dependent branches
|
|
||||||
* Optional runtime blinding which attempts to frustrate differential power analysis.
|
|
||||||
* The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally.
|
|
||||||
|
|
||||||
Building with Autotools
|
- General
|
||||||
-----------------------
|
- No runtime heap allocation.
|
||||||
|
- Extensive testing infrastructure.
|
||||||
|
- Structured to facilitate review and analysis.
|
||||||
|
- Intended to be portable to any system with a C89 compiler and uint64_t support.
|
||||||
|
- No use of floating types.
|
||||||
|
- Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
|
||||||
|
- Field operations
|
||||||
|
- Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
|
||||||
|
- Using 5 52-bit limbs
|
||||||
|
- Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
|
||||||
|
- This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
|
||||||
|
- Scalar operations
|
||||||
|
- Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
|
||||||
|
- Using 4 64-bit limbs (relying on \_\_int128 support in the compiler).
|
||||||
|
- Using 8 32-bit limbs.
|
||||||
|
- Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
|
||||||
|
- Group operations
|
||||||
|
- Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
|
||||||
|
- Use addition between points in Jacobian and affine coordinates where possible.
|
||||||
|
- Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
|
||||||
|
- Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
|
||||||
|
- Point multiplication for verification (a*P + b*G).
|
||||||
|
- Use wNAF notation for point multiplicands.
|
||||||
|
- Use a much larger window for multiples of G, using precomputed multiples.
|
||||||
|
- Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
|
||||||
|
- Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
|
||||||
|
- Point multiplication for signing
|
||||||
|
- Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
|
||||||
|
- Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
|
||||||
|
- Access the table with branch-free conditional moves so memory access is uniform.
|
||||||
|
- No data-dependent branches
|
||||||
|
- Optional runtime blinding which attempts to frustrate differential power analysis.
|
||||||
|
- The precomputed tables add and eventually subtract points for which no known scalar (secret key) is known, preventing even an attacker with control over the secret key used to control the data internally.
|
||||||
|
|
||||||
|
## Building with Autotools
|
||||||
|
|
||||||
$ ./autogen.sh
|
$ ./autogen.sh
|
||||||
$ ./configure
|
$ ./configure
|
||||||
@@ -72,8 +70,7 @@ Building with Autotools
|
|||||||
|
|
||||||
To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags.
|
To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags.
|
||||||
|
|
||||||
Building with CMake (experimental)
|
## Building with CMake (experimental)
|
||||||
----------------------------------
|
|
||||||
|
|
||||||
To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree.
|
To maintain a pristine source tree, CMake encourages to perform an out-of-source build by using a separate dedicated build tree.
|
||||||
|
|
||||||
@@ -109,18 +106,19 @@ In "Developer Command Prompt for VS 2022":
|
|||||||
>cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
|
>cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
|
||||||
>cmake --build build --config RelWithDebInfo
|
>cmake --build build --config RelWithDebInfo
|
||||||
|
|
||||||
Usage examples
|
## Usage examples
|
||||||
-----------
|
|
||||||
Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`.
|
Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`.
|
||||||
* [ECDSA example](examples/ecdsa.c)
|
|
||||||
* [Schnorr signatures example](examples/schnorr.c)
|
- [ECDSA example](examples/ecdsa.c)
|
||||||
* [Deriving a shared secret (ECDH) example](examples/ecdh.c)
|
- [Schnorr signatures example](examples/schnorr.c)
|
||||||
* [ElligatorSwift key exchange example](examples/ellswift.c)
|
- [Deriving a shared secret (ECDH) example](examples/ecdh.c)
|
||||||
|
- [ElligatorSwift key exchange example](examples/ellswift.c)
|
||||||
|
|
||||||
To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`.
|
To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`.
|
||||||
|
|
||||||
Benchmark
|
## Benchmark
|
||||||
------------
|
|
||||||
If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build.
|
If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build.
|
||||||
|
|
||||||
To print the benchmark result to the command line:
|
To print the benchmark result to the command line:
|
||||||
@@ -131,12 +129,10 @@ To create a CSV file for the benchmark result :
|
|||||||
|
|
||||||
$ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv
|
$ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv
|
||||||
|
|
||||||
Reporting a vulnerability
|
## Reporting a vulnerability
|
||||||
------------
|
|
||||||
|
|
||||||
See [SECURITY.md](SECURITY.md)
|
See [SECURITY.md](SECURITY.md)
|
||||||
|
|
||||||
Contributing to libsecp256k1
|
## Contributing to libsecp256k1
|
||||||
------------
|
|
||||||
|
|
||||||
See [CONTRIBUTING.md](CONTRIBUTING.md)
|
See [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||||
|
|||||||
10
external/secp256k1/SECURITY.md
vendored
10
external/secp256k1/SECURITY.md
vendored
@@ -6,10 +6,10 @@ To report security issues send an email to secp256k1-security@bitcoincore.org (n
|
|||||||
|
|
||||||
The following keys may be used to communicate sensitive information to developers:
|
The following keys may be used to communicate sensitive information to developers:
|
||||||
|
|
||||||
| Name | Fingerprint |
|
| Name | Fingerprint |
|
||||||
|------|-------------|
|
| ------------- | ------------------------------------------------- |
|
||||||
| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 |
|
| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 |
|
||||||
| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 |
|
| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 |
|
||||||
| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 |
|
| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 |
|
||||||
|
|
||||||
You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys "<fingerprint>"` Ensure that you put quotes around fingerprints containing spaces.
|
You can import a key by running the following command with that individual’s fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys "<fingerprint>"` Ensure that you put quotes around fingerprints containing spaces.
|
||||||
|
|||||||
410
external/secp256k1/doc/ellswift.md
vendored
410
external/secp256k1/doc/ellswift.md
vendored
@@ -5,17 +5,17 @@ construction in the
|
|||||||
["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759)
|
["SwiftEC: Shallue–van de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759)
|
||||||
paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi.
|
paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi.
|
||||||
|
|
||||||
* [1. Introduction](#1-introduction)
|
- [1. Introduction](#1-introduction)
|
||||||
* [2. The decoding function](#2-the-decoding-function)
|
- [2. The decoding function](#2-the-decoding-function)
|
||||||
+ [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
|
- [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
|
||||||
* [3. The encoding function](#3-the-encoding-function)
|
- [3. The encoding function](#3-the-encoding-function)
|
||||||
+ [3.1 Switching to *v, w* coordinates](#31-switching-to-v-w-coordinates)
|
- [3.1 Switching to _v, w_ coordinates](#31-switching-to-v-w-coordinates)
|
||||||
+ [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
|
- [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
|
||||||
+ [3.3 Finding the inverse](#33-finding-the-inverse)
|
- [3.3 Finding the inverse](#33-finding-the-inverse)
|
||||||
+ [3.4 Dealing with special cases](#34-dealing-with-special-cases)
|
- [3.4 Dealing with special cases](#34-dealing-with-special-cases)
|
||||||
+ [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
|
- [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
|
||||||
* [4. Encoding and decoding full *(x, y)* coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
|
- [4. Encoding and decoding full _(x, y)_ coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
|
||||||
+ [4.1 Full *(x, y)* coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
|
- [4.1 Full _(x, y)_ coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
|
||||||
|
|
||||||
## 1. Introduction
|
## 1. Introduction
|
||||||
|
|
||||||
@@ -34,13 +34,14 @@ are taken modulo $p$), and then evaluating $F_u(t)$, which for every $u$ and $t$
|
|||||||
x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function).
|
x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function).
|
||||||
|
|
||||||
**Encoding** a given $x$ coordinate is conceptually done as follows:
|
**Encoding** a given $x$ coordinate is conceptually done as follows:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
|
||||||
* Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to *8* elements.
|
|
||||||
* With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
|
|
||||||
* Select a uniformly random $t \in L$ and return $(u, t).$
|
|
||||||
|
|
||||||
This is the *ElligatorSwift* algorithm, here given for just x-coordinates. An extension to full
|
- Loop:
|
||||||
|
- Pick a uniformly random field element $u.$
|
||||||
|
- Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to _8_ elements.
|
||||||
|
- With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
|
||||||
|
- Select a uniformly random $t \in L$ and return $(u, t).$
|
||||||
|
|
||||||
|
This is the _ElligatorSwift_ algorithm, here given for just x-coordinates. An extension to full
|
||||||
$(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates).
|
$(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates).
|
||||||
The algorithm finds a uniformly random $(u, t)$ among (almost all) those
|
The algorithm finds a uniformly random $(u, t)$ among (almost all) those
|
||||||
for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for
|
for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for
|
||||||
@@ -50,37 +51,40 @@ almost all x-coordinates on the curve (all but at most 39) is close to two times
|
|||||||
## 2. The decoding function
|
## 2. The decoding function
|
||||||
|
|
||||||
First some definitions:
|
First some definitions:
|
||||||
* $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
|
|
||||||
* For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
|
- $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
|
||||||
* Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
|
- For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
|
||||||
|
- Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
|
||||||
public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square.
|
public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square.
|
||||||
This implies that the order of $E$ is either odd, or a multiple of *4*.
|
This implies that the order of $E$ is either odd, or a multiple of _4_.
|
||||||
If $a=0$, this condition is always fulfilled.
|
If $a=0$, this condition is always fulfilled.
|
||||||
* For `secp256k1`, $a=0$ and $b=7.$
|
- For `secp256k1`, $a=0$ and $b=7.$
|
||||||
* Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
|
- Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
|
||||||
* Let the function $h(x) = 3x^2 + 4a.$
|
- Let the function $h(x) = 3x^2 + 4a.$
|
||||||
* Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
|
- Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
|
||||||
* Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
|
- Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
|
||||||
* $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
|
- $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
|
||||||
* $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
|
- $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
|
||||||
|
|
||||||
**Note**: In the paper:
|
**Note**: In the paper:
|
||||||
* $F_u$ corresponds to $F_{0,u}$ there.
|
|
||||||
* $P_u(t)$ is called $P$ there.
|
- $F_u$ corresponds to $F_{0,u}$ there.
|
||||||
* All $S_u$ sets together correspond to $S$ there.
|
- $P_u(t)$ is called $P$ there.
|
||||||
* All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
|
- All $S_u$ sets together correspond to $S$ there.
|
||||||
|
- All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
|
||||||
|
|
||||||
Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right
|
Note that for $V$, the left hand side of the equation $z^2$ is square, and thus the right
|
||||||
hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$,
|
hand must also be square. As multiplying non-squares results in a square in $\mathbb{F}$,
|
||||||
out of the three right-hand side factors an even number must be non-squares.
|
out of the three right-hand side factors an even number must be non-squares.
|
||||||
This implies that exactly *1* or exactly *3* out of
|
This implies that exactly _1_ or exactly _3_ out of
|
||||||
$\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$,
|
$\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$,
|
||||||
at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception
|
at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception
|
||||||
to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate.
|
to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate.
|
||||||
|
|
||||||
**Define** the decoding function $F_u(t)$ as:
|
**Define** the decoding function $F_u(t)$ as:
|
||||||
* Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
|
|
||||||
* Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
|
- Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
|
||||||
|
- Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
|
||||||
|
|
||||||
$P_u(t) = (X(u, t), Y(u, t))$, where:
|
$P_u(t) = (X(u, t), Y(u, t))$, where:
|
||||||
|
|
||||||
@@ -98,12 +102,13 @@ Y(u, t) & = & \left\\{\begin{array}{ll}
|
|||||||
$$
|
$$
|
||||||
|
|
||||||
$P_u(t)$ is defined:
|
$P_u(t)$ is defined:
|
||||||
* For $a=0$, unless:
|
|
||||||
* $u = 0$ or $t = 0$ (division by zero)
|
- For $a=0$, unless:
|
||||||
* $g(u) = -t^2$ (would give $Y=0$).
|
- $u = 0$ or $t = 0$ (division by zero)
|
||||||
* For $a \neq 0$, unless:
|
- $g(u) = -t^2$ (would give $Y=0$).
|
||||||
* $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
|
- For $a \neq 0$, unless:
|
||||||
* $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
|
- $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
|
||||||
|
- $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
|
||||||
|
|
||||||
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$
|
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$
|
||||||
|
|
||||||
@@ -123,20 +128,22 @@ $$
|
|||||||
Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is:
|
Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is:
|
||||||
|
|
||||||
**Define** $F_u(t)$ as:
|
**Define** $F_u(t)$ as:
|
||||||
* Let $X = \dfrac{u^3 + b - t^2}{2t}.$
|
|
||||||
* Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
|
- Let $X = \dfrac{u^3 + b - t^2}{2t}.$
|
||||||
* Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
|
- Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
|
||||||
|
- Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
|
||||||
|
|
||||||
To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case
|
To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case
|
||||||
$P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$):
|
$P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$):
|
||||||
|
|
||||||
**Define** $F_u(t)$ as:
|
**Define** $F_u(t)$ as:
|
||||||
* Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
|
|
||||||
* Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
|
- Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
|
||||||
* Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
|
- Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
|
||||||
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
- Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
|
||||||
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
||||||
* Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
|
- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
||||||
|
- Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
|
||||||
|
|
||||||
The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice,
|
The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice,
|
||||||
but the approach here is simple enough and gives fairly uniform output even in these cases.
|
but the approach here is simple enough and gives fairly uniform output even in these cases.
|
||||||
@@ -150,10 +157,11 @@ in `secp256k1_ellswift_xswiftec_var` (which outputs the actual x-coordinate).
|
|||||||
## 3. The encoding function
|
## 3. The encoding function
|
||||||
|
|
||||||
To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process:
|
To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process:
|
||||||
* Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
|
|
||||||
* Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
|
- Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
|
||||||
* For each of the found $t$ values, verify that $F_u(t) = x.$
|
- Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
|
||||||
* Return the remaining $t$ values.
|
- For each of the found $t$ values, verify that $F_u(t) = x.$
|
||||||
|
- Return the remaining $t$ values.
|
||||||
|
|
||||||
The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$
|
The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$
|
||||||
|
|
||||||
@@ -185,13 +193,14 @@ precedence over both. Because of this, the $g(-u-x)$ being square test for $x_1$
|
|||||||
values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder;
|
values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder;
|
||||||
any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder.
|
any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder.
|
||||||
|
|
||||||
### 3.1 Switching to *v, w* coordinates
|
### 3.1 Switching to _v, w_ coordinates
|
||||||
|
|
||||||
Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and
|
Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and
|
||||||
$w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$
|
$w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$
|
||||||
* $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
|
|
||||||
* For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
|
- $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
|
||||||
* $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
|
- For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
|
||||||
|
- $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
|
||||||
|
|
||||||
$$
|
$$
|
||||||
\begin{array}{lcl}
|
\begin{array}{lcl}
|
||||||
@@ -204,34 +213,37 @@ $$
|
|||||||
|
|
||||||
We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$
|
We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$
|
||||||
expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable:
|
expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable:
|
||||||
* Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
|
||||||
* Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
- Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
||||||
* Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
|
- Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
|
||||||
|
- Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
|
||||||
|
|
||||||
### 3.2 Avoiding computing all inverses
|
### 3.2 Avoiding computing all inverses
|
||||||
|
|
||||||
The *ElligatorSwift* algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
|
The _ElligatorSwift_ algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
|
||||||
set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary.
|
set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary.
|
||||||
|
|
||||||
Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a
|
Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a
|
||||||
uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8,
|
uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8,
|
||||||
picking a uniformly random element from that, restarting whenever $\bot$ is picked:
|
picking a uniformly random element from that, restarting whenever $\bot$ is picked:
|
||||||
|
|
||||||
**Define** *ElligatorSwift(x)* as:
|
**Define** _ElligatorSwift(x)_ as:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
- Loop:
|
||||||
* Compute the set $L = F_u^{-1}(x).$
|
- Pick a uniformly random field element $u.$
|
||||||
* Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
|
- Compute the set $L = F_u^{-1}(x).$
|
||||||
* Select a uniformly random $t \in T.$
|
- Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
|
||||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
- Select a uniformly random $t \in T.$
|
||||||
|
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||||
|
|
||||||
Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly
|
Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly
|
||||||
random element in it, so we do not need to have all $\bot$ values at the end.
|
random element in it, so we do not need to have all $\bot$ values at the end.
|
||||||
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account),
|
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account),
|
||||||
we can associate every index in $T$ with exactly one of those formulas, making sure that:
|
we can associate every index in $T$ with exactly one of those formulas, making sure that:
|
||||||
* Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
|
|
||||||
* For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
|
- Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
|
||||||
* In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
|
- For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
|
||||||
|
- In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
|
||||||
|
|
||||||
The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting
|
The last condition above only occurs with negligible probability for cryptographically-sized curves, but is interesting
|
||||||
to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases)
|
to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases)
|
||||||
@@ -240,12 +252,13 @@ for an analysis of all the negligible cases.
|
|||||||
If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas,
|
If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas,
|
||||||
the loop can be simplified to only compute one of the inverses instead of all of them:
|
the loop can be simplified to only compute one of the inverses instead of all of them:
|
||||||
|
|
||||||
**Define** *ElligatorSwift(x)* as:
|
**Define** _ElligatorSwift(x)_ as:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
- Loop:
|
||||||
* Pick a uniformly random integer $c$ in $[0,8).$
|
- Pick a uniformly random field element $u.$
|
||||||
* Let $t = G_{c,u}(x).$
|
- Pick a uniformly random integer $c$ in $[0,8).$
|
||||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
- Let $t = G_{c,u}(x).$
|
||||||
|
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||||
|
|
||||||
This is implemented in `secp256k1_ellswift_xelligatorswift_var`.
|
This is implemented in `secp256k1_ellswift_xelligatorswift_var`.
|
||||||
|
|
||||||
@@ -256,18 +269,19 @@ Those are then repeated as $c=4$ through $c=7$ for the other sign of $w$ (noting
|
|||||||
Ignoring the negligible cases, we get:
|
Ignoring the negligible cases, we get:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x)$ as:
|
**Define** $G_{c,u}(x)$ as:
|
||||||
* If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
|
|
||||||
* If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
|
- If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
|
||||||
* If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula)
|
- If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
|
||||||
* Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
|
- If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula)
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
|
- Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
- Let $s = x-u.$
|
||||||
* Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
||||||
* Let $w = \sqrt{s}.$
|
- Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
|
||||||
* Depending on $c:$
|
- Let $w = \sqrt{s}.$
|
||||||
* If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
|
- If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
|
||||||
|
- If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
|
||||||
|
|
||||||
Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly
|
Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly
|
||||||
50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen
|
50% on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen
|
||||||
@@ -284,20 +298,21 @@ transformation. Furthermore, that transformation has no effect on $s$ in the fir
|
|||||||
as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down:
|
as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x)$ as:
|
**Define** $G_{c,u}(x)$ as:
|
||||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
|
||||||
* If $g(-u-x)$ is square, return $\bot.$
|
- If $c \in \\{0, 1, 4, 5\\}:$
|
||||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a).$
|
- If $g(-u-x)$ is square, return $\bot.$
|
||||||
* Let $v = x.$
|
- Let $s = -g(u)/(u^2 + ux + x^2 + a).$
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
- Let $s = x-u.$
|
||||||
* Let $v = (r/s - u)/2.$
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}.$
|
||||||
* Let $w = \sqrt{s}.$
|
- Let $v = (r/s - u)/2.$
|
||||||
* Depending on $c:$
|
- Let $w = \sqrt{s}.$
|
||||||
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
|
- If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
|
||||||
* If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
|
- If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
|
||||||
* If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
|
- If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
|
||||||
|
- If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
|
||||||
|
|
||||||
This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input.
|
This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input.
|
||||||
There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values.
|
There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values.
|
||||||
@@ -310,58 +325,60 @@ we analyse them here. They generally fall into two categories: cases in which th
|
|||||||
do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same
|
do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same
|
||||||
$t$ value for multiple $c$ inputs (thereby biasing that encoding):
|
$t$ value for multiple $c$ inputs (thereby biasing that encoding):
|
||||||
|
|
||||||
* In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
|
- In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
|
||||||
* When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
|
- When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
|
||||||
Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve
|
Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve
|
||||||
fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square).
|
fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square).
|
||||||
This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$),
|
This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$),
|
||||||
the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the
|
the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the
|
||||||
encoder anyway as there will generally be more than 8.
|
encoder anyway as there will generally be more than 8.
|
||||||
* When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
|
- When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
|
||||||
as it can deal with $g(u)=0$.
|
as it can deal with $g(u)=0$.
|
||||||
This is again only possible on even-ordered curves.
|
This is again only possible on even-ordered curves.
|
||||||
* In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
|
- In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
|
||||||
* When $s=0$, a division by zero would occur.
|
- When $s=0$, a division by zero would occur.
|
||||||
* When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
|
- When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
|
||||||
It is equivalent to checking whether $r=0$.
|
It is equivalent to checking whether $r=0$.
|
||||||
This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition.
|
This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition.
|
||||||
A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first
|
A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first
|
||||||
it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero.
|
it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero.
|
||||||
* Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
|
- Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
|
||||||
* For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
|
- For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
|
||||||
* For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
|
- For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
|
||||||
|
|
||||||
**Define** a version of $G_{c,u}(x)$ which deals with all these cases:
|
**Define** a version of $G_{c,u}(x)$ which deals with all these cases:
|
||||||
* If $a=0$ and $u=0$, return $\bot.$
|
|
||||||
* If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
|
- If $a=0$ and $u=0$, return $\bot.$
|
||||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
- If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
|
||||||
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
- If $c \in \\{0, 1, 4, 5\\}:$
|
||||||
* If $g(-u-x)$ is square, return $\bot.$
|
- If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
||||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
- If $g(-u-x)$ is square, return $\bot.$
|
||||||
* Let $v = x.$
|
- Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
- Let $s = x-u.$
|
||||||
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
||||||
* If $s = 0$, return $\bot.$
|
- If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
||||||
* Let $v = (r/s - u)/2.$
|
- If $s = 0$, return $\bot.$
|
||||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
- Let $v = (r/s - u)/2.$
|
||||||
* If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
|
- Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||||
* Depending on $c:$
|
- If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
|
||||||
* If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
|
- If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
|
||||||
* If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
|
- If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
|
||||||
* If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
|
- If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
|
||||||
* If $a=0$ and $t=0$, return $\bot$ (even curves only).
|
- If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
|
||||||
* If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
|
- If $a=0$ and $t=0$, return $\bot$ (even curves only).
|
||||||
* Return $t.$
|
- If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
|
||||||
|
- Return $t.$
|
||||||
|
|
||||||
Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once,
|
Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once,
|
||||||
for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached:
|
for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached:
|
||||||
* All cases where $P_u(t)$ is not defined:
|
|
||||||
* For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
|
- All cases where $P_u(t)$ is not defined:
|
||||||
* For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
|
- For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
|
||||||
* When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
|
- For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
|
||||||
|
- When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
|
||||||
|
|
||||||
These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves.
|
These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves.
|
||||||
|
|
||||||
@@ -370,40 +387,42 @@ These cases form a negligible subset of all $(u, t)$ for cryptographically sized
|
|||||||
Specialized for odd-ordered $a=0$ curves:
|
Specialized for odd-ordered $a=0$ curves:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x)$ as:
|
**Define** $G_{c,u}(x)$ as:
|
||||||
* If $u=0$, return $\bot.$
|
|
||||||
* If $c \in \\{0, 1, 4, 5\\}:$
|
- If $u=0$, return $\bot.$
|
||||||
* If $(-u-x)^3 + b$ is square, return $\bot$
|
- If $c \in \\{0, 1, 4, 5\\}:$
|
||||||
* Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
|
- If $(-u-x)^3 + b$ is square, return $\bot$
|
||||||
* Let $v = x.$
|
- Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
|
||||||
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
|
||||||
* Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
|
- Let $s = x-u.$
|
||||||
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
- Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
|
||||||
* If $s = 0$, return $\bot.$
|
- If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
|
||||||
* Let $v = (r/s - u)/2.$
|
- If $s = 0$, return $\bot.$
|
||||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
- Let $v = (r/s - u)/2.$
|
||||||
* Depending on $c:$
|
- Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||||
* If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
|
- If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
|
||||||
* If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
|
- If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
|
||||||
* If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
|
- If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
|
||||||
|
- If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
|
||||||
|
|
||||||
This is implemented in `secp256k1_ellswift_xswiftec_inv_var`.
|
This is implemented in `secp256k1_ellswift_xswiftec_inv_var`.
|
||||||
|
|
||||||
And the x-only ElligatorSwift encoding algorithm is still:
|
And the x-only ElligatorSwift encoding algorithm is still:
|
||||||
|
|
||||||
**Define** *ElligatorSwift(x)* as:
|
**Define** _ElligatorSwift(x)_ as:
|
||||||
* Loop:
|
|
||||||
* Pick a uniformly random field element $u.$
|
- Loop:
|
||||||
* Pick a uniformly random integer $c$ in $[0,8).$
|
- Pick a uniformly random field element $u.$
|
||||||
* Let $t = G_{c,u}(x).$
|
- Pick a uniformly random integer $c$ in $[0,8).$
|
||||||
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
- Let $t = G_{c,u}(x).$
|
||||||
|
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
|
||||||
|
|
||||||
Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them.
|
Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them.
|
||||||
While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$
|
While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$
|
||||||
combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity.
|
combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity.
|
||||||
|
|
||||||
## 4. Encoding and decoding full *(x, y)* coordinates
|
## 4. Encoding and decoding full _(x, y)_ coordinates
|
||||||
|
|
||||||
So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding
|
So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding
|
||||||
for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information
|
for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information
|
||||||
@@ -422,30 +441,32 @@ four distinct $P_u^{'-1}$ calls in the definition of $G_{u,c}.$
|
|||||||
|
|
||||||
To encode the sign of $y$ in the sign of $Y:$
|
To encode the sign of $y$ in the sign of $Y:$
|
||||||
|
|
||||||
**Define** *Decode(u, t)* for full $(x, y)$ as:
|
**Define** _Decode(u, t)_ for full $(x, y)$ as:
|
||||||
* Let $(X, Y) = P_u(t).$
|
|
||||||
* Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
|
- Let $(X, Y) = P_u(t).$
|
||||||
* Let $y = \sqrt{g(x)}.$
|
- Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
|
||||||
* If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
|
- Let $y = \sqrt{g(x)}.$
|
||||||
|
- If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
|
||||||
|
|
||||||
And encoding would be done using a $G_{c,u}(x, y)$ function defined as:
|
And encoding would be done using a $G_{c,u}(x, y)$ function defined as:
|
||||||
|
|
||||||
**Define** $G_{c,u}(x, y)$ as:
|
**Define** $G_{c,u}(x, y)$ as:
|
||||||
* If $c \in \\{0, 1\\}:$
|
|
||||||
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
- If $c \in \\{0, 1\\}:$
|
||||||
* If $g(-u-x)$ is square, return $\bot.$
|
- If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
|
||||||
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
- If $g(-u-x)$ is square, return $\bot.$
|
||||||
* Let $v = x.$
|
- Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
|
||||||
* Otherwise, when $c \in \\{2, 3\\}:$
|
- Let $v = x.$
|
||||||
* Let $s = x-u.$
|
- Otherwise, when $c \in \\{2, 3\\}:$
|
||||||
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
- Let $s = x-u.$
|
||||||
* If $c = 3$ and $r = 0$, return $\bot.$
|
- Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
|
||||||
* Let $v = (r/s - u)/2.$
|
- If $c = 3$ and $r = 0$, return $\bot.$
|
||||||
* Let $w = \sqrt{s}$; return $\bot$ if not square.
|
- Let $v = (r/s - u)/2.$
|
||||||
* Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
|
- Let $w = \sqrt{s}$; return $\bot$ if not square.
|
||||||
* Depending on $c:$
|
- Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
|
||||||
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
|
- Depending on $c:$
|
||||||
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
|
- If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
|
||||||
|
- If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
|
||||||
|
|
||||||
Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$
|
Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$
|
||||||
This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$.
|
This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$.
|
||||||
@@ -454,22 +475,23 @@ In the above logic, $sign$ can be implemented in several ways, such as parity of
|
|||||||
of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where
|
of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where
|
||||||
$-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$.
|
$-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$.
|
||||||
|
|
||||||
### 4.1 Full *(x, y)* coordinates for `secp256k1`
|
### 4.1 Full _(x, y)_ coordinates for `secp256k1`
|
||||||
|
|
||||||
For $a=0$ curves, there is another option. Note that for those,
|
For $a=0$ curves, there is another option. Note that for those,
|
||||||
the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to
|
the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to
|
||||||
encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get
|
encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get
|
||||||
as decoder:
|
as decoder:
|
||||||
|
|
||||||
**Define** *Decode(u, t)* as:
|
**Define** _Decode(u, t)_ as:
|
||||||
* Let $u'=u$ if $u \neq 0$; $1$ otherwise.
|
|
||||||
* Let $t'=t$ if $t \neq 0$; $1$ otherwise.
|
- Let $u'=u$ if $u \neq 0$; $1$ otherwise.
|
||||||
* Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
|
- Let $t'=t$ if $t \neq 0$; $1$ otherwise.
|
||||||
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
- Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
|
||||||
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
|
||||||
* Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
|
- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
|
||||||
* Let $y = \sqrt{g(x)}.$
|
- Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
|
||||||
* Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
|
- Let $y = \sqrt{g(x)}.$
|
||||||
|
- Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
|
||||||
|
|
||||||
This is implemented in `secp256k1_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as an integer in $[0,q).$
|
This is implemented in `secp256k1_ellswift_swiftec_var`. The used $sign(x)$ function is the parity of $x$ when represented as an integer in $[0,q).$
|
||||||
|
|
||||||
|
|||||||
3
external/secp256k1/doc/musig.md
vendored
3
external/secp256k1/doc/musig.md
vendored
@@ -1,5 +1,4 @@
|
|||||||
Notes on the musig module API
|
# Notes on the musig module API
|
||||||
===========================
|
|
||||||
|
|
||||||
The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`).
|
The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`).
|
||||||
A usage example can be found in `examples/musig.c`.
|
A usage example can be found in `examples/musig.c`.
|
||||||
|
|||||||
40
external/secp256k1/doc/release-process.md
vendored
40
external/secp256k1/doc/release-process.md
vendored
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`.
|
This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`.
|
||||||
|
|
||||||
We distinguish between two types of releases: *regular* and *maintenance* releases.
|
We distinguish between two types of releases: _regular_ and _maintenance_ releases.
|
||||||
Regular releases are releases of a new major or minor version as well as patches of the most recent release.
|
Regular releases are releases of a new major or minor version as well as patches of the most recent release.
|
||||||
Maintenance releases, on the other hand, are required for patches of older releases.
|
Maintenance releases, on the other hand, are required for patches of older releases.
|
||||||
|
|
||||||
@@ -15,6 +15,7 @@ This process also assumes that there will be no minor releases for old major rel
|
|||||||
We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core.
|
We aim to cut a regular release every 3-4 months, approximately twice as frequent as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core.
|
||||||
|
|
||||||
## Sanity checks
|
## Sanity checks
|
||||||
|
|
||||||
Perform these checks when reviewing the release PR (see below):
|
Perform these checks when reviewing the release PR (see below):
|
||||||
|
|
||||||
1. Ensure `make distcheck` doesn't fail.
|
1. Ensure `make distcheck` doesn't fail.
|
||||||
@@ -42,15 +43,15 @@ Perform these checks when reviewing the release PR (see below):
|
|||||||
## Regular release
|
## Regular release
|
||||||
|
|
||||||
1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that
|
1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that
|
||||||
* finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
|
- finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
|
||||||
* adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
|
- adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
|
||||||
* removing the `[Unreleased]` section header,
|
- removing the `[Unreleased]` section header,
|
||||||
* ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and
|
- ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and
|
||||||
* including an entry for `### ABI Compatibility` if it doesn't exist,
|
- including an entry for `### ABI Compatibility` if it doesn't exist,
|
||||||
* sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and,
|
- sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and,
|
||||||
* if this is not a patch release,
|
- if this is not a patch release,
|
||||||
* updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and
|
- updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and
|
||||||
* updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
|
- updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
|
||||||
2. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
2. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
||||||
3. After the PR is merged, tag the commit, and push the tag:
|
3. After the PR is merged, tag the commit, and push the tag:
|
||||||
```
|
```
|
||||||
@@ -59,11 +60,12 @@ Perform these checks when reviewing the release PR (see below):
|
|||||||
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
||||||
```
|
```
|
||||||
4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that
|
4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that
|
||||||
* sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
|
- sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
|
||||||
* increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
|
- increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
|
||||||
* adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
|
- adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
|
||||||
|
|
||||||
If other maintainers are not present to approve the PR, it can be merged without ACKs.
|
If other maintainers are not present to approve the PR, it can be merged without ACKs.
|
||||||
|
|
||||||
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
||||||
6. Send an announcement email to the bitcoin-dev mailing list.
|
6. Send an announcement email to the bitcoin-dev mailing list.
|
||||||
|
|
||||||
@@ -77,9 +79,9 @@ Note that bug fixes need to be backported only to releases for which no compatib
|
|||||||
git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR
|
git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR
|
||||||
```
|
```
|
||||||
2. Open a pull request to the `$MAJOR.$MINOR` branch that
|
2. Open a pull request to the `$MAJOR.$MINOR` branch that
|
||||||
* includes the bug fixes,
|
- includes the bug fixes,
|
||||||
* finalizes the release notes similar to a regular release,
|
- finalizes the release notes similar to a regular release,
|
||||||
* increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
|
- increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
|
||||||
and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`
|
and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`
|
||||||
(with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example).
|
(with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example).
|
||||||
3. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
3. Perform the [sanity checks](#sanity-checks) on the PR branch.
|
||||||
@@ -89,6 +91,6 @@ Note that bug fixes need to be backported only to releases for which no compatib
|
|||||||
git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH"
|
git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH"
|
||||||
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
|
||||||
```
|
```
|
||||||
6. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
|
||||||
7. Send an announcement email to the bitcoin-dev mailing list.
|
6. Send an announcement email to the bitcoin-dev mailing list.
|
||||||
8. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md).
|
7. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md).
|
||||||
|
|||||||
301
external/secp256k1/doc/safegcd_implementation.md
vendored
301
external/secp256k1/doc/safegcd_implementation.md
vendored
@@ -29,65 +29,67 @@ def gcd(f, g):
|
|||||||
return abs(f)
|
return abs(f)
|
||||||
```
|
```
|
||||||
|
|
||||||
It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop
|
It computes the greatest common divisor of an odd integer _f_ and any integer _g_. Its inner loop
|
||||||
keeps rewriting the variables *f* and *g* alongside a state variable *δ* that starts at *1*, until
|
keeps rewriting the variables _f_ and _g_ alongside a state variable _δ_ that starts at _1_, until
|
||||||
*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a
|
_g=0_ is reached. At that point, _|f|_ gives the GCD. Each of the transitions in the loop is called a
|
||||||
"division step" (referred to as divstep in what follows).
|
"division step" (referred to as divstep in what follows).
|
||||||
|
|
||||||
For example, *gcd(21, 14)* would be computed as:
|
For example, _gcd(21, 14)_ would be computed as:
|
||||||
- Start with *δ=1 f=21 g=14*
|
|
||||||
- Take the third branch: *δ=2 f=21 g=7*
|
- Start with _δ=1 f=21 g=14_
|
||||||
- Take the first branch: *δ=-1 f=7 g=-7*
|
- Take the third branch: _δ=2 f=21 g=7_
|
||||||
- Take the second branch: *δ=0 f=7 g=0*
|
- Take the first branch: _δ=-1 f=7 g=-7_
|
||||||
- The answer *|f| = 7*.
|
- Take the second branch: _δ=0 f=7 g=0_
|
||||||
|
- The answer _|f| = 7_.
|
||||||
|
|
||||||
Why it works:
|
Why it works:
|
||||||
|
|
||||||
- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
|
- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
|
||||||
- (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or *(f,g+f)*, resulting in an even *g*.
|
- (a) If _g_ is odd, replace _(f,g)_ with _(g,g-f)_ or _(f,g+f)_, resulting in an even _g_.
|
||||||
- (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even).
|
- (b) Replace _(f,g)_ with _(f,g/2)_ (where _g_ is guaranteed to be even).
|
||||||
- Neither of those two operations change the GCD:
|
- Neither of those two operations change the GCD:
|
||||||
- For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a c* and *g=b c* for some integers *a*
|
- For (a), assume _gcd(f,g)=c_, then it must be the case that _f=a c_ and _g=b c_ for some integers _a_
|
||||||
and *b*. As *(g,g-f)=(b c,(b-a)c)* and *(f,f+g)=(a c,(a+b)c)*, the result clearly still has
|
and _b_. As _(g,g-f)=(b c,(b-a)c)_ and _(f,f+g)=(a c,(a+b)c)_, the result clearly still has
|
||||||
common factor *c*. Reasoning in the other direction shows that no common factor can be added by
|
common factor _c_. Reasoning in the other direction shows that no common factor can be added by
|
||||||
doing so either.
|
doing so either.
|
||||||
- For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove
|
- For (b), we know that _f_ is odd, so _gcd(f,g)_ clearly has no factor _2_, and we can remove
|
||||||
it from *g*.
|
it from _g_.
|
||||||
- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3).
|
- The algorithm will eventually converge to _g=0_. This is proven in the paper (see theorem G.3).
|
||||||
- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the
|
- It follows that eventually we find a final value _f'_ for which _gcd(f,g) = gcd(f',0)_. As the
|
||||||
gcd of *f'* and *0* is *|f'|* by definition, that is our answer.
|
gcd of _f'_ and _0_ is _|f'|_ by definition, that is our answer.
|
||||||
|
|
||||||
Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
|
Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
|
||||||
the low-order bits of the variables to decide the next steps, and being easy to make
|
the low-order bits of the variables to decide the next steps, and being easy to make
|
||||||
constant-time (in more low-level languages than Python). The *δ* parameter is necessary to
|
constant-time (in more low-level languages than Python). The _δ_ parameter is necessary to
|
||||||
guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
|
guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
|
||||||
at high order bits.
|
at high order bits.
|
||||||
|
|
||||||
Properties that will become important later:
|
Properties that will become important later:
|
||||||
- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*.
|
|
||||||
- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we
|
|
||||||
do not need to worry about rounding.
|
|
||||||
- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N*
|
|
||||||
bits of *f* and *g*, and on *δ*.
|
|
||||||
|
|
||||||
|
- Performing more divsteps than needed is not a problem, as _f_ does not change anymore after _g=0_.
|
||||||
|
- Only even numbers are divided by _2_. This means that when reasoning about it algebraically we
|
||||||
|
do not need to worry about rounding.
|
||||||
|
- At every point during the algorithm's execution the next _N_ steps only depend on the bottom _N_
|
||||||
|
bits of _f_ and _g_, and on _δ_.
|
||||||
|
|
||||||
## 2. From GCDs to modular inverses
|
## 2. From GCDs to modular inverses
|
||||||
|
|
||||||
We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number a such that *a x=1
|
We want an algorithm to compute the inverse _a_ of _x_ modulo _M_, i.e. the number a such that _a x=1
|
||||||
mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is
|
mod M_. This inverse only exists if the GCD of _x_ and _M_ is _1_, but that is always the case if _M_ is
|
||||||
prime and *0 < x < M*. In what follows, assume that the modular inverse exists.
|
prime and _0 < x < M_. In what follows, assume that the modular inverse exists.
|
||||||
It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
|
It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
|
||||||
of how the internal variables can be written as linear combinations of the inputs at every step
|
of how the internal variables can be written as linear combinations of the inputs at every step
|
||||||
(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
|
(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
|
||||||
Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that *a x + b M = 1*.
|
Since the GCD is _1_, such an algorithm will compute numbers _a_ and _b_ such that _a x + b M = 1_.
|
||||||
Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x
|
Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x
|
||||||
mod M*.
|
mod M*.
|
||||||
|
|
||||||
A similar approach can be used to calculate modular inverses using the divsteps-based GCD
|
A similar approach can be used to calculate modular inverses using the divsteps-based GCD
|
||||||
algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping
|
algorithm shown above, if the modulus _M_ is odd. To do so, compute _gcd(f=M,g=x)_, while keeping
|
||||||
track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*.
|
track of extra variables _d_ and _e_, for which at every step _d = f/x (mod M)_ and _e = g/x (mod M)_.
|
||||||
*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M*
|
_f/x_ here means the number which multiplied with _x_ gives _f mod M_. As _f_ and _g_ are initialized to _M_
|
||||||
and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M
|
and _x_ respectively, _d_ and _e_ just start off being _0_ (_M/x mod M = 0/x mod M = 0_) and _1_ (_x/x mod M
|
||||||
= 1*).
|
= 1_).
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def div2(M, x):
|
def div2(M, x):
|
||||||
@@ -119,17 +121,16 @@ def modinv(M, x):
|
|||||||
return (d * f) % M
|
return (d * f) % M
|
||||||
```
|
```
|
||||||
|
|
||||||
Also note that this approach to track *d* and *e* throughout the computation to determine the inverse
|
Also note that this approach to track _d_ and _e_ throughout the computation to determine the inverse
|
||||||
is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
|
is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
|
||||||
entire computation is determined (see section 3 below) and the inverse is computed from that.
|
entire computation is determined (see section 3 below) and the inverse is computed from that.
|
||||||
The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
|
The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
|
||||||
be faster at the level of optimization we're able to do in C.
|
be faster at the level of optimization we're able to do in C.
|
||||||
|
|
||||||
|
|
||||||
## 3. Batching multiple divsteps
|
## 3. Batching multiple divsteps
|
||||||
|
|
||||||
Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)*
|
Every divstep can be expressed as a matrix multiplication, applying a transition matrix _(1/2 t)_
|
||||||
to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
|
to both vectors _[f, g]_ and _[d, e]_ (see paragraph 8.1 in the paper):
|
||||||
|
|
||||||
```
|
```
|
||||||
t = [ u, v ]
|
t = [ u, v ]
|
||||||
@@ -142,15 +143,15 @@ to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
|
|||||||
[ out_e ] [ in_e ]
|
[ out_e ] [ in_e ]
|
||||||
```
|
```
|
||||||
|
|
||||||
where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is
|
where _(u, v, q, r)_ is _(0, 2, -1, 1)_, _(2, 0, 1, 1)_, or _(2, 0, 0, 1)_, depending on which branch is
|
||||||
taken. As above, the resulting *f* and *g* are always integers.
|
taken. As above, the resulting _f_ and _g_ are always integers.
|
||||||
|
|
||||||
Performing multiple divsteps corresponds to a multiplication with the product of all the
|
Performing multiple divsteps corresponds to a multiplication with the product of all the
|
||||||
individual divsteps' transition matrices. As each transition matrix consists of integers
|
individual divsteps' transition matrices. As each transition matrix consists of integers
|
||||||
divided by *2*, the product of these matrices will consist of integers divided by *2<sup>N</sup>* (see also
|
divided by _2_, the product of these matrices will consist of integers divided by _2<sup>N</sup>_ (see also
|
||||||
theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay
|
theorem 9.2 in the paper). These divisions are expensive when updating _d_ and _e_, so we delay
|
||||||
them: we compute the integer coefficients of the combined transition matrix scaled by *2<sup>N</sup>*, and
|
them: we compute the integer coefficients of the combined transition matrix scaled by _2<sup>N</sup>_, and
|
||||||
do one division by *2<sup>N</sup>* as a final step:
|
do one division by _2<sup>N</sup>_ as a final step:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def divsteps_n_matrix(delta, f, g):
|
def divsteps_n_matrix(delta, f, g):
|
||||||
@@ -166,13 +167,13 @@ def divsteps_n_matrix(delta, f, g):
|
|||||||
return delta, (u, v, q, r)
|
return delta, (u, v, q, r)
|
||||||
```
|
```
|
||||||
|
|
||||||
As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this
|
As the branches in the divsteps are completely determined by the bottom _N_ bits of _f_ and _g_, this
|
||||||
function to compute the transition matrix only needs to see those bottom bits. Furthermore all
|
function to compute the transition matrix only needs to see those bottom bits. Furthermore all
|
||||||
intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*,
|
intermediate results and outputs fit in _(N+1)_-bit numbers (unsigned for _f_ and _g_; signed for _u_, _v_,
|
||||||
*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
|
_q_, and _r_) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
|
||||||
integers could set *N=62* and compute the full transition matrix for 62 steps at once without any
|
integers could set _N=62_ and compute the full transition matrix for 62 steps at once without any
|
||||||
big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs
|
big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs
|
||||||
to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps.
|
to update the full-size _f_, _g_, _d_, and _e_ numbers once every _N_ steps.
|
||||||
|
|
||||||
We still need functions to compute:
|
We still need functions to compute:
|
||||||
|
|
||||||
@@ -184,8 +185,8 @@ We still need functions to compute:
|
|||||||
[ out_e ] ( [ q, r ]) [ in_e ]
|
[ out_e ] ( [ q, r ]) [ in_e ]
|
||||||
```
|
```
|
||||||
|
|
||||||
Because the divsteps transformation only ever divides even numbers by two, the result of *t [f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f*
|
Because the divsteps transformation only ever divides even numbers by two, the result of _t [f,g]_ is always even. When _t_ is a composition of _N_ divsteps, it follows that the resulting _f_
|
||||||
and *g* will be multiple of *2<sup>N</sup>*, and division by *2<sup>N</sup>* is simply shifting them down:
|
and _g_ will be multiple of _2<sup>N</sup>_, and division by _2<sup>N</sup>_ is simply shifting them down:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def update_fg(f, g, t):
|
def update_fg(f, g, t):
|
||||||
@@ -199,8 +200,8 @@ def update_fg(f, g, t):
|
|||||||
return cf >> N, cg >> N
|
return cf >> N, cg >> N
|
||||||
```
|
```
|
||||||
|
|
||||||
The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2<sup>N</sup> mod M*.
|
The same is not true for _d_ and _e_, and we need an equivalent of the `div2` function for division by _2<sup>N</sup> mod M_.
|
||||||
This is easy if we have precomputed *1/M mod 2<sup>N</sup>* (which always exists for odd *M*):
|
This is easy if we have precomputed _1/M mod 2<sup>N</sup>_ (which always exists for odd _M_):
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def div2n(M, Mi, x):
|
def div2n(M, Mi, x):
|
||||||
@@ -224,7 +225,7 @@ def update_de(d, e, t, M, Mi):
|
|||||||
return div2n(M, Mi, cd), div2n(M, Mi, ce)
|
return div2n(M, Mi, cd), div2n(M, Mi, ce)
|
||||||
```
|
```
|
||||||
|
|
||||||
With all of those, we can write a version of `modinv` that performs *N* divsteps at once:
|
With all of those, we can write a version of `modinv` that performs _N_ divsteps at once:
|
||||||
|
|
||||||
```python3
|
```python3
|
||||||
def modinv(M, Mi, x):
|
def modinv(M, Mi, x):
|
||||||
@@ -242,20 +243,19 @@ def modinv(M, Mi, x):
|
|||||||
return (d * f) % M
|
return (d * f) % M
|
||||||
```
|
```
|
||||||
|
|
||||||
This means that in practice we'll always perform a multiple of *N* divsteps. This is not a problem
|
This means that in practice we'll always perform a multiple of _N_ divsteps. This is not a problem
|
||||||
because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *δ* keeps
|
because once _g=0_, further divsteps do not affect _f_, _g_, _d_, or _e_ anymore (only _δ_ keeps
|
||||||
increasing). For variable time code such excess iterations will be mostly optimized away in later
|
increasing). For variable time code such excess iterations will be mostly optimized away in later
|
||||||
sections.
|
sections.
|
||||||
|
|
||||||
|
|
||||||
## 4. Avoiding modulus operations
|
## 4. Avoiding modulus operations
|
||||||
|
|
||||||
So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of
|
So far, there are two places where we compute a remainder of big numbers modulo _M_: at the end of
|
||||||
`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the
|
`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating _d_ due to the
|
||||||
sign of *f*. These are relatively expensive operations when done generically.
|
sign of _f_. These are relatively expensive operations when done generically.
|
||||||
|
|
||||||
To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range
|
To deal with the modulus operation in `div2n`, we simply stop requiring _d_ and _e_ to be in range
|
||||||
*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus
|
_[0,M)_ all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus
|
||||||
operation at the end:
|
operation at the end:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -272,15 +272,15 @@ def update_de(d, e, t, M, Mi):
|
|||||||
return cd >> N, ce >> N
|
return cd >> N, ce >> N
|
||||||
```
|
```
|
||||||
|
|
||||||
Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|*
|
Let's look at bounds on the ranges of these numbers. It can be shown that _|u|+|v|_ and _|q|+|r|_
|
||||||
never exceed *2<sup>N</sup>* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have
|
never exceed _2<sup>N</sup>_ (see paragraph 8.3 in the paper), and thus a multiplication with _t_ will have
|
||||||
outputs whose absolute values are at most *2<sup>N</sup>* times the maximum absolute input value. In case the
|
outputs whose absolute values are at most _2<sup>N</sup>_ times the maximum absolute input value. In case the
|
||||||
inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming
|
inputs _d_ and _e_ are in _(-M,M)_, which is certainly true for the initial values _d=0_ and _e=1_ assuming
|
||||||
*M > 1*, the multiplication results in numbers in range *(-2<sup>N</sup>M,2<sup>N</sup>M)*. Subtracting less than *2<sup>N</sup>*
|
_M > 1_, the multiplication results in numbers in range _(-2<sup>N</sup>M,2<sup>N</sup>M)_. Subtracting less than _2<sup>N</sup>_
|
||||||
times *M* to cancel out *N* bits brings that up to *(-2<sup>N+1</sup>M,2<sup>N</sup>M)*, and
|
times _M_ to cancel out _N_ bits brings that up to _(-2<sup>N+1</sup>M,2<sup>N</sup>M)_, and
|
||||||
dividing by *2<sup>N</sup>* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that
|
dividing by _2<sup>N</sup>_ at the end takes it to _(-2M,M)_. Another application of `update_de` would take that
|
||||||
to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be
|
to _(-3M,2M)_, and so forth. This progressive expansion of the variables' ranges can be
|
||||||
counteracted by incrementing *d* and *e* by *M* whenever they're negative:
|
counteracted by incrementing _d_ and _e_ by _M_ whenever they're negative:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -293,12 +293,12 @@ counteracted by incrementing *d* and *e* by *M* whenever they're negative:
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the
|
With inputs in _(-2M,M)_, they will first be shifted into range _(-M,M)_, which means that the
|
||||||
output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de`
|
output will again be in _(-2M,M)_, and this remains the case regardless of how many `update_de`
|
||||||
invocations there are. In what follows, we will try to make this more efficient.
|
invocations there are. In what follows, we will try to make this more efficient.
|
||||||
|
|
||||||
Note that increasing *d* by *M* is equal to incrementing *cd* by *u M* and *ce* by *q M*. Similarly,
|
Note that increasing _d_ by _M_ is equal to incrementing _cd_ by _u M_ and _ce_ by _q M_. Similarly,
|
||||||
increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by *r M*. So we could instead write:
|
increasing _e_ by _M_ is equal to incrementing _cd_ by _v M_ and _ce_ by _r M_. So we could instead write:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -318,10 +318,10 @@ increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this
|
Now note that we have two steps of corrections to _cd_ and _ce_ that add multiples of _M_: this
|
||||||
increment, and the decrement that cancels out bottom bits. The second one depends on the first
|
increment, and the decrement that cancels out bottom bits. The second one depends on the first
|
||||||
one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce*
|
one, but they can still be efficiently combined by only computing the bottom bits of _cd_ and _ce_
|
||||||
at first, and using that to compute the final *md*, *me* values:
|
at first, and using that to compute the final _md_, _me_ values:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def update_de(d, e, t, M, Mi):
|
def update_de(d, e, t, M, Mi):
|
||||||
@@ -346,8 +346,8 @@ def update_de(d, e, t, M, Mi):
|
|||||||
return cd >> N, ce >> N
|
return cd >> N, ce >> N
|
||||||
```
|
```
|
||||||
|
|
||||||
One last optimization: we can avoid the *md M* and *me M* multiplications in the bottom bits of *cd*
|
One last optimization: we can avoid the _md M_ and _me M_ multiplications in the bottom bits of _cd_
|
||||||
and *ce* by moving them to the *md* and *me* correction:
|
and _ce_ by moving them to the _md_ and _me_ correction:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -362,10 +362,10 @@ and *ce* by moving them to the *md* and *me* correction:
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same
|
The resulting function takes _d_ and _e_ in range _(-2M,M)_ as inputs, and outputs values in the same
|
||||||
range. That also means that the *d* value at the end of `modinv` will be in that range, while we want
|
range. That also means that the _d_ value at the end of `modinv` will be in that range, while we want
|
||||||
a result in *[0,M)*. To do that, we need a normalization function. It's easy to integrate the
|
a result in _[0,M)_. To do that, we need a normalization function. It's easy to integrate the
|
||||||
conditional negation of *d* (based on the sign of *f*) into it as well:
|
conditional negation of _d_ (based on the sign of _f_) into it as well:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def normalize(sign, v, M):
|
def normalize(sign, v, M):
|
||||||
@@ -391,22 +391,21 @@ And calling it in `modinv` is simply:
|
|||||||
return normalize(f, d, M)
|
return normalize(f, d, M)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## 5. Constant-time operation
|
## 5. Constant-time operation
|
||||||
|
|
||||||
The primary selling point of the algorithm is fast constant-time operation. What code flow still
|
The primary selling point of the algorithm is fast constant-time operation. What code flow still
|
||||||
depends on the input data so far?
|
depends on the input data so far?
|
||||||
|
|
||||||
- the number of iterations of the while *g ≠ 0* loop in `modinv`
|
- the number of iterations of the while _g ≠ 0_ loop in `modinv`
|
||||||
- the branches inside `divsteps_n_matrix`
|
- the branches inside `divsteps_n_matrix`
|
||||||
- the sign checks in `update_de`
|
- the sign checks in `update_de`
|
||||||
- the sign checks in `normalize`
|
- the sign checks in `normalize`
|
||||||
|
|
||||||
To make the while loop in `modinv` constant time it can be replaced with a constant number of
|
To make the while loop in `modinv` constant time it can be replaced with a constant number of
|
||||||
iterations. The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit
|
iterations. The paper proves (Theorem 11.2) that _741_ divsteps are sufficient for any _256_-bit
|
||||||
inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound *724* is
|
inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound _724_ is
|
||||||
sufficient even. Given that every loop iteration performs *N* divsteps, it will run a total of
|
sufficient even. Given that every loop iteration performs _N_ divsteps, it will run a total of
|
||||||
*⌈724/N⌉* times.
|
_⌈724/N⌉_ times.
|
||||||
|
|
||||||
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
|
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
|
||||||
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
|
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
|
||||||
@@ -425,10 +424,10 @@ divstep can be written instead as (compare to the inner loop of `gcd` in section
|
|||||||
```
|
```
|
||||||
|
|
||||||
To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the
|
To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the
|
||||||
definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As
|
definition of negative numbers in two's complement, (_-v == ~v + 1_) holds for every number _v_. As
|
||||||
*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows
|
_-1_ in two's complement is all _1_ bits, bitflipping can be expressed as xor with _-1_. It follows
|
||||||
that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then
|
that _-v == (v ^ -1) - (-1)_. Thus, if we have a variable _c_ that takes on values _0_ or _-1_, then
|
||||||
*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*.
|
_(v ^ c) - c_ is _v_ if _c=0_ and _-v_ if _c=-1_.
|
||||||
|
|
||||||
Using this we can write:
|
Using this we can write:
|
||||||
|
|
||||||
@@ -444,13 +443,13 @@ in constant-time form as:
|
|||||||
x = (f ^ c1) - c1
|
x = (f ^ c1) - c1
|
||||||
```
|
```
|
||||||
|
|
||||||
To use that trick, we need a helper mask variable *c1* that resolves the condition *δ>0* to *-1*
|
To use that trick, we need a helper mask variable _c1_ that resolves the condition _δ>0_ to _-1_
|
||||||
(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by
|
(if true) or _0_ (if false). We compute _c1_ using right shifting, which is equivalent to dividing by
|
||||||
the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see
|
the specified power of _2_ and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see
|
||||||
`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all
|
`assumptions.h` for tests that this is the case). Right shifting by _63_ thus maps all
|
||||||
numbers in range *[-2<sup>63</sup>,0)* to *-1*, and numbers in range *[0,2<sup>63</sup>)* to *0*.
|
numbers in range _[-2<sup>63</sup>,0)_ to _-1_, and numbers in range _[0,2<sup>63</sup>)_ to _0_.
|
||||||
|
|
||||||
Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write:
|
Using the facts that _x&0=0_ and _x&(-1)=x_ (on two's complement systems again), we can write:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
if g & 1:
|
if g & 1:
|
||||||
@@ -498,8 +497,8 @@ becomes:
|
|||||||
```
|
```
|
||||||
|
|
||||||
It turns out that this can be implemented more efficiently by applying the substitution
|
It turns out that this can be implemented more efficiently by applying the substitution
|
||||||
*η=-δ*. In this representation, negating *δ* corresponds to negating *η*, and incrementing
|
_η=-δ_. In this representation, negating _δ_ corresponds to negating _η_, and incrementing
|
||||||
*δ* corresponds to decrementing *η*. This allows us to remove the negation in the *c1*
|
_δ_ corresponds to decrementing _η_. This allows us to remove the negation in the _c1_
|
||||||
computation:
|
computation:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -519,12 +518,12 @@ computation:
|
|||||||
g >>= 1
|
g >>= 1
|
||||||
```
|
```
|
||||||
|
|
||||||
A variant of divsteps with better worst-case performance can be used instead: starting *δ* at
|
A variant of divsteps with better worst-case performance can be used instead: starting _δ_ at
|
||||||
*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs
|
_1/2_ instead of _1_. This reduces the worst case number of iterations to _590_ for _256_-bit inputs
|
||||||
(which can be shown using convex hull analysis). In this case, the substitution *ζ=-(δ+1/2)*
|
(which can be shown using convex hull analysis). In this case, the substitution _ζ=-(δ+1/2)_
|
||||||
is used instead to keep the variable integral. Incrementing *δ* by *1* still translates to
|
is used instead to keep the variable integral. Incrementing _δ_ by _1_ still translates to
|
||||||
decrementing *ζ* by *1*, but negating *δ* now corresponds to going from *ζ* to *-(ζ+1)*, or
|
decrementing _ζ_ by _1_, but negating _δ_ now corresponds to going from _ζ_ to _-(ζ+1)_, or
|
||||||
*~ζ*. Doing that conditionally based on *c3* is simply:
|
_~ζ_. Doing that conditionally based on _c3_ is simply:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
...
|
...
|
||||||
@@ -534,13 +533,12 @@ decrementing *ζ* by *1*, but negating *δ* now corresponds to going fr
|
|||||||
```
|
```
|
||||||
|
|
||||||
By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to
|
By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to
|
||||||
also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of
|
also apply all _f_ operations to _u_, _v_ and all _g_ operations to _q_, _r_), a constant-time version of
|
||||||
`divsteps_n_matrix` is obtained. The full code will be in section 7.
|
`divsteps_n_matrix` is obtained. The full code will be in section 7.
|
||||||
|
|
||||||
These bit fiddling tricks can also be used to make the conditional negations and additions in
|
These bit fiddling tricks can also be used to make the conditional negations and additions in
|
||||||
`update_de` and `normalize` constant-time.
|
`update_de` and `normalize` constant-time.
|
||||||
|
|
||||||
|
|
||||||
## 6. Variable-time optimizations
|
## 6. Variable-time optimizations
|
||||||
|
|
||||||
In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time.
|
In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time.
|
||||||
@@ -550,7 +548,7 @@ faster non-constant time `divsteps_n_matrix` function.
|
|||||||
|
|
||||||
To do so, first consider yet another way of writing the inner loop of divstep operations in
|
To do so, first consider yet another way of writing the inner loop of divstep operations in
|
||||||
`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use
|
`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use
|
||||||
the original version with initial *δ=1* and *η=-δ* here.
|
the original version with initial _δ=1_ and _η=-δ_ here.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
for _ in range(N):
|
for _ in range(N):
|
||||||
@@ -562,7 +560,7 @@ for _ in range(N):
|
|||||||
g >>= 1
|
g >>= 1
|
||||||
```
|
```
|
||||||
|
|
||||||
Whenever *g* is even, the loop only shifts *g* down and decreases *η*. When *g* ends in multiple zero
|
Whenever _g_ is even, the loop only shifts _g_ down and decreases _η_. When _g_ ends in multiple zero
|
||||||
bits, these iterations can be consolidated into one step. This requires counting the bottom zero
|
bits, these iterations can be consolidated into one step. This requires counting the bottom zero
|
||||||
bits efficiently, which is possible on most platforms; it is abstracted here as the function
|
bits efficiently, which is possible on most platforms; it is abstracted here as the function
|
||||||
`count_trailing_zeros`.
|
`count_trailing_zeros`.
|
||||||
@@ -595,20 +593,20 @@ while True:
|
|||||||
# g is even now, and the eta decrement and g shift will happen in the next loop.
|
# g is even now, and the eta decrement and g shift will happen in the next loop.
|
||||||
```
|
```
|
||||||
|
|
||||||
We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever
|
We can now remove multiple bottom _0_ bits from _g_ at once, but still need a full iteration whenever
|
||||||
there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as
|
there is a bottom _1_ bit. In what follows, we will get rid of multiple _1_ bits simultaneously as
|
||||||
well.
|
well.
|
||||||
|
|
||||||
Observe that as long as *η ≥ 0*, the loop does not modify *f*. Instead, it cancels out bottom
|
Observe that as long as _η ≥ 0_, the loop does not modify _f_. Instead, it cancels out bottom
|
||||||
bits of *g* and shifts them out, and decreases *η* and *i* accordingly - interrupting only when *η*
|
bits of _g_ and shifts them out, and decreases _η_ and _i_ accordingly - interrupting only when _η_
|
||||||
becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to
|
becomes negative, or when _i_ reaches _0_. Combined, this is equivalent to adding a multiple of _f_ to
|
||||||
*g* to cancel out multiple bottom bits, and then shifting them out.
|
_g_ to cancel out multiple bottom bits, and then shifting them out.
|
||||||
|
|
||||||
It is easy to find what that multiple is: we want a number *w* such that *g+w f* has a few bottom
|
It is easy to find what that multiple is: we want a number _w_ such that _g+w f_ has a few bottom
|
||||||
zero bits. If that number of bits is *L*, we want *g+w f mod 2<sup>L</sup> = 0*, or *w = -g/f mod 2<sup>L</sup>*. Since *f*
|
zero bits. If that number of bits is _L_, we want _g+w f mod 2<sup>L</sup> = 0_, or _w = -g/f mod 2<sup>L</sup>_. Since _f_
|
||||||
is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before
|
is odd, such a _w_ exists for any _L_. _L_ cannot be more than _i_ steps (as we'd finish the loop before
|
||||||
doing more) or more than *η+1* steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but
|
doing more) or more than _η+1_ steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but
|
||||||
apart from that, we're only limited by the complexity of computing *w*.
|
apart from that, we're only limited by the complexity of computing _w_.
|
||||||
|
|
||||||
This code demonstrates how to cancel up to 4 bits per step:
|
This code demonstrates how to cancel up to 4 bits per step:
|
||||||
|
|
||||||
@@ -642,26 +640,25 @@ some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pag
|
|||||||
Here we need the negated modular inverse, which is a simple transformation of those:
|
Here we need the negated modular inverse, which is a simple transformation of those:
|
||||||
|
|
||||||
- Instead of a 3-bit table:
|
- Instead of a 3-bit table:
|
||||||
- *-f* or *f ^ 6*
|
- _-f_ or _f ^ 6_
|
||||||
- Instead of a 4-bit table:
|
- Instead of a 4-bit table:
|
||||||
- *1 - f(f + 1)*
|
- _1 - f(f + 1)_
|
||||||
- *-(f + (((f + 1) & 4) << 1))*
|
- _-(f + (((f + 1) & 4) << 1))_
|
||||||
- For larger tables the following technique can be used: if *w=-1/f mod 2<sup>L</sup>*, then *w(w f+2)* is
|
- For larger tables the following technique can be used: if _w=-1/f mod 2<sup>L</sup>_, then _w(w f+2)_ is
|
||||||
*-1/f mod 2<sup>2L</sup>*. This allows extending the previous formulas (or tables). In particular we
|
_-1/f mod 2<sup>2L</sup>_. This allows extending the previous formulas (or tables). In particular we
|
||||||
have this 6-bit function (based on the 3-bit function above):
|
have this 6-bit function (based on the 3-bit function above):
|
||||||
- *f(f<sup>2</sup> - 2)*
|
- _f(f<sup>2</sup> - 2)_
|
||||||
|
|
||||||
This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in
|
This loop, again extended to also handle _u_, _v_, _q_, and _r_ alongside _f_ and _g_, placed in
|
||||||
`divsteps_n_matrix`, gives a significantly faster, but non-constant time version.
|
`divsteps_n_matrix`, gives a significantly faster, but non-constant time version.
|
||||||
|
|
||||||
|
|
||||||
## 7. Final Python version
|
## 7. Final Python version
|
||||||
|
|
||||||
All together we need the following functions:
|
All together we need the following functions:
|
||||||
|
|
||||||
- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function
|
- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function
|
||||||
from section 2, but with its loop replaced by a variant of the constant-time divstep from
|
from section 2, but with its loop replaced by a variant of the constant-time divstep from
|
||||||
section 5, extended to handle *u*, *v*, *q*, *r*:
|
section 5, extended to handle _u_, _v_, _q_, _r_:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def divsteps_n_matrix(zeta, f, g):
|
def divsteps_n_matrix(zeta, f, g):
|
||||||
@@ -684,7 +681,7 @@ def divsteps_n_matrix(zeta, f, g):
|
|||||||
return zeta, (u, v, q, r)
|
return zeta, (u, v, q, r)
|
||||||
```
|
```
|
||||||
|
|
||||||
- The functions to update *f* and *g*, and *d* and *e*, from section 2 and section 4, with the constant-time
|
- The functions to update _f_ and _g_, and _d_ and _e_, from section 2 and section 4, with the constant-time
|
||||||
changes to `update_de` from section 5:
|
changes to `update_de` from section 5:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -723,7 +720,7 @@ def normalize(sign, v, M):
|
|||||||
return v
|
return v
|
||||||
```
|
```
|
||||||
|
|
||||||
- And finally the `modinv` function too, adapted to use *ζ* instead of *δ*, and using the fixed
|
- And finally the `modinv` function too, adapted to use _ζ_ instead of _δ_, and using the fixed
|
||||||
iteration count from section 5:
|
iteration count from section 5:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -772,20 +769,21 @@ def modinv_var(M, Mi, x):
|
|||||||
|
|
||||||
## 8. From GCDs to Jacobi symbol
|
## 8. From GCDs to Jacobi symbol
|
||||||
|
|
||||||
We can also use a similar approach to calculate Jacobi symbol *(x | M)* by keeping track of an
|
We can also use a similar approach to calculate Jacobi symbol _(x | M)_ by keeping track of an
|
||||||
extra variable *j*, for which at every step *(x | M) = j (g | f)*. As we update *f* and *g*, we
|
extra variable _j_, for which at every step _(x | M) = j (g | f)_. As we update _f_ and _g_, we
|
||||||
make corresponding updates to *j* using
|
make corresponding updates to _j_ using
|
||||||
[properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties):
|
[properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties):
|
||||||
* *((g/2) | f)* is either *(g | f)* or *-(g | f)*, depending on the value of *f mod 8* (negating if it's *3* or *5*).
|
|
||||||
* *(f | g)* is either *(g | f)* or *-(g | f)*, depending on *f mod 4* and *g mod 4* (negating if both are *3*).
|
|
||||||
|
|
||||||
These updates depend only on the values of *f* and *g* modulo *4* or *8*, and can thus be applied
|
- _((g/2) | f)_ is either _(g | f)_ or _-(g | f)_, depending on the value of _f mod 8_ (negating if it's _3_ or _5_).
|
||||||
very quickly, as long as we keep track of a few additional bits of *f* and *g*. Overall, this
|
- _(f | g)_ is either _(g | f)_ or _-(g | f)_, depending on _f mod 4_ and _g mod 4_ (negating if both are _3_).
|
||||||
|
|
||||||
|
These updates depend only on the values of _f_ and _g_ modulo _4_ or _8_, and can thus be applied
|
||||||
|
very quickly, as long as we keep track of a few additional bits of _f_ and _g_. Overall, this
|
||||||
calculation is slightly simpler than the one for the modular inverse because we no longer need to
|
calculation is slightly simpler than the one for the modular inverse because we no longer need to
|
||||||
keep track of *d* and *e*.
|
keep track of _d_ and _e_.
|
||||||
|
|
||||||
However, one difficulty of this approach is that the Jacobi symbol *(a | n)* is only defined for
|
However, one difficulty of this approach is that the Jacobi symbol _(a | n)_ is only defined for
|
||||||
positive odd integers *n*, whereas in the original safegcd algorithm, *f, g* can take negative
|
positive odd integers _n_, whereas in the original safegcd algorithm, _f, g_ can take negative
|
||||||
values. We resolve this by using the following modified steps:
|
values. We resolve this by using the following modified steps:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -799,15 +797,16 @@ values. We resolve this by using the following modified steps:
|
|||||||
```
|
```
|
||||||
|
|
||||||
The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4
|
The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4
|
||||||
and E.5 in the paper) preserves *gcd(f, g)*. However, there's no proof that the modified algorithm
|
and E.5 in the paper) preserves _gcd(f, g)_. However, there's no proof that the modified algorithm
|
||||||
will converge. The justification for posdivsteps is completely empirical: in practice, it appears
|
will converge. The justification for posdivsteps is completely empirical: in practice, it appears
|
||||||
that the vast majority of nonzero inputs converge to *f=g=gcd(f<sub>0</sub>, g<sub>0</sub>)* in a
|
that the vast majority of nonzero inputs converge to _f=g=gcd(f<sub>0</sub>, g<sub>0</sub>)_ in a
|
||||||
number of steps proportional to their logarithm.
|
number of steps proportional to their logarithm.
|
||||||
|
|
||||||
Note that:
|
Note that:
|
||||||
- We require inputs to satisfy *gcd(x, M) = 1*, as otherwise *f=1* is not reached.
|
|
||||||
- We require inputs *x &neq; 0*, because applying posdivstep with *g=0* has no effect.
|
- We require inputs to satisfy _gcd(x, M) = 1_, as otherwise _f=1_ is not reached.
|
||||||
- We need to update the termination condition from *g=0* to *f=1*.
|
- We require inputs _x &neq; 0_, because applying posdivstep with _g=0_ has no effect.
|
||||||
|
- We need to update the termination condition from _g=0_ to _f=1_.
|
||||||
|
|
||||||
We account for the possibility of nonconvergence by only performing a bounded number of
|
We account for the possibility of nonconvergence by only performing a bounded number of
|
||||||
posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not
|
posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not
|
||||||
@@ -815,5 +814,5 @@ yet been found.
|
|||||||
|
|
||||||
The optimizations in sections 3-7 above are described in the context of the original divsteps, but
|
The optimizations in sections 3-7 above are described in the context of the original divsteps, but
|
||||||
in the C implementation we also adapt most of them (not including "avoiding modulus operations",
|
in the C implementation we also adapt most of them (not including "avoiding modulus operations",
|
||||||
since it's not necessary to track *d, e*, and "constant-time operation", since we never calculate
|
since it's not necessary to track _d, e_, and "constant-time operation", since we never calculate
|
||||||
Jacobi symbols for secret data) to the posdivsteps version.
|
Jacobi symbols for secret data) to the posdivsteps version.
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
@@ -23,7 +23,7 @@
|
|||||||
#include <xrpl/basics/Resolver.h>
|
#include <xrpl/basics/Resolver.h>
|
||||||
#include <xrpl/beast/utility/Journal.h>
|
#include <xrpl/beast/utility/Journal.h>
|
||||||
|
|
||||||
#include <boost/asio/io_context.hpp>
|
#include <boost/asio/io_service.hpp>
|
||||||
|
|
||||||
namespace ripple {
|
namespace ripple {
|
||||||
|
|
||||||
@@ -33,7 +33,7 @@ public:
|
|||||||
explicit ResolverAsio() = default;
|
explicit ResolverAsio() = default;
|
||||||
|
|
||||||
static std::unique_ptr<ResolverAsio>
|
static std::unique_ptr<ResolverAsio>
|
||||||
New(boost::asio::io_context&, beast::Journal);
|
New(boost::asio::io_service&, beast::Journal);
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace ripple
|
} // namespace ripple
|
||||||
|
|||||||
@@ -23,8 +23,7 @@
|
|||||||
#include <xrpl/beast/utility/instrumentation.h>
|
#include <xrpl/beast/utility/instrumentation.h>
|
||||||
|
|
||||||
#include <boost/asio/basic_waitable_timer.hpp>
|
#include <boost/asio/basic_waitable_timer.hpp>
|
||||||
#include <boost/asio/io_context.hpp>
|
#include <boost/asio/io_service.hpp>
|
||||||
#include <boost/asio/post.hpp>
|
|
||||||
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
@@ -33,7 +32,7 @@
|
|||||||
|
|
||||||
namespace beast {
|
namespace beast {
|
||||||
|
|
||||||
/** Measures handler latency on an io_context queue. */
|
/** Measures handler latency on an io_service queue. */
|
||||||
template <class Clock>
|
template <class Clock>
|
||||||
class io_latency_probe
|
class io_latency_probe
|
||||||
{
|
{
|
||||||
@@ -45,12 +44,12 @@ private:
|
|||||||
std::condition_variable_any m_cond;
|
std::condition_variable_any m_cond;
|
||||||
std::size_t m_count;
|
std::size_t m_count;
|
||||||
duration const m_period;
|
duration const m_period;
|
||||||
boost::asio::io_context& m_ios;
|
boost::asio::io_service& m_ios;
|
||||||
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
|
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
|
||||||
bool m_cancel;
|
bool m_cancel;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
io_latency_probe(duration const& period, boost::asio::io_context& ios)
|
io_latency_probe(duration const& period, boost::asio::io_service& ios)
|
||||||
: m_count(1)
|
: m_count(1)
|
||||||
, m_period(period)
|
, m_period(period)
|
||||||
, m_ios(ios)
|
, m_ios(ios)
|
||||||
@@ -65,16 +64,16 @@ public:
|
|||||||
cancel(lock, true);
|
cancel(lock, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Return the io_context associated with the latency probe. */
|
/** Return the io_service associated with the latency probe. */
|
||||||
/** @{ */
|
/** @{ */
|
||||||
boost::asio::io_context&
|
boost::asio::io_service&
|
||||||
get_io_context()
|
get_io_service()
|
||||||
{
|
{
|
||||||
return m_ios;
|
return m_ios;
|
||||||
}
|
}
|
||||||
|
|
||||||
boost::asio::io_context const&
|
boost::asio::io_service const&
|
||||||
get_io_context() const
|
get_io_service() const
|
||||||
{
|
{
|
||||||
return m_ios;
|
return m_ios;
|
||||||
}
|
}
|
||||||
@@ -110,10 +109,8 @@ public:
|
|||||||
std::lock_guard lock(m_mutex);
|
std::lock_guard lock(m_mutex);
|
||||||
if (m_cancel)
|
if (m_cancel)
|
||||||
throw std::logic_error("io_latency_probe is canceled");
|
throw std::logic_error("io_latency_probe is canceled");
|
||||||
boost::asio::post(
|
m_ios.post(sample_op<Handler>(
|
||||||
m_ios,
|
std::forward<Handler>(handler), Clock::now(), false, this));
|
||||||
sample_op<Handler>(
|
|
||||||
std::forward<Handler>(handler), Clock::now(), false, this));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Initiate continuous i/o latency sampling.
|
/** Initiate continuous i/o latency sampling.
|
||||||
@@ -127,10 +124,8 @@ public:
|
|||||||
std::lock_guard lock(m_mutex);
|
std::lock_guard lock(m_mutex);
|
||||||
if (m_cancel)
|
if (m_cancel)
|
||||||
throw std::logic_error("io_latency_probe is canceled");
|
throw std::logic_error("io_latency_probe is canceled");
|
||||||
boost::asio::post(
|
m_ios.post(sample_op<Handler>(
|
||||||
m_ios,
|
std::forward<Handler>(handler), Clock::now(), true, this));
|
||||||
sample_op<Handler>(
|
|
||||||
std::forward<Handler>(handler), Clock::now(), true, this));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
@@ -241,13 +236,12 @@ private:
|
|||||||
// The latency is too high to maintain the desired
|
// The latency is too high to maintain the desired
|
||||||
// period so don't bother with a timer.
|
// period so don't bother with a timer.
|
||||||
//
|
//
|
||||||
boost::asio::post(
|
m_probe->m_ios.post(
|
||||||
m_probe->m_ios,
|
|
||||||
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
|
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
m_probe->m_timer.expires_after(when - now);
|
m_probe->m_timer.expires_from_now(when - now);
|
||||||
m_probe->m_timer.async_wait(
|
m_probe->m_timer.async_wait(
|
||||||
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
|
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
|
||||||
}
|
}
|
||||||
@@ -260,8 +254,7 @@ private:
|
|||||||
if (!m_probe)
|
if (!m_probe)
|
||||||
return;
|
return;
|
||||||
typename Clock::time_point const now(Clock::now());
|
typename Clock::time_point const now(Clock::now());
|
||||||
boost::asio::post(
|
m_probe->m_ios.post(
|
||||||
m_probe->m_ios,
|
|
||||||
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
|
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -8,11 +8,9 @@
|
|||||||
#ifndef BEAST_TEST_YIELD_TO_HPP
|
#ifndef BEAST_TEST_YIELD_TO_HPP
|
||||||
#define BEAST_TEST_YIELD_TO_HPP
|
#define BEAST_TEST_YIELD_TO_HPP
|
||||||
|
|
||||||
#include <boost/asio/executor_work_guard.hpp>
|
#include <boost/asio/io_service.hpp>
|
||||||
#include <boost/asio/io_context.hpp>
|
|
||||||
#include <boost/asio/spawn.hpp>
|
#include <boost/asio/spawn.hpp>
|
||||||
#include <boost/optional.hpp>
|
#include <boost/optional.hpp>
|
||||||
#include <boost/thread/csbl/memory/allocator_arg.hpp>
|
|
||||||
|
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
@@ -31,12 +29,10 @@ namespace test {
|
|||||||
class enable_yield_to
|
class enable_yield_to
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
boost::asio::io_context ios_;
|
boost::asio::io_service ios_;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
boost::optional<boost::asio::executor_work_guard<
|
boost::optional<boost::asio::io_service::work> work_;
|
||||||
boost::asio::io_context::executor_type>>
|
|
||||||
work_;
|
|
||||||
std::vector<std::thread> threads_;
|
std::vector<std::thread> threads_;
|
||||||
std::mutex m_;
|
std::mutex m_;
|
||||||
std::condition_variable cv_;
|
std::condition_variable cv_;
|
||||||
@@ -46,8 +42,7 @@ public:
|
|||||||
/// The type of yield context passed to functions.
|
/// The type of yield context passed to functions.
|
||||||
using yield_context = boost::asio::yield_context;
|
using yield_context = boost::asio::yield_context;
|
||||||
|
|
||||||
explicit enable_yield_to(std::size_t concurrency = 1)
|
explicit enable_yield_to(std::size_t concurrency = 1) : work_(ios_)
|
||||||
: work_(boost::asio::make_work_guard(ios_))
|
|
||||||
{
|
{
|
||||||
threads_.reserve(concurrency);
|
threads_.reserve(concurrency);
|
||||||
while (concurrency--)
|
while (concurrency--)
|
||||||
@@ -61,9 +56,9 @@ public:
|
|||||||
t.join();
|
t.join();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the `io_context` associated with the object
|
/// Return the `io_service` associated with the object
|
||||||
boost::asio::io_context&
|
boost::asio::io_service&
|
||||||
get_io_context()
|
get_io_service()
|
||||||
{
|
{
|
||||||
return ios_;
|
return ios_;
|
||||||
}
|
}
|
||||||
@@ -116,18 +111,13 @@ enable_yield_to::spawn(F0&& f, FN&&... fn)
|
|||||||
{
|
{
|
||||||
boost::asio::spawn(
|
boost::asio::spawn(
|
||||||
ios_,
|
ios_,
|
||||||
boost::allocator_arg,
|
|
||||||
boost::context::fixedsize_stack(2 * 1024 * 1024),
|
|
||||||
[&](yield_context yield) {
|
[&](yield_context yield) {
|
||||||
f(yield);
|
f(yield);
|
||||||
std::lock_guard lock{m_};
|
std::lock_guard lock{m_};
|
||||||
if (--running_ == 0)
|
if (--running_ == 0)
|
||||||
cv_.notify_all();
|
cv_.notify_all();
|
||||||
},
|
},
|
||||||
[](std::exception_ptr e) {
|
boost::coroutines::attributes(2 * 1024 * 1024));
|
||||||
if (e)
|
|
||||||
std::rethrow_exception(e);
|
|
||||||
});
|
|
||||||
spawn(fn...);
|
spawn(fn...);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -217,7 +217,7 @@ Reader::parse(Value& root, BufferSequence const& bs)
|
|||||||
std::string s;
|
std::string s;
|
||||||
s.reserve(buffer_size(bs));
|
s.reserve(buffer_size(bs));
|
||||||
for (auto const& b : bs)
|
for (auto const& b : bs)
|
||||||
s.append(static_cast<char const*>(b.data()), buffer_size(b));
|
s.append(buffer_cast<char const*>(b), buffer_size(b));
|
||||||
return parse(s, root);
|
return parse(s, root);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,81 +6,89 @@ option java_multiple_files = true;
|
|||||||
|
|
||||||
import "org/xrpl/rpc/v1/ledger.proto";
|
import "org/xrpl/rpc/v1/ledger.proto";
|
||||||
|
|
||||||
message GetLedgerRequest {
|
message GetLedgerRequest
|
||||||
LedgerSpecifier ledger = 1;
|
{
|
||||||
|
|
||||||
// If true, include transactions contained in this ledger
|
LedgerSpecifier ledger = 1;
|
||||||
bool transactions = 2;
|
|
||||||
|
|
||||||
// If true and transactions, include full transactions and metadata
|
// If true, include transactions contained in this ledger
|
||||||
// If false and transactions, include only transaction hashes
|
bool transactions = 2;
|
||||||
bool expand = 3;
|
|
||||||
|
|
||||||
// If true, include state map difference between this ledger and the
|
// If true and transactions, include full transactions and metadata
|
||||||
// previous ledger. This includes all added, modified or deleted ledger
|
// If false and transactions, include only transaction hashes
|
||||||
// objects
|
bool expand = 3;
|
||||||
bool get_objects = 4;
|
|
||||||
|
|
||||||
// If the request needs to be forwarded from a reporting node to a p2p node,
|
// If true, include state map difference between this ledger and the
|
||||||
// the reporting node will set this field. Clients should not set this
|
// previous ledger. This includes all added, modified or deleted ledger
|
||||||
// field.
|
// objects
|
||||||
string client_ip = 5;
|
bool get_objects = 4;
|
||||||
|
|
||||||
|
// If the request needs to be forwarded from a reporting node to a p2p node,
|
||||||
|
// the reporting node will set this field. Clients should not set this
|
||||||
|
// field.
|
||||||
|
string client_ip = 5;
|
||||||
|
|
||||||
// Identifying string. If user is set, client_ip is not set, and request is
|
// Identifying string. If user is set, client_ip is not set, and request is
|
||||||
// coming from a secure_gateway host, then the client is not subject to
|
// coming from a secure_gateway host, then the client is not subject to
|
||||||
// resource controls
|
// resource controls
|
||||||
string user = 6;
|
string user = 6;
|
||||||
|
|
||||||
// For every object in the diff, get the object's predecessor and successor
|
// For every object in the diff, get the object's predecessor and successor
|
||||||
// in the state map. Only used if get_objects is also true.
|
// in the state map. Only used if get_objects is also true.
|
||||||
bool get_object_neighbors = 7;
|
bool get_object_neighbors = 7;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GetLedgerResponse {
|
message GetLedgerResponse
|
||||||
bytes ledger_header = 1;
|
{
|
||||||
|
bytes ledger_header = 1;
|
||||||
|
|
||||||
oneof transactions {
|
oneof transactions
|
||||||
// Just the hashes
|
{
|
||||||
TransactionHashList hashes_list = 2;
|
// Just the hashes
|
||||||
|
TransactionHashList hashes_list = 2;
|
||||||
|
|
||||||
|
// Full transactions and metadata
|
||||||
|
TransactionAndMetadataList transactions_list = 3;
|
||||||
|
}
|
||||||
|
|
||||||
// Full transactions and metadata
|
// True if the ledger has been validated
|
||||||
TransactionAndMetadataList transactions_list = 3;
|
bool validated = 4;
|
||||||
}
|
|
||||||
|
|
||||||
// True if the ledger has been validated
|
// State map difference between this ledger and the previous ledger
|
||||||
bool validated = 4;
|
RawLedgerObjects ledger_objects = 5;
|
||||||
|
|
||||||
// State map difference between this ledger and the previous ledger
|
// True if the skiplist object is included in ledger_objects
|
||||||
RawLedgerObjects ledger_objects = 5;
|
bool skiplist_included = 6;
|
||||||
|
|
||||||
// True if the skiplist object is included in ledger_objects
|
// True if request was exempt from resource controls
|
||||||
bool skiplist_included = 6;
|
bool is_unlimited = 7;
|
||||||
|
|
||||||
// True if request was exempt from resource controls
|
// True if the response contains the state map diff
|
||||||
bool is_unlimited = 7;
|
bool objects_included = 8;
|
||||||
|
|
||||||
// True if the response contains the state map diff
|
// True if the response contains key of objects adjacent to objects in state
|
||||||
bool objects_included = 8;
|
// map diff
|
||||||
|
bool object_neighbors_included = 9;
|
||||||
|
|
||||||
// True if the response contains key of objects adjacent to objects in state
|
|
||||||
// map diff
|
|
||||||
bool object_neighbors_included = 9;
|
|
||||||
|
|
||||||
// Successor information for book directories modified as part of this
|
// Successor information for book directories modified as part of this
|
||||||
// ledger
|
// ledger
|
||||||
repeated BookSuccessor book_successors = 10;
|
repeated BookSuccessor book_successors = 10;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TransactionHashList {
|
message TransactionHashList
|
||||||
repeated bytes hashes = 1;
|
{
|
||||||
|
repeated bytes hashes = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TransactionAndMetadata {
|
message TransactionAndMetadata
|
||||||
bytes transaction_blob = 1;
|
{
|
||||||
|
bytes transaction_blob = 1;
|
||||||
|
|
||||||
bytes metadata_blob = 2;
|
bytes metadata_blob = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TransactionAndMetadataList {
|
message TransactionAndMetadataList
|
||||||
repeated TransactionAndMetadata transactions = 1;
|
{
|
||||||
|
repeated TransactionAndMetadata transactions = 1;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,43 +8,46 @@ import "org/xrpl/rpc/v1/ledger.proto";
|
|||||||
|
|
||||||
// Get ledger objects for a specific ledger. You can iterate through several
|
// Get ledger objects for a specific ledger. You can iterate through several
|
||||||
// calls to retrieve the entire contents of a single ledger version.
|
// calls to retrieve the entire contents of a single ledger version.
|
||||||
message GetLedgerDataRequest {
|
message GetLedgerDataRequest
|
||||||
// If set, only objects with a key greater than marker are returned.
|
{
|
||||||
// This can be used to pick up where a previous call left off.
|
// If set, only objects with a key greater than marker are returned.
|
||||||
// Set marker to the value of marker in the previous response.
|
// This can be used to pick up where a previous call left off.
|
||||||
bytes marker = 1;
|
// Set marker to the value of marker in the previous response.
|
||||||
|
bytes marker = 1;
|
||||||
|
|
||||||
LedgerSpecifier ledger = 2;
|
LedgerSpecifier ledger = 2;
|
||||||
|
|
||||||
// If set, only objects with a key less than end_marker are returned
|
// If set, only objects with a key less than end_marker are returned
|
||||||
bytes end_marker = 3;
|
bytes end_marker = 3;
|
||||||
|
|
||||||
// If the request needs to be forwarded from a reporting node to a p2p node,
|
// If the request needs to be forwarded from a reporting node to a p2p node,
|
||||||
// the reporting node will set this field. Clients should not set this
|
// the reporting node will set this field. Clients should not set this
|
||||||
// field.
|
// field.
|
||||||
string client_ip = 4;
|
string client_ip = 4;
|
||||||
|
|
||||||
// Identifying string. If user is set, client_ip is not set, and request is
|
// Identifying string. If user is set, client_ip is not set, and request is
|
||||||
// coming from a secure_gateway host, then the client is not subject to
|
// coming from a secure_gateway host, then the client is not subject to
|
||||||
// resource controls
|
// resource controls
|
||||||
string user = 6;
|
string user = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GetLedgerDataResponse {
|
message GetLedgerDataResponse
|
||||||
// Sequence of the ledger containing the returned ledger objects
|
{
|
||||||
uint32 ledger_index = 1;
|
// Sequence of the ledger containing the returned ledger objects
|
||||||
|
uint32 ledger_index = 1;
|
||||||
|
|
||||||
// Hash of the ledger containing the returned ledger objects
|
// Hash of the ledger containing the returned ledger objects
|
||||||
bytes ledger_hash = 2;
|
bytes ledger_hash = 2;
|
||||||
|
|
||||||
|
// Ledger objects
|
||||||
|
RawLedgerObjects ledger_objects = 3;
|
||||||
|
|
||||||
// Ledger objects
|
// Key to be passed into a subsequent call to continue iteration. If not
|
||||||
RawLedgerObjects ledger_objects = 3;
|
// set, there are no more objects left in the ledger, or no more objects
|
||||||
|
// with key less than end_marker (if end_marker was set in the request)
|
||||||
|
bytes marker = 4;
|
||||||
|
|
||||||
// Key to be passed into a subsequent call to continue iteration. If not
|
// True if request was exempt from resource controls
|
||||||
// set, there are no more objects left in the ledger, or no more objects
|
bool is_unlimited = 7;
|
||||||
// with key less than end_marker (if end_marker was set in the request)
|
|
||||||
bytes marker = 4;
|
|
||||||
|
|
||||||
// True if request was exempt from resource controls
|
|
||||||
bool is_unlimited = 7;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,23 +6,27 @@ option java_multiple_files = true;
|
|||||||
|
|
||||||
import "org/xrpl/rpc/v1/ledger.proto";
|
import "org/xrpl/rpc/v1/ledger.proto";
|
||||||
|
|
||||||
|
|
||||||
// Get the state map difference between the two specified ledgers
|
// Get the state map difference between the two specified ledgers
|
||||||
message GetLedgerDiffRequest {
|
message GetLedgerDiffRequest
|
||||||
LedgerSpecifier base_ledger = 1;
|
{
|
||||||
|
LedgerSpecifier base_ledger = 1;
|
||||||
|
|
||||||
LedgerSpecifier desired_ledger = 2;
|
LedgerSpecifier desired_ledger = 2;
|
||||||
|
|
||||||
// If true, include the full ledger object. If false, only keys are included.
|
// If true, include the full ledger object. If false, only keys are included.
|
||||||
bool include_blobs = 3;
|
bool include_blobs = 3;
|
||||||
|
|
||||||
// If the request needs to be forwarded from a reporting node to a p2p node,
|
// If the request needs to be forwarded from a reporting node to a p2p node,
|
||||||
// the reporting node will set this field. Clients should not set this
|
// the reporting node will set this field. Clients should not set this
|
||||||
// field.
|
// field.
|
||||||
string client_ip = 4;
|
string client_ip = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GetLedgerDiffResponse {
|
message GetLedgerDiffResponse
|
||||||
// All ledger objects that were added, modified or deleted between
|
{
|
||||||
// base_ledger and desired_ledger
|
// All ledger objects that were added, modified or deleted between
|
||||||
RawLedgerObjects ledger_objects = 1;
|
// base_ledger and desired_ledger
|
||||||
|
RawLedgerObjects ledger_objects = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,23 +7,25 @@ option java_multiple_files = true;
|
|||||||
import "org/xrpl/rpc/v1/ledger.proto";
|
import "org/xrpl/rpc/v1/ledger.proto";
|
||||||
|
|
||||||
// Get a single ledger object
|
// Get a single ledger object
|
||||||
message GetLedgerEntryRequest {
|
message GetLedgerEntryRequest
|
||||||
// Key of the desired object
|
{
|
||||||
bytes key = 1;
|
// Key of the desired object
|
||||||
|
bytes key = 1;
|
||||||
|
|
||||||
// Ledger containing the object
|
// Ledger containing the object
|
||||||
LedgerSpecifier ledger = 2;
|
LedgerSpecifier ledger = 2;
|
||||||
|
|
||||||
// If the request needs to be forwarded from a reporting node to a p2p node,
|
// If the request needs to be forwarded from a reporting node to a p2p node,
|
||||||
// the reporting node will set this field. Clients should not set this
|
// the reporting node will set this field. Clients should not set this
|
||||||
// field.
|
// field.
|
||||||
string client_ip = 3;
|
string client_ip = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GetLedgerEntryResponse {
|
message GetLedgerEntryResponse
|
||||||
RawLedgerObject ledger_object = 1;
|
{
|
||||||
|
RawLedgerObject ledger_object = 1;
|
||||||
|
|
||||||
// Ledger containing the object. Will match the value specified in the
|
// Ledger containing the object. Will match the value specified in the
|
||||||
// request.
|
// request.
|
||||||
LedgerSpecifier ledger = 2;
|
LedgerSpecifier ledger = 2;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,61 +5,71 @@ option java_package = "org.xrpl.rpc.v1";
|
|||||||
option java_multiple_files = true;
|
option java_multiple_files = true;
|
||||||
|
|
||||||
// Next field: 4
|
// Next field: 4
|
||||||
message LedgerSpecifier {
|
message LedgerSpecifier
|
||||||
// Next field: 4
|
{
|
||||||
enum Shortcut {
|
// Next field: 4
|
||||||
SHORTCUT_UNSPECIFIED = 0;
|
enum Shortcut
|
||||||
SHORTCUT_VALIDATED = 1;
|
{
|
||||||
SHORTCUT_CLOSED = 2;
|
SHORTCUT_UNSPECIFIED = 0;
|
||||||
SHORTCUT_CURRENT = 3;
|
SHORTCUT_VALIDATED = 1;
|
||||||
}
|
SHORTCUT_CLOSED = 2;
|
||||||
|
SHORTCUT_CURRENT = 3;
|
||||||
|
}
|
||||||
|
|
||||||
oneof ledger {
|
oneof ledger
|
||||||
Shortcut shortcut = 1;
|
{
|
||||||
uint32 sequence = 2;
|
Shortcut shortcut = 1;
|
||||||
// 32 bytes
|
uint32 sequence = 2;
|
||||||
bytes hash = 3;
|
// 32 bytes
|
||||||
}
|
bytes hash = 3;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Next field: 3
|
// Next field: 3
|
||||||
message RawLedgerObject {
|
message RawLedgerObject
|
||||||
// Raw data of the ledger object. In GetLedgerResponse and
|
{
|
||||||
// GetLedgerDiffResponse, data will be empty if the object was deleted.
|
// Raw data of the ledger object. In GetLedgerResponse and
|
||||||
bytes data = 1;
|
// GetLedgerDiffResponse, data will be empty if the object was deleted.
|
||||||
|
bytes data = 1;
|
||||||
|
|
||||||
// Key of the ledger object
|
// Key of the ledger object
|
||||||
bytes key = 2;
|
bytes key = 2;
|
||||||
|
|
||||||
enum ModificationType {
|
enum ModificationType
|
||||||
UNSPECIFIED = 0;
|
{
|
||||||
CREATED = 1;
|
UNSPECIFIED = 0;
|
||||||
MODIFIED = 2;
|
CREATED = 1;
|
||||||
DELETED = 3;
|
MODIFIED = 2;
|
||||||
}
|
DELETED = 3;
|
||||||
|
}
|
||||||
|
|
||||||
// Whether the object was created, modified or deleted
|
// Whether the object was created, modified or deleted
|
||||||
ModificationType mod_type = 3;
|
ModificationType mod_type = 3;
|
||||||
|
|
||||||
// Key of the object preceding this object in the desired ledger
|
// Key of the object preceding this object in the desired ledger
|
||||||
bytes predecessor = 4;
|
bytes predecessor = 4;
|
||||||
|
|
||||||
// Key of the object succeeding this object in the desired ledger
|
// Key of the object succeeding this object in the desired ledger
|
||||||
bytes successor = 5;
|
bytes successor = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message RawLedgerObjects {
|
message RawLedgerObjects
|
||||||
repeated RawLedgerObject objects = 1;
|
{
|
||||||
|
repeated RawLedgerObject objects = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Successor information for book directories. The book base is (usually) not
|
// Successor information for book directories. The book base is (usually) not
|
||||||
// an actual object, yet we need to be able to ask for the successor to the
|
// an actual object, yet we need to be able to ask for the successor to the
|
||||||
// book base.
|
// book base.
|
||||||
message BookSuccessor {
|
message BookSuccessor {
|
||||||
// Base of the book in question
|
|
||||||
bytes book_base = 1;
|
|
||||||
|
|
||||||
// First book directory in the book. An empty value here means the entire
|
// Base of the book in question
|
||||||
// book is deleted
|
bytes book_base = 1;
|
||||||
bytes first_book = 2;
|
|
||||||
|
// First book directory in the book. An empty value here means the entire
|
||||||
|
// book is deleted
|
||||||
|
bytes first_book = 2;
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -9,11 +9,13 @@ import "org/xrpl/rpc/v1/get_ledger_entry.proto";
|
|||||||
import "org/xrpl/rpc/v1/get_ledger_data.proto";
|
import "org/xrpl/rpc/v1/get_ledger_data.proto";
|
||||||
import "org/xrpl/rpc/v1/get_ledger_diff.proto";
|
import "org/xrpl/rpc/v1/get_ledger_diff.proto";
|
||||||
|
|
||||||
|
|
||||||
// These methods are binary only methods for retrieiving arbitrary ledger state
|
// These methods are binary only methods for retrieiving arbitrary ledger state
|
||||||
// via gRPC. These methods are used by clio, but can also be
|
// via gRPC. These methods are used by clio, but can also be
|
||||||
// used by any client that wants to extract ledger state in an efficient manner.
|
// used by any client that wants to extract ledger state in an efficient manner.
|
||||||
// They do not directly mimic the JSON equivalent methods.
|
// They do not directly mimic the JSON equivalent methods.
|
||||||
service XRPLedgerAPIService {
|
service XRPLedgerAPIService {
|
||||||
|
|
||||||
// Get a specific ledger, optionally including transactions and any modified,
|
// Get a specific ledger, optionally including transactions and any modified,
|
||||||
// added or deleted ledger objects
|
// added or deleted ledger objects
|
||||||
rpc GetLedger(GetLedgerRequest) returns (GetLedgerResponse);
|
rpc GetLedger(GetLedgerRequest) returns (GetLedgerResponse);
|
||||||
@@ -27,4 +29,5 @@ service XRPLedgerAPIService {
|
|||||||
// Get all ledger objects that are different between the two specified
|
// Get all ledger objects that are different between the two specified
|
||||||
// ledgers. Note, this method has no JSON equivalent.
|
// ledgers. Note, this method has no JSON equivalent.
|
||||||
rpc GetLedgerDiff(GetLedgerDiffRequest) returns (GetLedgerDiffResponse);
|
rpc GetLedgerDiff(GetLedgerDiffRequest) returns (GetLedgerDiffResponse);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,28 +4,29 @@ package protocol;
|
|||||||
// Unused numbers in the list below may have been used previously. Please don't
|
// Unused numbers in the list below may have been used previously. Please don't
|
||||||
// reassign them for reuse unless you are 100% certain that there won't be a
|
// reassign them for reuse unless you are 100% certain that there won't be a
|
||||||
// conflict. Even if you're sure, it's probably best to assign a new type.
|
// conflict. Even if you're sure, it's probably best to assign a new type.
|
||||||
enum MessageType {
|
enum MessageType
|
||||||
mtMANIFESTS = 2;
|
{
|
||||||
mtPING = 3;
|
mtMANIFESTS = 2;
|
||||||
mtCLUSTER = 5;
|
mtPING = 3;
|
||||||
mtENDPOINTS = 15;
|
mtCLUSTER = 5;
|
||||||
mtTRANSACTION = 30;
|
mtENDPOINTS = 15;
|
||||||
mtGET_LEDGER = 31;
|
mtTRANSACTION = 30;
|
||||||
mtLEDGER_DATA = 32;
|
mtGET_LEDGER = 31;
|
||||||
mtPROPOSE_LEDGER = 33;
|
mtLEDGER_DATA = 32;
|
||||||
mtSTATUS_CHANGE = 34;
|
mtPROPOSE_LEDGER = 33;
|
||||||
mtHAVE_SET = 35;
|
mtSTATUS_CHANGE = 34;
|
||||||
mtVALIDATION = 41;
|
mtHAVE_SET = 35;
|
||||||
mtGET_OBJECTS = 42;
|
mtVALIDATION = 41;
|
||||||
mtVALIDATORLIST = 54;
|
mtGET_OBJECTS = 42;
|
||||||
mtSQUELCH = 55;
|
mtVALIDATORLIST = 54;
|
||||||
mtVALIDATORLISTCOLLECTION = 56;
|
mtSQUELCH = 55;
|
||||||
mtPROOF_PATH_REQ = 57;
|
mtVALIDATORLISTCOLLECTION = 56;
|
||||||
mtPROOF_PATH_RESPONSE = 58;
|
mtPROOF_PATH_REQ = 57;
|
||||||
mtREPLAY_DELTA_REQ = 59;
|
mtPROOF_PATH_RESPONSE = 58;
|
||||||
mtREPLAY_DELTA_RESPONSE = 60;
|
mtREPLAY_DELTA_REQ = 59;
|
||||||
mtHAVE_TRANSACTIONS = 63;
|
mtREPLAY_DELTA_RESPONSE = 60;
|
||||||
mtTRANSACTIONS = 64;
|
mtHAVE_TRANSACTIONS = 63;
|
||||||
|
mtTRANSACTIONS = 64;
|
||||||
}
|
}
|
||||||
|
|
||||||
// token, iterations, target, challenge = issue demand for proof of work
|
// token, iterations, target, challenge = issue demand for proof of work
|
||||||
@@ -35,309 +36,352 @@ enum MessageType {
|
|||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
|
|
||||||
/* Provides the current ephemeral key for a validator. */
|
/* Provides the current ephemeral key for a validator. */
|
||||||
message TMManifest {
|
message TMManifest
|
||||||
// A Manifest object in the Ripple serialization format.
|
{
|
||||||
required bytes stobject = 1;
|
// A Manifest object in the Ripple serialization format.
|
||||||
|
required bytes stobject = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMManifests {
|
message TMManifests
|
||||||
repeated TMManifest list = 1;
|
{
|
||||||
|
repeated TMManifest list = 1;
|
||||||
|
|
||||||
// The manifests sent when a peer first connects to another peer are `history`.
|
// The manifests sent when a peer first connects to another peer are `history`.
|
||||||
optional bool history = 2 [deprecated = true];
|
optional bool history = 2 [deprecated=true];
|
||||||
}
|
}
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
|
|
||||||
// The status of a node in our cluster
|
// The status of a node in our cluster
|
||||||
message TMClusterNode {
|
message TMClusterNode
|
||||||
required string publicKey = 1;
|
{
|
||||||
required uint32 reportTime = 2;
|
required string publicKey = 1;
|
||||||
required uint32 nodeLoad = 3;
|
required uint32 reportTime = 2;
|
||||||
optional string nodeName = 4;
|
required uint32 nodeLoad = 3;
|
||||||
optional string address = 5;
|
optional string nodeName = 4;
|
||||||
|
optional string address = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sources that are placing load on the server
|
// Sources that are placing load on the server
|
||||||
message TMLoadSource {
|
message TMLoadSource
|
||||||
required string name = 1;
|
{
|
||||||
required uint32 cost = 2;
|
required string name = 1;
|
||||||
optional uint32 count = 3; // number of connections
|
required uint32 cost = 2;
|
||||||
|
optional uint32 count = 3; // number of connections
|
||||||
}
|
}
|
||||||
|
|
||||||
// The status of all nodes in the cluster
|
// The status of all nodes in the cluster
|
||||||
message TMCluster {
|
message TMCluster
|
||||||
repeated TMClusterNode clusterNodes = 1;
|
{
|
||||||
repeated TMLoadSource loadSources = 2;
|
repeated TMClusterNode clusterNodes = 1;
|
||||||
|
repeated TMLoadSource loadSources = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Node public key
|
// Node public key
|
||||||
message TMLink {
|
message TMLink
|
||||||
required bytes nodePubKey = 1 [deprecated = true]; // node public key
|
{
|
||||||
|
required bytes nodePubKey = 1 [deprecated=true]; // node public key
|
||||||
}
|
}
|
||||||
|
|
||||||
// Peer public key
|
// Peer public key
|
||||||
message TMPublicKey {
|
message TMPublicKey
|
||||||
required bytes publicKey = 1;
|
{
|
||||||
|
required bytes publicKey = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// A transaction can have only one input and one output.
|
// A transaction can have only one input and one output.
|
||||||
// If you want to send an amount that is greater than any single address of yours
|
// If you want to send an amount that is greater than any single address of yours
|
||||||
// you must first combine coins from one address to another.
|
// you must first combine coins from one address to another.
|
||||||
|
|
||||||
enum TransactionStatus {
|
enum TransactionStatus
|
||||||
tsNEW = 1; // origin node did/could not validate
|
{
|
||||||
tsCURRENT = 2; // scheduled to go in this ledger
|
tsNEW = 1; // origin node did/could not validate
|
||||||
tsCOMMITED = 3; // in a closed ledger
|
tsCURRENT = 2; // scheduled to go in this ledger
|
||||||
tsREJECT_CONFLICT = 4;
|
tsCOMMITED = 3; // in a closed ledger
|
||||||
tsREJECT_INVALID = 5;
|
tsREJECT_CONFLICT = 4;
|
||||||
tsREJECT_FUNDS = 6;
|
tsREJECT_INVALID = 5;
|
||||||
tsHELD_SEQ = 7;
|
tsREJECT_FUNDS = 6;
|
||||||
tsHELD_LEDGER = 8; // held for future ledger
|
tsHELD_SEQ = 7;
|
||||||
|
tsHELD_LEDGER = 8; // held for future ledger
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMTransaction {
|
message TMTransaction
|
||||||
required bytes rawTransaction = 1;
|
{
|
||||||
required TransactionStatus status = 2;
|
required bytes rawTransaction = 1;
|
||||||
optional uint64 receiveTimestamp = 3;
|
required TransactionStatus status = 2;
|
||||||
optional bool deferred = 4; // not applied to open ledger
|
optional uint64 receiveTimestamp = 3;
|
||||||
|
optional bool deferred = 4; // not applied to open ledger
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMTransactions {
|
message TMTransactions
|
||||||
repeated TMTransaction transactions = 1;
|
{
|
||||||
|
repeated TMTransaction transactions = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum NodeStatus {
|
|
||||||
nsCONNECTING = 1; // acquiring connections
|
enum NodeStatus
|
||||||
nsCONNECTED = 2; // convinced we are connected to the real network
|
{
|
||||||
nsMONITORING = 3; // we know what the previous ledger is
|
nsCONNECTING = 1; // acquiring connections
|
||||||
nsVALIDATING = 4; // we have the full ledger contents
|
nsCONNECTED = 2; // convinced we are connected to the real network
|
||||||
nsSHUTTING = 5; // node is shutting down
|
nsMONITORING = 3; // we know what the previous ledger is
|
||||||
|
nsVALIDATING = 4; // we have the full ledger contents
|
||||||
|
nsSHUTTING = 5; // node is shutting down
|
||||||
}
|
}
|
||||||
|
|
||||||
enum NodeEvent {
|
enum NodeEvent
|
||||||
neCLOSING_LEDGER = 1; // closing a ledger because its close time has come
|
{
|
||||||
neACCEPTED_LEDGER = 2; // accepting a closed ledger, we have finished computing it
|
neCLOSING_LEDGER = 1; // closing a ledger because its close time has come
|
||||||
neSWITCHED_LEDGER = 3; // changing due to network consensus
|
neACCEPTED_LEDGER = 2; // accepting a closed ledger, we have finished computing it
|
||||||
neLOST_SYNC = 4;
|
neSWITCHED_LEDGER = 3; // changing due to network consensus
|
||||||
|
neLOST_SYNC = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMStatusChange {
|
message TMStatusChange
|
||||||
optional NodeStatus newStatus = 1;
|
{
|
||||||
optional NodeEvent newEvent = 2;
|
optional NodeStatus newStatus = 1;
|
||||||
optional uint32 ledgerSeq = 3;
|
optional NodeEvent newEvent = 2;
|
||||||
optional bytes ledgerHash = 4;
|
optional uint32 ledgerSeq = 3;
|
||||||
optional bytes ledgerHashPrevious = 5;
|
optional bytes ledgerHash = 4;
|
||||||
optional uint64 networkTime = 6;
|
optional bytes ledgerHashPrevious = 5;
|
||||||
optional uint32 firstSeq = 7;
|
optional uint64 networkTime = 6;
|
||||||
optional uint32 lastSeq = 8;
|
optional uint32 firstSeq = 7;
|
||||||
|
optional uint32 lastSeq = 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Announce to the network our position on a closing ledger
|
// Announce to the network our position on a closing ledger
|
||||||
message TMProposeSet {
|
message TMProposeSet
|
||||||
required uint32 proposeSeq = 1;
|
{
|
||||||
required bytes currentTxHash = 2; // the hash of the ledger we are proposing
|
required uint32 proposeSeq = 1;
|
||||||
required bytes nodePubKey = 3;
|
required bytes currentTxHash = 2; // the hash of the ledger we are proposing
|
||||||
required uint32 closeTime = 4;
|
required bytes nodePubKey = 3;
|
||||||
required bytes signature = 5; // signature of above fields
|
required uint32 closeTime = 4;
|
||||||
required bytes previousledger = 6;
|
required bytes signature = 5; // signature of above fields
|
||||||
repeated bytes addedTransactions = 10; // not required if number is large
|
required bytes previousledger = 6;
|
||||||
repeated bytes removedTransactions = 11; // not required if number is large
|
repeated bytes addedTransactions = 10; // not required if number is large
|
||||||
|
repeated bytes removedTransactions = 11; // not required if number is large
|
||||||
|
|
||||||
// node vouches signature is correct
|
// node vouches signature is correct
|
||||||
optional bool checkedSignature = 7 [deprecated = true];
|
optional bool checkedSignature = 7 [deprecated=true];
|
||||||
|
|
||||||
// Number of hops traveled
|
// Number of hops traveled
|
||||||
optional uint32 hops = 12 [deprecated = true];
|
optional uint32 hops = 12 [deprecated=true];
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TxSetStatus {
|
enum TxSetStatus
|
||||||
tsHAVE = 1; // We have this set locally
|
{
|
||||||
tsCAN_GET = 2; // We have a peer with this set
|
tsHAVE = 1; // We have this set locally
|
||||||
tsNEED = 3; // We need this set and can't get it
|
tsCAN_GET = 2; // We have a peer with this set
|
||||||
|
tsNEED = 3; // We need this set and can't get it
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMHaveTransactionSet {
|
message TMHaveTransactionSet
|
||||||
required TxSetStatus status = 1;
|
{
|
||||||
required bytes hash = 2;
|
required TxSetStatus status = 1;
|
||||||
|
required bytes hash = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validator list (UNL)
|
// Validator list (UNL)
|
||||||
message TMValidatorList {
|
message TMValidatorList
|
||||||
required bytes manifest = 1;
|
{
|
||||||
required bytes blob = 2;
|
required bytes manifest = 1;
|
||||||
required bytes signature = 3;
|
required bytes blob = 2;
|
||||||
required uint32 version = 4;
|
required bytes signature = 3;
|
||||||
|
required uint32 version = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validator List v2
|
// Validator List v2
|
||||||
message ValidatorBlobInfo {
|
message ValidatorBlobInfo
|
||||||
optional bytes manifest = 1;
|
{
|
||||||
required bytes blob = 2;
|
optional bytes manifest = 1;
|
||||||
required bytes signature = 3;
|
required bytes blob = 2;
|
||||||
|
required bytes signature = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collection of Validator List v2 (UNL)
|
// Collection of Validator List v2 (UNL)
|
||||||
message TMValidatorListCollection {
|
message TMValidatorListCollection
|
||||||
required uint32 version = 1;
|
{
|
||||||
required bytes manifest = 2;
|
required uint32 version = 1;
|
||||||
repeated ValidatorBlobInfo blobs = 3;
|
required bytes manifest = 2;
|
||||||
|
repeated ValidatorBlobInfo blobs = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Used to sign a final closed ledger after reprocessing
|
// Used to sign a final closed ledger after reprocessing
|
||||||
message TMValidation {
|
message TMValidation
|
||||||
// The serialized validation
|
{
|
||||||
required bytes validation = 1;
|
// The serialized validation
|
||||||
|
required bytes validation = 1;
|
||||||
|
|
||||||
// node vouches signature is correct
|
// node vouches signature is correct
|
||||||
optional bool checkedSignature = 2 [deprecated = true];
|
optional bool checkedSignature = 2 [deprecated = true];
|
||||||
|
|
||||||
// Number of hops traveled
|
// Number of hops traveled
|
||||||
optional uint32 hops = 3 [deprecated = true];
|
optional uint32 hops = 3 [deprecated = true];
|
||||||
}
|
}
|
||||||
|
|
||||||
// An array of Endpoint messages
|
// An array of Endpoint messages
|
||||||
message TMEndpoints {
|
message TMEndpoints
|
||||||
// Previously used - don't reuse.
|
{
|
||||||
reserved 2;
|
// Previously used - don't reuse.
|
||||||
|
reserved 2;
|
||||||
|
|
||||||
// This field is used to allow the TMEndpoints message format to be
|
// This field is used to allow the TMEndpoints message format to be
|
||||||
// modified as necessary in the future.
|
// modified as necessary in the future.
|
||||||
required uint32 version = 1;
|
required uint32 version = 1;
|
||||||
|
|
||||||
// An update to the Endpoint type that uses a string
|
// An update to the Endpoint type that uses a string
|
||||||
// to represent endpoints, thus allowing ipv6 or ipv4 addresses
|
// to represent endpoints, thus allowing ipv6 or ipv4 addresses
|
||||||
message TMEndpointv2 {
|
message TMEndpointv2
|
||||||
required string endpoint = 1;
|
{
|
||||||
required uint32 hops = 2;
|
required string endpoint = 1;
|
||||||
}
|
required uint32 hops = 2;
|
||||||
repeated TMEndpointv2 endpoints_v2 = 3;
|
}
|
||||||
|
repeated TMEndpointv2 endpoints_v2 = 3;
|
||||||
};
|
};
|
||||||
|
|
||||||
message TMIndexedObject {
|
message TMIndexedObject
|
||||||
optional bytes hash = 1;
|
{
|
||||||
optional bytes nodeID = 2;
|
optional bytes hash = 1;
|
||||||
optional bytes index = 3;
|
optional bytes nodeID = 2;
|
||||||
optional bytes data = 4;
|
optional bytes index = 3;
|
||||||
optional uint32 ledgerSeq = 5;
|
optional bytes data = 4;
|
||||||
|
optional uint32 ledgerSeq = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMGetObjectByHash {
|
message TMGetObjectByHash
|
||||||
enum ObjectType {
|
{
|
||||||
otUNKNOWN = 0;
|
enum ObjectType {
|
||||||
otLEDGER = 1;
|
otUNKNOWN = 0;
|
||||||
otTRANSACTION = 2;
|
otLEDGER = 1;
|
||||||
otTRANSACTION_NODE = 3;
|
otTRANSACTION = 2;
|
||||||
otSTATE_NODE = 4;
|
otTRANSACTION_NODE = 3;
|
||||||
otCAS_OBJECT = 5;
|
otSTATE_NODE = 4;
|
||||||
otFETCH_PACK = 6;
|
otCAS_OBJECT = 5;
|
||||||
otTRANSACTIONS = 7;
|
otFETCH_PACK = 6;
|
||||||
}
|
otTRANSACTIONS = 7;
|
||||||
|
}
|
||||||
|
|
||||||
required ObjectType type = 1;
|
required ObjectType type = 1;
|
||||||
required bool query = 2; // is this a query or a reply?
|
required bool query = 2; // is this a query or a reply?
|
||||||
optional uint32 seq = 3; // used to match replies to queries
|
optional uint32 seq = 3; // used to match replies to queries
|
||||||
optional bytes ledgerHash = 4; // the hash of the ledger these queries are for
|
optional bytes ledgerHash = 4; // the hash of the ledger these queries are for
|
||||||
optional bool fat = 5; // return related nodes
|
optional bool fat = 5; // return related nodes
|
||||||
repeated TMIndexedObject objects = 6; // the specific objects requested
|
repeated TMIndexedObject objects = 6; // the specific objects requested
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMLedgerNode {
|
|
||||||
required bytes nodedata = 1;
|
message TMLedgerNode
|
||||||
optional bytes nodeid = 2; // missing for ledger base data
|
{
|
||||||
|
required bytes nodedata = 1;
|
||||||
|
optional bytes nodeid = 2; // missing for ledger base data
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TMLedgerInfoType {
|
enum TMLedgerInfoType
|
||||||
liBASE = 0; // basic ledger info
|
{
|
||||||
liTX_NODE = 1; // transaction node
|
liBASE = 0; // basic ledger info
|
||||||
liAS_NODE = 2; // account state node
|
liTX_NODE = 1; // transaction node
|
||||||
liTS_CANDIDATE = 3; // candidate transaction set
|
liAS_NODE = 2; // account state node
|
||||||
|
liTS_CANDIDATE = 3; // candidate transaction set
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TMLedgerType {
|
enum TMLedgerType
|
||||||
ltACCEPTED = 0;
|
{
|
||||||
ltCURRENT = 1; // no longer supported
|
ltACCEPTED = 0;
|
||||||
ltCLOSED = 2;
|
ltCURRENT = 1; // no longer supported
|
||||||
|
ltCLOSED = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TMQueryType {
|
enum TMQueryType
|
||||||
qtINDIRECT = 0;
|
{
|
||||||
|
qtINDIRECT = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMGetLedger {
|
message TMGetLedger
|
||||||
required TMLedgerInfoType itype = 1;
|
{
|
||||||
optional TMLedgerType ltype = 2;
|
required TMLedgerInfoType itype = 1;
|
||||||
optional bytes ledgerHash = 3; // Can also be the transaction set hash if liTS_CANDIDATE
|
optional TMLedgerType ltype = 2;
|
||||||
optional uint32 ledgerSeq = 4;
|
optional bytes ledgerHash = 3; // Can also be the transaction set hash if liTS_CANDIDATE
|
||||||
repeated bytes nodeIDs = 5;
|
optional uint32 ledgerSeq = 4;
|
||||||
optional uint64 requestCookie = 6;
|
repeated bytes nodeIDs = 5;
|
||||||
optional TMQueryType queryType = 7;
|
optional uint64 requestCookie = 6;
|
||||||
optional uint32 queryDepth = 8; // How deep to go, number of extra levels
|
optional TMQueryType queryType = 7;
|
||||||
|
optional uint32 queryDepth = 8; // How deep to go, number of extra levels
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TMReplyError {
|
enum TMReplyError
|
||||||
reNO_LEDGER = 1; // We don't have the ledger you are asking about
|
{
|
||||||
reNO_NODE = 2; // We don't have any of the nodes you are asking for
|
reNO_LEDGER = 1; // We don't have the ledger you are asking about
|
||||||
reBAD_REQUEST = 3; // The request is wrong, e.g. wrong format
|
reNO_NODE = 2; // We don't have any of the nodes you are asking for
|
||||||
|
reBAD_REQUEST = 3; // The request is wrong, e.g. wrong format
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMLedgerData {
|
message TMLedgerData
|
||||||
required bytes ledgerHash = 1;
|
{
|
||||||
required uint32 ledgerSeq = 2;
|
required bytes ledgerHash = 1;
|
||||||
required TMLedgerInfoType type = 3;
|
required uint32 ledgerSeq = 2;
|
||||||
repeated TMLedgerNode nodes = 4;
|
required TMLedgerInfoType type = 3;
|
||||||
optional uint32 requestCookie = 5;
|
repeated TMLedgerNode nodes = 4;
|
||||||
optional TMReplyError error = 6;
|
optional uint32 requestCookie = 5;
|
||||||
|
optional TMReplyError error = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMPing {
|
message TMPing
|
||||||
enum pingType {
|
{
|
||||||
ptPING = 0; // we want a reply
|
enum pingType {
|
||||||
ptPONG = 1; // this is a reply
|
ptPING = 0; // we want a reply
|
||||||
}
|
ptPONG = 1; // this is a reply
|
||||||
required pingType type = 1;
|
}
|
||||||
optional uint32 seq = 2; // detect stale replies, ensure other side is reading
|
required pingType type = 1;
|
||||||
optional uint64 pingTime = 3; // know when we think we sent the ping
|
optional uint32 seq = 2; // detect stale replies, ensure other side is reading
|
||||||
optional uint64 netTime = 4;
|
optional uint64 pingTime = 3; // know when we think we sent the ping
|
||||||
|
optional uint64 netTime = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMSquelch {
|
message TMSquelch
|
||||||
required bool squelch = 1; // squelch if true, otherwise unsquelch
|
{
|
||||||
required bytes validatorPubKey = 2; // validator's public key
|
required bool squelch = 1; // squelch if true, otherwise unsquelch
|
||||||
optional uint32 squelchDuration = 3; // squelch duration in seconds
|
required bytes validatorPubKey = 2; // validator's public key
|
||||||
|
optional uint32 squelchDuration = 3; // squelch duration in seconds
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TMLedgerMapType {
|
enum TMLedgerMapType
|
||||||
lmTRANASCTION = 1; // transaction map
|
{
|
||||||
lmACCOUNT_STATE = 2; // account state map
|
lmTRANASCTION = 1; // transaction map
|
||||||
|
lmACCOUNT_STATE = 2; // account state map
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMProofPathRequest {
|
message TMProofPathRequest
|
||||||
required bytes key = 1;
|
{
|
||||||
required bytes ledgerHash = 2;
|
required bytes key = 1;
|
||||||
required TMLedgerMapType type = 3;
|
required bytes ledgerHash = 2;
|
||||||
|
required TMLedgerMapType type = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMProofPathResponse {
|
message TMProofPathResponse
|
||||||
required bytes key = 1;
|
{
|
||||||
required bytes ledgerHash = 2;
|
required bytes key = 1;
|
||||||
required TMLedgerMapType type = 3;
|
required bytes ledgerHash = 2;
|
||||||
optional bytes ledgerHeader = 4;
|
required TMLedgerMapType type = 3;
|
||||||
repeated bytes path = 5;
|
optional bytes ledgerHeader = 4;
|
||||||
optional TMReplyError error = 6;
|
repeated bytes path = 5;
|
||||||
|
optional TMReplyError error = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMReplayDeltaRequest {
|
message TMReplayDeltaRequest
|
||||||
required bytes ledgerHash = 1;
|
{
|
||||||
|
required bytes ledgerHash = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMReplayDeltaResponse {
|
message TMReplayDeltaResponse
|
||||||
required bytes ledgerHash = 1;
|
{
|
||||||
optional bytes ledgerHeader = 2;
|
required bytes ledgerHash = 1;
|
||||||
repeated bytes transaction = 3;
|
optional bytes ledgerHeader = 2;
|
||||||
optional TMReplyError error = 4;
|
repeated bytes transaction = 3;
|
||||||
|
optional TMReplyError error = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
message TMHaveTransactions {
|
message TMHaveTransactions
|
||||||
repeated bytes hashes = 1;
|
{
|
||||||
|
repeated bytes hashes = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -34,4 +34,4 @@ serializeBatch(
|
|||||||
msg.addBitString(txid);
|
msg.addBitString(txid);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace ripple
|
} // namespace ripple
|
||||||
@@ -157,12 +157,7 @@ enum error_code_i {
|
|||||||
// Pathfinding
|
// Pathfinding
|
||||||
rpcDOMAIN_MALFORMED = 97,
|
rpcDOMAIN_MALFORMED = 97,
|
||||||
|
|
||||||
// ledger_entry
|
rpcLAST = rpcDOMAIN_MALFORMED // rpcLAST should always equal the last code.
|
||||||
rpcENTRY_NOT_FOUND = 98,
|
|
||||||
rpcUNEXPECTED_LEDGER_TYPE = 99,
|
|
||||||
|
|
||||||
rpcLAST =
|
|
||||||
rpcUNEXPECTED_LEDGER_TYPE // rpcLAST should always equal the last code.
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/** Codes returned in the `warnings` array of certain RPC commands.
|
/** Codes returned in the `warnings` array of certain RPC commands.
|
||||||
|
|||||||
@@ -141,7 +141,7 @@ constexpr std::uint32_t const tfTransferable = 0x00000008;
|
|||||||
constexpr std::uint32_t const tfMutable = 0x00000010;
|
constexpr std::uint32_t const tfMutable = 0x00000010;
|
||||||
|
|
||||||
// MPTokenIssuanceCreate flags:
|
// MPTokenIssuanceCreate flags:
|
||||||
// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate.
|
// NOTE - there is intentionally no flag here for lsfMPTLocked, which this transaction cannot mutate.
|
||||||
constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock;
|
constexpr std::uint32_t const tfMPTCanLock = lsfMPTCanLock;
|
||||||
constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth;
|
constexpr std::uint32_t const tfMPTRequireAuth = lsfMPTRequireAuth;
|
||||||
constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow;
|
constexpr std::uint32_t const tfMPTCanEscrow = lsfMPTCanEscrow;
|
||||||
@@ -243,7 +243,7 @@ constexpr std::uint32_t tfUntilFailure = 0x00040000;
|
|||||||
constexpr std::uint32_t tfIndependent = 0x00080000;
|
constexpr std::uint32_t tfIndependent = 0x00080000;
|
||||||
/**
|
/**
|
||||||
* @note If nested Batch transactions are supported in the future, the tfInnerBatchTxn flag
|
* @note If nested Batch transactions are supported in the future, the tfInnerBatchTxn flag
|
||||||
* will need to be removed from this mask to allow Batch transaction to be inside
|
* will need to be removed from this mask to allow Batch transaction to be inside
|
||||||
* the sfRawTransactions array.
|
* the sfRawTransactions array.
|
||||||
*/
|
*/
|
||||||
constexpr std::uint32_t const tfBatchMask =
|
constexpr std::uint32_t const tfBatchMask =
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo
|
|||||||
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
|
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
|
||||||
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
|
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
|
||||||
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
|
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
|
||||||
XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
|
XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo)
|
||||||
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
|
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
|
||||||
// Check flags in Credential transactions
|
// Check flags in Credential transactions
|
||||||
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
|
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
|
||||||
|
|||||||
@@ -505,3 +505,4 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
|
|||||||
|
|
||||||
#undef EXPAND
|
#undef EXPAND
|
||||||
#undef LEDGER_ENTRY_DUPLICATE
|
#undef LEDGER_ENTRY_DUPLICATE
|
||||||
|
|
||||||
|
|||||||
@@ -68,13 +68,9 @@ JSS(Flags); // in/out: TransactionSign; field.
|
|||||||
JSS(Holder); // field.
|
JSS(Holder); // field.
|
||||||
JSS(Invalid); //
|
JSS(Invalid); //
|
||||||
JSS(Issuer); // in: Credential transactions
|
JSS(Issuer); // in: Credential transactions
|
||||||
JSS(IssuingChainDoor); // field.
|
|
||||||
JSS(IssuingChainIssue); // field.
|
|
||||||
JSS(LastLedgerSequence); // in: TransactionSign; field
|
JSS(LastLedgerSequence); // in: TransactionSign; field
|
||||||
JSS(LastUpdateTime); // field.
|
JSS(LastUpdateTime); // field.
|
||||||
JSS(LimitAmount); // field.
|
JSS(LimitAmount); // field.
|
||||||
JSS(LockingChainDoor); // field.
|
|
||||||
JSS(LockingChainIssue); // field.
|
|
||||||
JSS(NetworkID); // field.
|
JSS(NetworkID); // field.
|
||||||
JSS(LPTokenOut); // in: AMM Liquidity Provider deposit tokens
|
JSS(LPTokenOut); // in: AMM Liquidity Provider deposit tokens
|
||||||
JSS(LPTokenIn); // in: AMM Liquidity Provider withdraw tokens
|
JSS(LPTokenIn); // in: AMM Liquidity Provider withdraw tokens
|
||||||
|
|||||||
@@ -25,7 +25,7 @@
|
|||||||
#include <xrpl/server/Port.h>
|
#include <xrpl/server/Port.h>
|
||||||
#include <xrpl/server/detail/ServerImpl.h>
|
#include <xrpl/server/detail/ServerImpl.h>
|
||||||
|
|
||||||
#include <boost/asio/io_context.hpp>
|
#include <boost/asio/io_service.hpp>
|
||||||
|
|
||||||
namespace ripple {
|
namespace ripple {
|
||||||
|
|
||||||
@@ -34,10 +34,10 @@ template <class Handler>
|
|||||||
std::unique_ptr<Server>
|
std::unique_ptr<Server>
|
||||||
make_Server(
|
make_Server(
|
||||||
Handler& handler,
|
Handler& handler,
|
||||||
boost::asio::io_context& io_context,
|
boost::asio::io_service& io_service,
|
||||||
beast::Journal journal)
|
beast::Journal journal)
|
||||||
{
|
{
|
||||||
return std::make_unique<ServerImpl<Handler>>(handler, io_context, journal);
|
return std::make_unique<ServerImpl<Handler>>(handler, io_service, journal);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace ripple
|
} // namespace ripple
|
||||||
|
|||||||
@@ -88,7 +88,9 @@ public:
|
|||||||
++iter)
|
++iter)
|
||||||
{
|
{
|
||||||
typename BufferSequence::value_type const& buffer(*iter);
|
typename BufferSequence::value_type const& buffer(*iter);
|
||||||
write(buffer.data(), boost::asio::buffer_size(buffer));
|
write(
|
||||||
|
boost::asio::buffer_cast<void const*>(buffer),
|
||||||
|
boost::asio::buffer_size(buffer));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -102,7 +104,7 @@ public:
|
|||||||
|
|
||||||
/** Detach the session.
|
/** Detach the session.
|
||||||
This holds the session open so that the response can be sent
|
This holds the session open so that the response can be sent
|
||||||
asynchronously. Calls to io_context::run made by the server
|
asynchronously. Calls to io_service::run made by the server
|
||||||
will not return until all detached sessions are closed.
|
will not return until all detached sessions are closed.
|
||||||
*/
|
*/
|
||||||
virtual std::shared_ptr<Session>
|
virtual std::shared_ptr<Session>
|
||||||
|
|||||||
@@ -24,13 +24,11 @@
|
|||||||
#include <xrpl/beast/net/IPAddressConversion.h>
|
#include <xrpl/beast/net/IPAddressConversion.h>
|
||||||
#include <xrpl/beast/utility/instrumentation.h>
|
#include <xrpl/beast/utility/instrumentation.h>
|
||||||
#include <xrpl/server/Session.h>
|
#include <xrpl/server/Session.h>
|
||||||
#include <xrpl/server/detail/Spawn.h>
|
|
||||||
#include <xrpl/server/detail/io_list.h>
|
#include <xrpl/server/detail/io_list.h>
|
||||||
|
|
||||||
#include <boost/asio/ip/tcp.hpp>
|
#include <boost/asio/ip/tcp.hpp>
|
||||||
#include <boost/asio/spawn.hpp>
|
#include <boost/asio/spawn.hpp>
|
||||||
#include <boost/asio/ssl/stream.hpp>
|
#include <boost/asio/ssl/stream.hpp>
|
||||||
#include <boost/asio/strand.hpp>
|
|
||||||
#include <boost/asio/streambuf.hpp>
|
#include <boost/asio/streambuf.hpp>
|
||||||
#include <boost/beast/core/stream_traits.hpp>
|
#include <boost/beast/core/stream_traits.hpp>
|
||||||
#include <boost/beast/http/dynamic_body.hpp>
|
#include <boost/beast/http/dynamic_body.hpp>
|
||||||
@@ -217,8 +215,8 @@ BaseHTTPPeer<Handler, Impl>::BaseHTTPPeer(
|
|||||||
ConstBufferSequence const& buffers)
|
ConstBufferSequence const& buffers)
|
||||||
: port_(port)
|
: port_(port)
|
||||||
, handler_(handler)
|
, handler_(handler)
|
||||||
, work_(boost::asio::make_work_guard(executor))
|
, work_(executor)
|
||||||
, strand_(boost::asio::make_strand(executor))
|
, strand_(executor)
|
||||||
, remote_address_(remote_address)
|
, remote_address_(remote_address)
|
||||||
, journal_(journal)
|
, journal_(journal)
|
||||||
{
|
{
|
||||||
@@ -358,7 +356,7 @@ BaseHTTPPeer<Handler, Impl>::on_write(
|
|||||||
return;
|
return;
|
||||||
if (graceful_)
|
if (graceful_)
|
||||||
return do_close();
|
return do_close();
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&BaseHTTPPeer<Handler, Impl>::do_read,
|
&BaseHTTPPeer<Handler, Impl>::do_read,
|
||||||
@@ -377,7 +375,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
|
|||||||
{
|
{
|
||||||
auto const p = impl().shared_from_this();
|
auto const p = impl().shared_from_this();
|
||||||
resume = std::function<void(void)>([this, p, writer, keep_alive]() {
|
resume = std::function<void(void)>([this, p, writer, keep_alive]() {
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&BaseHTTPPeer<Handler, Impl>::do_writer,
|
&BaseHTTPPeer<Handler, Impl>::do_writer,
|
||||||
@@ -408,7 +406,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
|
|||||||
if (!keep_alive)
|
if (!keep_alive)
|
||||||
return do_close();
|
return do_close();
|
||||||
|
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&BaseHTTPPeer<Handler, Impl>::do_read,
|
&BaseHTTPPeer<Handler, Impl>::do_read,
|
||||||
@@ -450,14 +448,14 @@ BaseHTTPPeer<Handler, Impl>::write(
|
|||||||
std::shared_ptr<Writer> const& writer,
|
std::shared_ptr<Writer> const& writer,
|
||||||
bool keep_alive)
|
bool keep_alive)
|
||||||
{
|
{
|
||||||
util::spawn(
|
boost::asio::spawn(bind_executor(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&BaseHTTPPeer<Handler, Impl>::do_writer,
|
&BaseHTTPPeer<Handler, Impl>::do_writer,
|
||||||
impl().shared_from_this(),
|
impl().shared_from_this(),
|
||||||
writer,
|
writer,
|
||||||
keep_alive,
|
keep_alive,
|
||||||
std::placeholders::_1));
|
std::placeholders::_1)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// DEPRECATED
|
// DEPRECATED
|
||||||
@@ -492,12 +490,12 @@ BaseHTTPPeer<Handler, Impl>::complete()
|
|||||||
}
|
}
|
||||||
|
|
||||||
// keep-alive
|
// keep-alive
|
||||||
util::spawn(
|
boost::asio::spawn(bind_executor(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&BaseHTTPPeer<Handler, Impl>::do_read,
|
&BaseHTTPPeer<Handler, Impl>::do_read,
|
||||||
impl().shared_from_this(),
|
impl().shared_from_this(),
|
||||||
std::placeholders::_1));
|
std::placeholders::_1)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// DEPRECATED
|
// DEPRECATED
|
||||||
|
|||||||
@@ -91,8 +91,8 @@ BasePeer<Handler, Impl>::BasePeer(
|
|||||||
return "##" + std::to_string(++id) + " ";
|
return "##" + std::to_string(++id) + " ";
|
||||||
}())
|
}())
|
||||||
, j_(sink_)
|
, j_(sink_)
|
||||||
, work_(boost::asio::make_work_guard(executor))
|
, work_(executor)
|
||||||
, strand_(boost::asio::make_strand(executor))
|
, strand_(executor)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -29,7 +29,6 @@
|
|||||||
#include <xrpl/server/detail/BasePeer.h>
|
#include <xrpl/server/detail/BasePeer.h>
|
||||||
#include <xrpl/server/detail/LowestLayer.h>
|
#include <xrpl/server/detail/LowestLayer.h>
|
||||||
|
|
||||||
#include <boost/asio/error.hpp>
|
|
||||||
#include <boost/beast/core/multi_buffer.hpp>
|
#include <boost/beast/core/multi_buffer.hpp>
|
||||||
#include <boost/beast/http/message.hpp>
|
#include <boost/beast/http/message.hpp>
|
||||||
#include <boost/beast/websocket.hpp>
|
#include <boost/beast/websocket.hpp>
|
||||||
@@ -421,17 +420,11 @@ BaseWSPeer<Handler, Impl>::start_timer()
|
|||||||
// Max seconds without completing a message
|
// Max seconds without completing a message
|
||||||
static constexpr std::chrono::seconds timeout{30};
|
static constexpr std::chrono::seconds timeout{30};
|
||||||
static constexpr std::chrono::seconds timeoutLocal{3};
|
static constexpr std::chrono::seconds timeoutLocal{3};
|
||||||
|
error_code ec;
|
||||||
try
|
timer_.expires_from_now(
|
||||||
{
|
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout, ec);
|
||||||
timer_.expires_after(
|
if (ec)
|
||||||
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout);
|
return fail(ec, "start_timer");
|
||||||
}
|
|
||||||
catch (boost::system::system_error const& e)
|
|
||||||
{
|
|
||||||
return fail(e.code(), "start_timer");
|
|
||||||
}
|
|
||||||
|
|
||||||
timer_.async_wait(bind_executor(
|
timer_.async_wait(bind_executor(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
@@ -445,14 +438,8 @@ template <class Handler, class Impl>
|
|||||||
void
|
void
|
||||||
BaseWSPeer<Handler, Impl>::cancel_timer()
|
BaseWSPeer<Handler, Impl>::cancel_timer()
|
||||||
{
|
{
|
||||||
try
|
error_code ec;
|
||||||
{
|
timer_.cancel(ec);
|
||||||
timer_.cancel();
|
|
||||||
}
|
|
||||||
catch (boost::system::system_error const&)
|
|
||||||
{
|
|
||||||
// ignored
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template <class Handler, class Impl>
|
template <class Handler, class Impl>
|
||||||
|
|||||||
@@ -69,7 +69,7 @@ private:
|
|||||||
stream_type stream_;
|
stream_type stream_;
|
||||||
socket_type& socket_;
|
socket_type& socket_;
|
||||||
endpoint_type remote_address_;
|
endpoint_type remote_address_;
|
||||||
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
|
boost::asio::io_context::strand strand_;
|
||||||
beast::Journal const j_;
|
beast::Journal const j_;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
@@ -95,7 +95,7 @@ private:
|
|||||||
Handler& handler_;
|
Handler& handler_;
|
||||||
boost::asio::io_context& ioc_;
|
boost::asio::io_context& ioc_;
|
||||||
acceptor_type acceptor_;
|
acceptor_type acceptor_;
|
||||||
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
|
boost::asio::io_context::strand strand_;
|
||||||
bool ssl_;
|
bool ssl_;
|
||||||
bool plain_;
|
bool plain_;
|
||||||
|
|
||||||
@@ -155,7 +155,7 @@ Door<Handler>::Detector::Detector(
|
|||||||
, stream_(std::move(stream))
|
, stream_(std::move(stream))
|
||||||
, socket_(stream_.socket())
|
, socket_(stream_.socket())
|
||||||
, remote_address_(remote_address)
|
, remote_address_(remote_address)
|
||||||
, strand_(boost::asio::make_strand(ioc_))
|
, strand_(ioc_)
|
||||||
, j_(j)
|
, j_(j)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
@@ -164,7 +164,7 @@ template <class Handler>
|
|||||||
void
|
void
|
||||||
Door<Handler>::Detector::run()
|
Door<Handler>::Detector::run()
|
||||||
{
|
{
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&Detector::do_detect,
|
&Detector::do_detect,
|
||||||
@@ -269,7 +269,7 @@ Door<Handler>::reOpen()
|
|||||||
Throw<std::exception>();
|
Throw<std::exception>();
|
||||||
}
|
}
|
||||||
|
|
||||||
acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
|
acceptor_.listen(boost::asio::socket_base::max_connections, ec);
|
||||||
if (ec)
|
if (ec)
|
||||||
{
|
{
|
||||||
JLOG(j_.error()) << "Listen on port '" << port_.name
|
JLOG(j_.error()) << "Listen on port '" << port_.name
|
||||||
@@ -291,7 +291,7 @@ Door<Handler>::Door(
|
|||||||
, handler_(handler)
|
, handler_(handler)
|
||||||
, ioc_(io_context)
|
, ioc_(io_context)
|
||||||
, acceptor_(io_context)
|
, acceptor_(io_context)
|
||||||
, strand_(boost::asio::make_strand(io_context))
|
, strand_(io_context)
|
||||||
, ssl_(
|
, ssl_(
|
||||||
port_.protocol.count("https") > 0 ||
|
port_.protocol.count("https") > 0 ||
|
||||||
port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 ||
|
port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 ||
|
||||||
@@ -307,7 +307,7 @@ template <class Handler>
|
|||||||
void
|
void
|
||||||
Door<Handler>::run()
|
Door<Handler>::run()
|
||||||
{
|
{
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
strand_,
|
strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&Door<Handler>::do_accept,
|
&Door<Handler>::do_accept,
|
||||||
@@ -320,8 +320,7 @@ void
|
|||||||
Door<Handler>::close()
|
Door<Handler>::close()
|
||||||
{
|
{
|
||||||
if (!strand_.running_in_this_thread())
|
if (!strand_.running_in_this_thread())
|
||||||
return boost::asio::post(
|
return strand_.post(
|
||||||
strand_,
|
|
||||||
std::bind(&Door<Handler>::close, this->shared_from_this()));
|
std::bind(&Door<Handler>::close, this->shared_from_this()));
|
||||||
error_code ec;
|
error_code ec;
|
||||||
acceptor_.close(ec);
|
acceptor_.close(ec);
|
||||||
|
|||||||
@@ -105,7 +105,7 @@ PlainHTTPPeer<Handler>::run()
|
|||||||
{
|
{
|
||||||
if (!this->handler_.onAccept(this->session(), this->remote_address_))
|
if (!this->handler_.onAccept(this->session(), this->remote_address_))
|
||||||
{
|
{
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
this->strand_,
|
this->strand_,
|
||||||
std::bind(&PlainHTTPPeer::do_close, this->shared_from_this()));
|
std::bind(&PlainHTTPPeer::do_close, this->shared_from_this()));
|
||||||
return;
|
return;
|
||||||
@@ -114,7 +114,7 @@ PlainHTTPPeer<Handler>::run()
|
|||||||
if (!socket_.is_open())
|
if (!socket_.is_open())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
this->strand_,
|
this->strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&PlainHTTPPeer::do_read,
|
&PlainHTTPPeer::do_read,
|
||||||
|
|||||||
@@ -115,14 +115,14 @@ SSLHTTPPeer<Handler>::run()
|
|||||||
{
|
{
|
||||||
if (!this->handler_.onAccept(this->session(), this->remote_address_))
|
if (!this->handler_.onAccept(this->session(), this->remote_address_))
|
||||||
{
|
{
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
this->strand_,
|
this->strand_,
|
||||||
std::bind(&SSLHTTPPeer::do_close, this->shared_from_this()));
|
std::bind(&SSLHTTPPeer::do_close, this->shared_from_this()));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (!socket_.is_open())
|
if (!socket_.is_open())
|
||||||
return;
|
return;
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
this->strand_,
|
this->strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&SSLHTTPPeer::do_handshake,
|
&SSLHTTPPeer::do_handshake,
|
||||||
@@ -164,7 +164,7 @@ SSLHTTPPeer<Handler>::do_handshake(yield_context do_yield)
|
|||||||
this->port().protocol.count("https") > 0;
|
this->port().protocol.count("https") > 0;
|
||||||
if (http)
|
if (http)
|
||||||
{
|
{
|
||||||
util::spawn(
|
boost::asio::spawn(
|
||||||
this->strand_,
|
this->strand_,
|
||||||
std::bind(
|
std::bind(
|
||||||
&SSLHTTPPeer::do_read,
|
&SSLHTTPPeer::do_read,
|
||||||
|
|||||||
@@ -26,8 +26,6 @@
|
|||||||
#include <xrpl/server/detail/io_list.h>
|
#include <xrpl/server/detail/io_list.h>
|
||||||
|
|
||||||
#include <boost/asio.hpp>
|
#include <boost/asio.hpp>
|
||||||
#include <boost/asio/executor_work_guard.hpp>
|
|
||||||
#include <boost/asio/io_context.hpp>
|
|
||||||
|
|
||||||
#include <array>
|
#include <array>
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
@@ -87,11 +85,9 @@ private:
|
|||||||
|
|
||||||
Handler& handler_;
|
Handler& handler_;
|
||||||
beast::Journal const j_;
|
beast::Journal const j_;
|
||||||
boost::asio::io_context& io_context_;
|
boost::asio::io_service& io_service_;
|
||||||
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
|
boost::asio::io_service::strand strand_;
|
||||||
std::optional<boost::asio::executor_work_guard<
|
std::optional<boost::asio::io_service::work> work_;
|
||||||
boost::asio::io_context::executor_type>>
|
|
||||||
work_;
|
|
||||||
|
|
||||||
std::mutex m_;
|
std::mutex m_;
|
||||||
std::vector<Port> ports_;
|
std::vector<Port> ports_;
|
||||||
@@ -104,7 +100,7 @@ private:
|
|||||||
public:
|
public:
|
||||||
ServerImpl(
|
ServerImpl(
|
||||||
Handler& handler,
|
Handler& handler,
|
||||||
boost::asio::io_context& io_context,
|
boost::asio::io_service& io_service,
|
||||||
beast::Journal journal);
|
beast::Journal journal);
|
||||||
|
|
||||||
~ServerImpl();
|
~ServerImpl();
|
||||||
@@ -127,10 +123,10 @@ public:
|
|||||||
return ios_;
|
return ios_;
|
||||||
}
|
}
|
||||||
|
|
||||||
boost::asio::io_context&
|
boost::asio::io_service&
|
||||||
get_io_context()
|
get_io_service()
|
||||||
{
|
{
|
||||||
return io_context_;
|
return io_service_;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
@@ -144,13 +140,13 @@ private:
|
|||||||
template <class Handler>
|
template <class Handler>
|
||||||
ServerImpl<Handler>::ServerImpl(
|
ServerImpl<Handler>::ServerImpl(
|
||||||
Handler& handler,
|
Handler& handler,
|
||||||
boost::asio::io_context& io_context,
|
boost::asio::io_service& io_service,
|
||||||
beast::Journal journal)
|
beast::Journal journal)
|
||||||
: handler_(handler)
|
: handler_(handler)
|
||||||
, j_(journal)
|
, j_(journal)
|
||||||
, io_context_(io_context)
|
, io_service_(io_service)
|
||||||
, strand_(boost::asio::make_strand(io_context_))
|
, strand_(io_service_)
|
||||||
, work_(std::in_place, boost::asio::make_work_guard(io_context_))
|
, work_(io_service_)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -177,7 +173,7 @@ ServerImpl<Handler>::ports(std::vector<Port> const& ports)
|
|||||||
ports_.push_back(port);
|
ports_.push_back(port);
|
||||||
auto& internalPort = ports_.back();
|
auto& internalPort = ports_.back();
|
||||||
if (auto sp = ios_.emplace<Door<Handler>>(
|
if (auto sp = ios_.emplace<Door<Handler>>(
|
||||||
handler_, io_context_, internalPort, j_))
|
handler_, io_service_, internalPort, j_))
|
||||||
{
|
{
|
||||||
list_.push_back(sp);
|
list_.push_back(sp);
|
||||||
|
|
||||||
|
|||||||
@@ -1,108 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of rippled: https://github.com/ripple/rippled
|
|
||||||
Copyright(c) 2025 Ripple Labs Inc.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and/or distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#ifndef RIPPLE_SERVER_SPAWN_H_INCLUDED
|
|
||||||
#define RIPPLE_SERVER_SPAWN_H_INCLUDED
|
|
||||||
|
|
||||||
#include <xrpl/basics/Log.h>
|
|
||||||
|
|
||||||
#include <boost/asio/spawn.hpp>
|
|
||||||
#include <boost/asio/strand.hpp>
|
|
||||||
|
|
||||||
#include <concepts>
|
|
||||||
#include <type_traits>
|
|
||||||
|
|
||||||
namespace ripple::util {
|
|
||||||
namespace impl {
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
concept IsStrand = std::same_as<
|
|
||||||
std::decay_t<T>,
|
|
||||||
boost::asio::strand<typename std::decay_t<T>::inner_executor_type>>;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief A completion handler that restores `boost::asio::spawn`'s behaviour
|
|
||||||
* from Boost 1.83
|
|
||||||
*
|
|
||||||
* This is intended to be passed as the third argument to `boost::asio::spawn`
|
|
||||||
* so that exceptions are not ignored but propagated to `io_context.run()` call
|
|
||||||
* site.
|
|
||||||
*
|
|
||||||
* @param ePtr The exception that was caught on the coroutine
|
|
||||||
*/
|
|
||||||
inline constexpr auto kPROPAGATE_EXCEPTIONS = [](std::exception_ptr ePtr) {
|
|
||||||
if (ePtr)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
std::rethrow_exception(ePtr);
|
|
||||||
}
|
|
||||||
catch (std::exception const& e)
|
|
||||||
{
|
|
||||||
JLOG(debugLog().warn()) << "Spawn exception: " << e.what();
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
JLOG(debugLog().warn()) << "Spawn exception: Unknown";
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace impl
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Spawns a coroutine using `boost::asio::spawn`
|
|
||||||
*
|
|
||||||
* @note This uses kPROPAGATE_EXCEPTIONS to force asio to propagate exceptions
|
|
||||||
* through `io_context`
|
|
||||||
* @note Since implicit strand was removed from boost::asio::spawn this helper
|
|
||||||
* function adds the strand back
|
|
||||||
*
|
|
||||||
* @tparam Ctx The type of the context/strand
|
|
||||||
* @tparam F The type of the function to execute
|
|
||||||
* @param ctx The execution context
|
|
||||||
* @param func The function to execute. Must return `void`
|
|
||||||
*/
|
|
||||||
template <typename Ctx, typename F>
|
|
||||||
requires std::is_invocable_r_v<void, F, boost::asio::yield_context>
|
|
||||||
void
|
|
||||||
spawn(Ctx&& ctx, F&& func)
|
|
||||||
{
|
|
||||||
if constexpr (impl::IsStrand<Ctx>)
|
|
||||||
{
|
|
||||||
boost::asio::spawn(
|
|
||||||
std::forward<Ctx>(ctx),
|
|
||||||
std::forward<F>(func),
|
|
||||||
impl::kPROPAGATE_EXCEPTIONS);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
boost::asio::spawn(
|
|
||||||
boost::asio::make_strand(
|
|
||||||
boost::asio::get_associated_executor(std::forward<Ctx>(ctx))),
|
|
||||||
std::forward<F>(func),
|
|
||||||
impl::kPROPAGATE_EXCEPTIONS);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace ripple::util
|
|
||||||
|
|
||||||
#endif
|
|
||||||
@@ -166,7 +166,7 @@ public:
|
|||||||
May be called concurrently.
|
May be called concurrently.
|
||||||
|
|
||||||
Preconditions:
|
Preconditions:
|
||||||
No call to io_context::run on any io_context
|
No call to io_service::run on any io_service
|
||||||
used by work objects associated with this io_list
|
used by work objects associated with this io_list
|
||||||
exists in the caller's call stack.
|
exists in the caller's call stack.
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -25,9 +25,8 @@
|
|||||||
#include <xrpl/beast/utility/Journal.h>
|
#include <xrpl/beast/utility/Journal.h>
|
||||||
#include <xrpl/beast/utility/instrumentation.h>
|
#include <xrpl/beast/utility/instrumentation.h>
|
||||||
|
|
||||||
#include <boost/asio/bind_executor.hpp>
|
|
||||||
#include <boost/asio/error.hpp>
|
#include <boost/asio/error.hpp>
|
||||||
#include <boost/asio/io_context.hpp>
|
#include <boost/asio/io_service.hpp>
|
||||||
#include <boost/asio/ip/tcp.hpp>
|
#include <boost/asio/ip/tcp.hpp>
|
||||||
#include <boost/system/detail/error_code.hpp>
|
#include <boost/system/detail/error_code.hpp>
|
||||||
|
|
||||||
@@ -125,8 +124,8 @@ public:
|
|||||||
|
|
||||||
beast::Journal m_journal;
|
beast::Journal m_journal;
|
||||||
|
|
||||||
boost::asio::io_context& m_io_context;
|
boost::asio::io_service& m_io_service;
|
||||||
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
|
boost::asio::io_service::strand m_strand;
|
||||||
boost::asio::ip::tcp::resolver m_resolver;
|
boost::asio::ip::tcp::resolver m_resolver;
|
||||||
|
|
||||||
std::condition_variable m_cv;
|
std::condition_variable m_cv;
|
||||||
@@ -156,12 +155,12 @@ public:
|
|||||||
std::deque<Work> m_work;
|
std::deque<Work> m_work;
|
||||||
|
|
||||||
ResolverAsioImpl(
|
ResolverAsioImpl(
|
||||||
boost::asio::io_context& io_context,
|
boost::asio::io_service& io_service,
|
||||||
beast::Journal journal)
|
beast::Journal journal)
|
||||||
: m_journal(journal)
|
: m_journal(journal)
|
||||||
, m_io_context(io_context)
|
, m_io_service(io_service)
|
||||||
, m_strand(boost::asio::make_strand(io_context))
|
, m_strand(io_service)
|
||||||
, m_resolver(io_context)
|
, m_resolver(io_service)
|
||||||
, m_asyncHandlersCompleted(true)
|
, m_asyncHandlersCompleted(true)
|
||||||
, m_stop_called(false)
|
, m_stop_called(false)
|
||||||
, m_stopped(true)
|
, m_stopped(true)
|
||||||
@@ -217,14 +216,8 @@ public:
|
|||||||
{
|
{
|
||||||
if (m_stop_called.exchange(true) == false)
|
if (m_stop_called.exchange(true) == false)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_io_service.dispatch(m_strand.wrap(std::bind(
|
||||||
m_io_context,
|
&ResolverAsioImpl::do_stop, this, CompletionCounter(this))));
|
||||||
boost::asio::bind_executor(
|
|
||||||
m_strand,
|
|
||||||
std::bind(
|
|
||||||
&ResolverAsioImpl::do_stop,
|
|
||||||
this,
|
|
||||||
CompletionCounter(this))));
|
|
||||||
|
|
||||||
JLOG(m_journal.debug()) << "Queued a stop request";
|
JLOG(m_journal.debug()) << "Queued a stop request";
|
||||||
}
|
}
|
||||||
@@ -255,16 +248,12 @@ public:
|
|||||||
|
|
||||||
// TODO NIKB use rvalue references to construct and move
|
// TODO NIKB use rvalue references to construct and move
|
||||||
// reducing cost.
|
// reducing cost.
|
||||||
boost::asio::dispatch(
|
m_io_service.dispatch(m_strand.wrap(std::bind(
|
||||||
m_io_context,
|
&ResolverAsioImpl::do_resolve,
|
||||||
boost::asio::bind_executor(
|
this,
|
||||||
m_strand,
|
names,
|
||||||
std::bind(
|
handler,
|
||||||
&ResolverAsioImpl::do_resolve,
|
CompletionCounter(this))));
|
||||||
this,
|
|
||||||
names,
|
|
||||||
handler,
|
|
||||||
CompletionCounter(this))));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//-------------------------------------------------------------------------
|
//-------------------------------------------------------------------------
|
||||||
@@ -290,20 +279,19 @@ public:
|
|||||||
std::string name,
|
std::string name,
|
||||||
boost::system::error_code const& ec,
|
boost::system::error_code const& ec,
|
||||||
HandlerType handler,
|
HandlerType handler,
|
||||||
boost::asio::ip::tcp::resolver::results_type results,
|
boost::asio::ip::tcp::resolver::iterator iter,
|
||||||
CompletionCounter)
|
CompletionCounter)
|
||||||
{
|
{
|
||||||
if (ec == boost::asio::error::operation_aborted)
|
if (ec == boost::asio::error::operation_aborted)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
std::vector<beast::IP::Endpoint> addresses;
|
std::vector<beast::IP::Endpoint> addresses;
|
||||||
auto iter = results.begin();
|
|
||||||
|
|
||||||
// If we get an error message back, we don't return any
|
// If we get an error message back, we don't return any
|
||||||
// results that we may have gotten.
|
// results that we may have gotten.
|
||||||
if (!ec)
|
if (!ec)
|
||||||
{
|
{
|
||||||
while (iter != results.end())
|
while (iter != boost::asio::ip::tcp::resolver::iterator())
|
||||||
{
|
{
|
||||||
addresses.push_back(
|
addresses.push_back(
|
||||||
beast::IPAddressConversion::from_asio(*iter));
|
beast::IPAddressConversion::from_asio(*iter));
|
||||||
@@ -313,14 +301,8 @@ public:
|
|||||||
|
|
||||||
handler(name, addresses);
|
handler(name, addresses);
|
||||||
|
|
||||||
boost::asio::post(
|
m_io_service.post(m_strand.wrap(std::bind(
|
||||||
m_io_context,
|
&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
|
||||||
boost::asio::bind_executor(
|
|
||||||
m_strand,
|
|
||||||
std::bind(
|
|
||||||
&ResolverAsioImpl::do_work,
|
|
||||||
this,
|
|
||||||
CompletionCounter(this))));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
HostAndPort
|
HostAndPort
|
||||||
@@ -401,21 +383,16 @@ public:
|
|||||||
{
|
{
|
||||||
JLOG(m_journal.error()) << "Unable to parse '" << name << "'";
|
JLOG(m_journal.error()) << "Unable to parse '" << name << "'";
|
||||||
|
|
||||||
boost::asio::post(
|
m_io_service.post(m_strand.wrap(std::bind(
|
||||||
m_io_context,
|
&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
|
||||||
boost::asio::bind_executor(
|
|
||||||
m_strand,
|
|
||||||
std::bind(
|
|
||||||
&ResolverAsioImpl::do_work,
|
|
||||||
this,
|
|
||||||
CompletionCounter(this))));
|
|
||||||
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
boost::asio::ip::tcp::resolver::query query(host, port);
|
||||||
|
|
||||||
m_resolver.async_resolve(
|
m_resolver.async_resolve(
|
||||||
host,
|
query,
|
||||||
port,
|
|
||||||
std::bind(
|
std::bind(
|
||||||
&ResolverAsioImpl::do_finish,
|
&ResolverAsioImpl::do_finish,
|
||||||
this,
|
this,
|
||||||
@@ -446,14 +423,10 @@ public:
|
|||||||
|
|
||||||
if (m_work.size() > 0)
|
if (m_work.size() > 0)
|
||||||
{
|
{
|
||||||
boost::asio::post(
|
m_io_service.post(m_strand.wrap(std::bind(
|
||||||
m_io_context,
|
&ResolverAsioImpl::do_work,
|
||||||
boost::asio::bind_executor(
|
this,
|
||||||
m_strand,
|
CompletionCounter(this))));
|
||||||
std::bind(
|
|
||||||
&ResolverAsioImpl::do_work,
|
|
||||||
this,
|
|
||||||
CompletionCounter(this))));
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -462,9 +435,9 @@ public:
|
|||||||
//-----------------------------------------------------------------------------
|
//-----------------------------------------------------------------------------
|
||||||
|
|
||||||
std::unique_ptr<ResolverAsio>
|
std::unique_ptr<ResolverAsio>
|
||||||
ResolverAsio::New(boost::asio::io_context& io_context, beast::Journal journal)
|
ResolverAsio::New(boost::asio::io_service& io_service, beast::Journal journal)
|
||||||
{
|
{
|
||||||
return std::make_unique<ResolverAsioImpl>(io_context, journal);
|
return std::make_unique<ResolverAsioImpl>(io_service, journal);
|
||||||
}
|
}
|
||||||
|
|
||||||
//-----------------------------------------------------------------------------
|
//-----------------------------------------------------------------------------
|
||||||
|
|||||||
@@ -30,11 +30,9 @@
|
|||||||
#include <xrpl/beast/utility/instrumentation.h>
|
#include <xrpl/beast/utility/instrumentation.h>
|
||||||
|
|
||||||
#include <boost/asio/basic_waitable_timer.hpp>
|
#include <boost/asio/basic_waitable_timer.hpp>
|
||||||
#include <boost/asio/bind_executor.hpp>
|
|
||||||
#include <boost/asio/buffer.hpp>
|
#include <boost/asio/buffer.hpp>
|
||||||
#include <boost/asio/error.hpp>
|
#include <boost/asio/error.hpp>
|
||||||
#include <boost/asio/executor_work_guard.hpp>
|
#include <boost/asio/io_service.hpp>
|
||||||
#include <boost/asio/io_context.hpp>
|
|
||||||
#include <boost/asio/ip/udp.hpp>
|
#include <boost/asio/ip/udp.hpp>
|
||||||
#include <boost/asio/strand.hpp>
|
#include <boost/asio/strand.hpp>
|
||||||
#include <boost/system/detail/error_code.hpp>
|
#include <boost/system/detail/error_code.hpp>
|
||||||
@@ -240,11 +238,9 @@ private:
|
|||||||
Journal m_journal;
|
Journal m_journal;
|
||||||
IP::Endpoint m_address;
|
IP::Endpoint m_address;
|
||||||
std::string m_prefix;
|
std::string m_prefix;
|
||||||
boost::asio::io_context m_io_context;
|
boost::asio::io_service m_io_service;
|
||||||
std::optional<boost::asio::executor_work_guard<
|
std::optional<boost::asio::io_service::work> m_work;
|
||||||
boost::asio::io_context::executor_type>>
|
boost::asio::io_service::strand m_strand;
|
||||||
m_work;
|
|
||||||
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
|
|
||||||
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
|
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
|
||||||
boost::asio::ip::udp::socket m_socket;
|
boost::asio::ip::udp::socket m_socket;
|
||||||
std::deque<std::string> m_data;
|
std::deque<std::string> m_data;
|
||||||
@@ -268,24 +264,18 @@ public:
|
|||||||
: m_journal(journal)
|
: m_journal(journal)
|
||||||
, m_address(address)
|
, m_address(address)
|
||||||
, m_prefix(prefix)
|
, m_prefix(prefix)
|
||||||
, m_work(boost::asio::make_work_guard(m_io_context))
|
, m_work(std::ref(m_io_service))
|
||||||
, m_strand(boost::asio::make_strand(m_io_context))
|
, m_strand(m_io_service)
|
||||||
, m_timer(m_io_context)
|
, m_timer(m_io_service)
|
||||||
, m_socket(m_io_context)
|
, m_socket(m_io_service)
|
||||||
, m_thread(&StatsDCollectorImp::run, this)
|
, m_thread(&StatsDCollectorImp::run, this)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
~StatsDCollectorImp() override
|
~StatsDCollectorImp() override
|
||||||
{
|
{
|
||||||
try
|
boost::system::error_code ec;
|
||||||
{
|
m_timer.cancel(ec);
|
||||||
m_timer.cancel();
|
|
||||||
}
|
|
||||||
catch (boost::system::system_error const&)
|
|
||||||
{
|
|
||||||
// ignored
|
|
||||||
}
|
|
||||||
|
|
||||||
m_work.reset();
|
m_work.reset();
|
||||||
m_thread.join();
|
m_thread.join();
|
||||||
@@ -344,10 +334,10 @@ public:
|
|||||||
|
|
||||||
//--------------------------------------------------------------------------
|
//--------------------------------------------------------------------------
|
||||||
|
|
||||||
boost::asio::io_context&
|
boost::asio::io_service&
|
||||||
get_io_context()
|
get_io_service()
|
||||||
{
|
{
|
||||||
return m_io_context;
|
return m_io_service;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string const&
|
std::string const&
|
||||||
@@ -365,14 +355,8 @@ public:
|
|||||||
void
|
void
|
||||||
post_buffer(std::string&& buffer)
|
post_buffer(std::string&& buffer)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_io_service.dispatch(m_strand.wrap(std::bind(
|
||||||
m_io_context,
|
&StatsDCollectorImp::do_post_buffer, this, std::move(buffer))));
|
||||||
boost::asio::bind_executor(
|
|
||||||
m_strand,
|
|
||||||
std::bind(
|
|
||||||
&StatsDCollectorImp::do_post_buffer,
|
|
||||||
this,
|
|
||||||
std::move(buffer))));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// The keepAlive parameter makes sure the buffers sent to
|
// The keepAlive parameter makes sure the buffers sent to
|
||||||
@@ -402,7 +386,8 @@ public:
|
|||||||
for (auto const& buffer : buffers)
|
for (auto const& buffer : buffers)
|
||||||
{
|
{
|
||||||
std::string const s(
|
std::string const s(
|
||||||
buffer.data(), boost::asio::buffer_size(buffer));
|
boost::asio::buffer_cast<char const*>(buffer),
|
||||||
|
boost::asio::buffer_size(buffer));
|
||||||
std::cerr << s;
|
std::cerr << s;
|
||||||
}
|
}
|
||||||
std::cerr << '\n';
|
std::cerr << '\n';
|
||||||
@@ -471,7 +456,7 @@ public:
|
|||||||
set_timer()
|
set_timer()
|
||||||
{
|
{
|
||||||
using namespace std::chrono_literals;
|
using namespace std::chrono_literals;
|
||||||
m_timer.expires_after(1s);
|
m_timer.expires_from_now(1s);
|
||||||
m_timer.async_wait(std::bind(
|
m_timer.async_wait(std::bind(
|
||||||
&StatsDCollectorImp::on_timer, this, std::placeholders::_1));
|
&StatsDCollectorImp::on_timer, this, std::placeholders::_1));
|
||||||
}
|
}
|
||||||
@@ -513,13 +498,13 @@ public:
|
|||||||
|
|
||||||
set_timer();
|
set_timer();
|
||||||
|
|
||||||
m_io_context.run();
|
m_io_service.run();
|
||||||
|
|
||||||
m_socket.shutdown(boost::asio::ip::udp::socket::shutdown_send, ec);
|
m_socket.shutdown(boost::asio::ip::udp::socket::shutdown_send, ec);
|
||||||
|
|
||||||
m_socket.close();
|
m_socket.close();
|
||||||
|
|
||||||
m_io_context.poll();
|
m_io_service.poll();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -562,12 +547,10 @@ StatsDCounterImpl::~StatsDCounterImpl()
|
|||||||
void
|
void
|
||||||
StatsDCounterImpl::increment(CounterImpl::value_type amount)
|
StatsDCounterImpl::increment(CounterImpl::value_type amount)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_impl->get_io_service().dispatch(std::bind(
|
||||||
m_impl->get_io_context(),
|
&StatsDCounterImpl::do_increment,
|
||||||
std::bind(
|
std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
|
||||||
&StatsDCounterImpl::do_increment,
|
amount));
|
||||||
std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
|
|
||||||
amount));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -609,12 +592,10 @@ StatsDEventImpl::StatsDEventImpl(
|
|||||||
void
|
void
|
||||||
StatsDEventImpl::notify(EventImpl::value_type const& value)
|
StatsDEventImpl::notify(EventImpl::value_type const& value)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_impl->get_io_service().dispatch(std::bind(
|
||||||
m_impl->get_io_context(),
|
&StatsDEventImpl::do_notify,
|
||||||
std::bind(
|
std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
|
||||||
&StatsDEventImpl::do_notify,
|
value));
|
||||||
std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
|
|
||||||
value));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -644,23 +625,19 @@ StatsDGaugeImpl::~StatsDGaugeImpl()
|
|||||||
void
|
void
|
||||||
StatsDGaugeImpl::set(GaugeImpl::value_type value)
|
StatsDGaugeImpl::set(GaugeImpl::value_type value)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_impl->get_io_service().dispatch(std::bind(
|
||||||
m_impl->get_io_context(),
|
&StatsDGaugeImpl::do_set,
|
||||||
std::bind(
|
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
|
||||||
&StatsDGaugeImpl::do_set,
|
value));
|
||||||
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
|
|
||||||
value));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
StatsDGaugeImpl::increment(GaugeImpl::difference_type amount)
|
StatsDGaugeImpl::increment(GaugeImpl::difference_type amount)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_impl->get_io_service().dispatch(std::bind(
|
||||||
m_impl->get_io_context(),
|
&StatsDGaugeImpl::do_increment,
|
||||||
std::bind(
|
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
|
||||||
&StatsDGaugeImpl::do_increment,
|
amount));
|
||||||
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
|
|
||||||
amount));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -736,12 +713,10 @@ StatsDMeterImpl::~StatsDMeterImpl()
|
|||||||
void
|
void
|
||||||
StatsDMeterImpl::increment(MeterImpl::value_type amount)
|
StatsDMeterImpl::increment(MeterImpl::value_type amount)
|
||||||
{
|
{
|
||||||
boost::asio::dispatch(
|
m_impl->get_io_service().dispatch(std::bind(
|
||||||
m_impl->get_io_context(),
|
&StatsDMeterImpl::do_increment,
|
||||||
std::bind(
|
std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
|
||||||
&StatsDMeterImpl::do_increment,
|
amount));
|
||||||
std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
|
|
||||||
amount));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
|||||||
@@ -25,11 +25,11 @@ namespace IP {
|
|||||||
bool
|
bool
|
||||||
is_private(AddressV4 const& addr)
|
is_private(AddressV4 const& addr)
|
||||||
{
|
{
|
||||||
return ((addr.to_uint() & 0xff000000) ==
|
return ((addr.to_ulong() & 0xff000000) ==
|
||||||
0x0a000000) || // Prefix /8, 10. #.#.#
|
0x0a000000) || // Prefix /8, 10. #.#.#
|
||||||
((addr.to_uint() & 0xfff00000) ==
|
((addr.to_ulong() & 0xfff00000) ==
|
||||||
0xac100000) || // Prefix /12 172. 16.#.# - 172.31.#.#
|
0xac100000) || // Prefix /12 172. 16.#.# - 172.31.#.#
|
||||||
((addr.to_uint() & 0xffff0000) ==
|
((addr.to_ulong() & 0xffff0000) ==
|
||||||
0xc0a80000) || // Prefix /16 192.168.#.#
|
0xc0a80000) || // Prefix /16 192.168.#.#
|
||||||
addr.is_loopback();
|
addr.is_loopback();
|
||||||
}
|
}
|
||||||
@@ -44,7 +44,7 @@ char
|
|||||||
get_class(AddressV4 const& addr)
|
get_class(AddressV4 const& addr)
|
||||||
{
|
{
|
||||||
static char const* table = "AAAABBCD";
|
static char const* table = "AAAABBCD";
|
||||||
return table[(addr.to_uint() & 0xE0000000) >> 29];
|
return table[(addr.to_ulong() & 0xE0000000) >> 29];
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace IP
|
} // namespace IP
|
||||||
|
|||||||
@@ -20,8 +20,6 @@
|
|||||||
#include <xrpl/beast/net/IPAddressV4.h>
|
#include <xrpl/beast/net/IPAddressV4.h>
|
||||||
#include <xrpl/beast/net/IPAddressV6.h>
|
#include <xrpl/beast/net/IPAddressV6.h>
|
||||||
|
|
||||||
#include <boost/asio/ip/address_v4.hpp>
|
|
||||||
|
|
||||||
namespace beast {
|
namespace beast {
|
||||||
namespace IP {
|
namespace IP {
|
||||||
|
|
||||||
@@ -30,9 +28,7 @@ is_private(AddressV6 const& addr)
|
|||||||
{
|
{
|
||||||
return (
|
return (
|
||||||
(addr.to_bytes()[0] & 0xfd) || // TODO fc00::/8 too ?
|
(addr.to_bytes()[0] & 0xfd) || // TODO fc00::/8 too ?
|
||||||
(addr.is_v4_mapped() &&
|
(addr.is_v4_mapped() && is_private(addr.to_v4())));
|
||||||
is_private(boost::asio::ip::make_address_v4(
|
|
||||||
boost::asio::ip::v4_mapped, addr))));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
|
|||||||
@@ -21,8 +21,6 @@
|
|||||||
#include <xrpl/beast/net/IPEndpoint.h>
|
#include <xrpl/beast/net/IPEndpoint.h>
|
||||||
|
|
||||||
#include <boost/algorithm/string/trim.hpp>
|
#include <boost/algorithm/string/trim.hpp>
|
||||||
#include <boost/asio/ip/address.hpp>
|
|
||||||
#include <boost/asio/ip/address_v4.hpp>
|
|
||||||
#include <boost/system/detail/error_code.hpp>
|
#include <boost/system/detail/error_code.hpp>
|
||||||
|
|
||||||
#include <cctype>
|
#include <cctype>
|
||||||
@@ -169,7 +167,7 @@ operator>>(std::istream& is, Endpoint& endpoint)
|
|||||||
}
|
}
|
||||||
|
|
||||||
boost::system::error_code ec;
|
boost::system::error_code ec;
|
||||||
auto addr = boost::asio::ip::make_address(addrStr, ec);
|
auto addr = Address::from_string(addrStr, ec);
|
||||||
if (ec)
|
if (ec)
|
||||||
{
|
{
|
||||||
is.setstate(std::ios_base::failbit);
|
is.setstate(std::ios_base::failbit);
|
||||||
|
|||||||
@@ -24,7 +24,6 @@
|
|||||||
#include <xrpl/json/json_value.h>
|
#include <xrpl/json/json_value.h>
|
||||||
#include <xrpl/json/json_writer.h>
|
#include <xrpl/json/json_writer.h>
|
||||||
|
|
||||||
#include <cmath>
|
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <string>
|
#include <string>
|
||||||
@@ -686,9 +685,7 @@ Value::isConvertibleTo(ValueType other) const
|
|||||||
(other == intValue && value_.real_ >= minInt &&
|
(other == intValue && value_.real_ >= minInt &&
|
||||||
value_.real_ <= maxInt) ||
|
value_.real_ <= maxInt) ||
|
||||||
(other == uintValue && value_.real_ >= 0 &&
|
(other == uintValue && value_.real_ >= 0 &&
|
||||||
value_.real_ <= maxUInt &&
|
value_.real_ <= maxUInt) ||
|
||||||
std::fabs(round(value_.real_) - value_.real_) <
|
|
||||||
std::numeric_limits<double>::epsilon()) ||
|
|
||||||
other == realValue || other == stringValue ||
|
other == realValue || other == stringValue ||
|
||||||
other == booleanValue;
|
other == booleanValue;
|
||||||
|
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ namespace BuildInfo {
|
|||||||
// and follow the format described at http://semver.org/
|
// and follow the format described at http://semver.org/
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
// clang-format off
|
// clang-format off
|
||||||
char const* const versionString = "2.6.0"
|
char const* const versionString = "2.6.1-rc2"
|
||||||
// clang-format on
|
// clang-format on
|
||||||
|
|
||||||
#if defined(DEBUG) || defined(SANITIZER)
|
#if defined(DEBUG) || defined(SANITIZER)
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user