Compare commits


4 Commits

Author | SHA1 | Message | Date
Pratik Mankawde | c8877ec45d | added a unit test for openssl hash calculation (Signed-off-by: Pratik Mankawde <pmankawde@ripple.com>) | 2025-10-22 13:01:04 +01:00
Pratik Mankawde | c4f1313d29 | ignore deprecated (Signed-off-by: Pratik Mankawde <pmankawde@ripple.com>) | 2025-10-02 17:36:39 +01:00
Pratik Mankawde | f770aac75c | deleted conan.lock file (Signed-off-by: Pratik Mankawde <pmankawde@ripple.com>) | 2025-10-02 17:02:20 +01:00
Pratik Mankawde | 2cac24512c | Tweaked Conan scripts to build against openssl3.6.0 (Signed-off-by: Pratik Mankawde <pmankawde@ripple.com>) | 2025-10-02 16:58:47 +01:00
1516 changed files with 46933 additions and 62388 deletions


@@ -1,20 +1,4 @@
---
BreakBeforeBraces: Custom
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
@@ -34,7 +18,20 @@ AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 80
@@ -69,6 +66,8 @@ IndentFunctionDeclarationAfterType: false
IndentRequiresClause: true
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
@@ -97,7 +96,7 @@ TabWidth: 8
UseTab: Never
QualifierAlignment: Right
---
Language: Proto
BasedOnStyle: Google
ColumnLimit: 0
Language: JavaScript
---
Language: Json
IndentWidth: 2


@@ -33,6 +33,5 @@ slack_app: false
ignore:
- "src/test/"
- "src/tests/"
- "include/xrpl/beast/test/"
- "include/xrpl/beast/unit_test/"


@@ -12,5 +12,3 @@ fe9a5365b8a52d4acc42eb27369247e6f238a4f9
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
552377c76f55b403a1c876df873a23d780fcc81c
97f0747e103f13e26e45b731731059b32f7679ac
b13370ac0d207217354f1fc1c29aef87769fb8a1
896b8c3b54a22b0497cb0d1ce95e1095f9a227ce

.github/CODEOWNERS

@@ -1,2 +1,8 @@
# Allow anyone to review any change by default.
*
# Require the rpc-reviewers team to review changes to the rpc code.
include/xrpl/protocol/ @xrplf/rpc-reviewers
src/libxrpl/protocol/ @xrplf/rpc-reviewers
src/xrpld/rpc/ @xrplf/rpc-reviewers
src/xrpld/app/misc/ @xrplf/rpc-reviewers


@@ -1,49 +0,0 @@
name: Build Conan dependencies
description: "Install Conan dependencies, optionally forcing a rebuild of all dependencies."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
build_dir:
description: "The directory where to build."
required: true
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
build_nproc:
description: "The number of processors to use for building."
required: true
force_build:
description: 'Force building of all dependencies ("true", "false").'
required: false
default: "false"
log_verbosity:
description: "The logging verbosity."
required: false
default: "verbose"
runs:
using: composite
steps:
- name: Install Conan dependencies
shell: bash
env:
BUILD_DIR: ${{ inputs.build_dir }}
BUILD_NPROC: ${{ inputs.build_nproc }}
BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
BUILD_TYPE: ${{ inputs.build_type }}
LOG_VERBOSITY: ${{ inputs.log_verbosity }}
run: |
echo 'Installing dependencies.'
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
conan install \
--output-folder . \
--build="${BUILD_OPTION}" \
--options:host='&:tests=True' \
--options:host='&:xrpld=True' \
--settings:all build_type="${BUILD_TYPE}" \
--conf:all tools.build:jobs=${BUILD_NPROC} \
--conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
--conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
..
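A hypothetical call site for this (now deleted) composite action; the directory name is assumed from the `.github/actions/build-deps/**` path referenced in `on-pr.yml` later in this diff. As the comment above notes, actions pass all inputs as strings, so boolean-like values must be quoted:

```yaml
- name: Build Conan dependencies
  uses: ./.github/actions/build-deps # assumed path, see on-pr.yml
  with:
    build_dir: .build
    build_type: Release
    build_nproc: "4" # hypothetical value; all inputs are strings
    force_build: "false"
```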

.github/actions/build/action.yml (new file)

@@ -0,0 +1,34 @@
name: build
inputs:
generator:
default: null
configuration:
required: true
cmake-args:
default: null
cmake-target:
default: all
# An implicit input is the environment variable `build_dir`.
runs:
using: composite
steps:
- name: configure
shell: bash
run: |
cd ${build_dir}
cmake \
${{ inputs.generator && format('-G "{0}"', inputs.generator) || '' }} \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \
-Dtests=TRUE \
-Dxrpld=TRUE \
${{ inputs.cmake-args }} \
..
- name: build
shell: bash
run: |
cmake \
--build ${build_dir} \
--config ${{ inputs.configuration }} \
--parallel ${NUM_PROCESSORS:-$(nproc)} \
--target ${{ inputs.cmake-target }}
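For reference, the `macos.yml` workflow later in this diff calls this action roughly as follows (a sketch; the implicit `build_dir` input must be supplied through the environment):

```yaml
env:
  build_dir: .build # implicit input read by the configure and build steps
steps:
  - name: build
    uses: ./.github/actions/build
    with:
      generator: Ninja
      configuration: Release
      cmake-args: "-Dassert=TRUE -Dwerr=TRUE"
```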

.github/actions/dependencies/action.yml (new file)

@@ -0,0 +1,38 @@
name: dependencies
inputs:
configuration:
required: true
# Implicit inputs are the environment variables `build_dir`, CONAN_REMOTE_URL,
# CONAN_REMOTE_USERNAME, and CONAN_REMOTE_PASSWORD. The latter two are only
# used to upload newly built dependencies to the Conan remote.
runs:
using: composite
steps:
- name: add Conan remote
if: ${{ env.CONAN_REMOTE_URL != '' }}
shell: bash
run: |
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
conan remote add --index 0 --force xrplf ${{ env.CONAN_REMOTE_URL }}
echo "Listing Conan remotes."
conan remote list
- name: install dependencies
shell: bash
run: |
mkdir -p ${{ env.build_dir }}
cd ${{ env.build_dir }}
conan install \
--output-folder . \
--build missing \
--options:host "&:tests=True" \
--options:host "&:xrpld=True" \
--settings:all build_type=${{ inputs.configuration }} \
..
- name: upload dependencies
if: ${{ env.CONAN_REMOTE_URL != '' && env.CONAN_REMOTE_USERNAME != '' && env.CONAN_REMOTE_PASSWORD != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
shell: bash
run: |
echo "Logging into Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
conan remote login xrplf "${{ env.CONAN_REMOTE_USERNAME }}" --password "${{ env.CONAN_REMOTE_PASSWORD }}"
echo "Uploading dependencies."
conan upload '*' --confirm --check --remote xrplf
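A minimal sketch of a caller, showing the implicit environment variables described in the comment at the top of this action (values mirror the workflows later in this diff; the username and password are only needed when uploading newly built dependencies):

```yaml
env:
  build_dir: .build
  CONAN_REMOTE_URL: https://conan.ripplex.io
  CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
  CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
steps:
  - uses: ./.github/actions/dependencies
    with:
      configuration: Release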


@@ -1,43 +0,0 @@
name: Print build environment
description: "Print environment and some tooling versions"
runs:
using: composite
steps:
- name: Check configuration (Windows)
if: ${{ runner.os == 'Windows' }}
shell: bash
run: |
echo 'Checking environment variables.'
set
echo 'Checking CMake version.'
cmake --version
echo 'Checking Conan version.'
conan --version
- name: Check configuration (Linux and macOS)
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
shell: bash
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking compiler version.'
${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version
echo 'Checking Conan version.'
conan --version
echo 'Checking Ninja version.'
ninja --version
echo 'Checking nproc version.'
nproc --version


@@ -1,46 +0,0 @@
name: Setup Conan
description: "Set up Conan configuration, profile, and remote."
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
default: https://conan.ripplex.io
runs:
using: composite
steps:
- name: Set up Conan configuration
shell: bash
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ runner.os == 'Linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
shell: bash
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
run: |
echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"
echo 'Listing Conan remotes.'
conan remote list


@@ -1,36 +0,0 @@
## Renaming ripple(d) to xrpl(d)
In the initial phases of development of the XRPL, the open source codebase was
called "rippled", and it retains that name to this day. Today, over 1000 nodes
run the application, and code contributions have been submitted by developers
around the world. The XRPL community is larger than ever.
In light of the decentralized and diversified nature of the XRPL, we will rename
references to `ripple` and `rippled` to `xrpl` and `xrpld`, where appropriate.
See [here](https://xls.xrpl.org/xls/XLS-0095-rename-rippled-to-xrpld.html) for
more information.
### Scripts
To facilitate this transition, there will be multiple scripts that developers
can run on their own PRs and forks to minimize conflicts. Each script should be
run from the repository root.
1. `.github/scripts/rename/definitions.sh`: This script will rename all
definitions, such as include guards, from `RIPPLE_XXX` and `RIPPLED_XXX` to
`XRPL_XXX`.
2. `.github/scripts/rename/copyright.sh`: This script will remove superfluous
copyright notices.
3. `.github/scripts/rename/cmake.sh`: This script will rename all CMake files
from `RippleXXX.cmake` or `RippledXXX.cmake` to `XrplXXX.cmake`, and any
references to `ripple` and `rippled` (with or without capital letters) to
`xrpl` and `xrpld`, respectively. The name of the binary will remain as-is,
and will only be renamed to `xrpld` by a later script.
You can run all these scripts from the repository root as follows:
```shell
./.github/scripts/rename/definitions.sh .
./.github/scripts/rename/copyright.sh .
./.github/scripts/rename/cmake.sh .
```
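As a hypothetical illustration (the header name is made up, not a file from the repository), the definitions script rewrites include guards like so:

```shell
# Hypothetical before/after for definitions.sh on a header file:
#   before: #ifndef RIPPLE_BASICS_BASE58_H
#   after:  #ifndef XRPL_BASICS_BASE58_H
# The script applies a sed expression of this shape to every header it finds:
sed -E 's@#(define|endif|if|ifdef|ifndef)(.*)(RIPPLED_|RIPPLE_)([A-Z0-9_]+)@#\1\2XRPL_\4@g' SomeFile.h
```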


@@ -1,85 +0,0 @@
#!/bin/bash
# Exit the script as soon as an error occurs.
set -e
# On macOS, ensure that GNU sed and head are installed and available as `gsed`
# and `ghead`, respectively.
SED_COMMAND=sed
HEAD_COMMAND=head
if [[ "${OSTYPE}" == 'darwin'* ]]; then
if ! command -v gsed &> /dev/null; then
echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
exit 1
fi
SED_COMMAND=gsed
if ! command -v ghead &> /dev/null; then
echo "Error: ghead is not installed. Please install it using 'brew install coreutils'."
exit 1
fi
HEAD_COMMAND=ghead
fi
# This script renames CMake files from `RippleXXX.cmake` or `RippledXXX.cmake`
# to `XrplXXX.cmake`, and any references to `ripple` and `rippled` (with or
# without capital letters) to `xrpl` and `xrpld`, respectively. The name of the
# binary will remain as-is, and will only be renamed to `xrpld` in a different
# script, but the proto file will be renamed.
# Usage: .github/scripts/rename/cmake.sh <repository directory>
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <repository directory>"
exit 1
fi
DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
echo "Error: Directory '${DIRECTORY}' does not exist."
exit 1
fi
pushd ${DIRECTORY}
# Rename the files.
find cmake -type f -name 'Rippled*.cmake' -exec bash -c 'mv "${1}" "${1/Rippled/Xrpl}"' - {} \;
find cmake -type f -name 'Ripple*.cmake' -exec bash -c 'mv "${1}" "${1/Ripple/Xrpl}"' - {} \;
if [ -e cmake/xrpl_add_test.cmake ]; then
mv cmake/xrpl_add_test.cmake cmake/XrplAddTest.cmake
fi
if [ -e include/xrpl/proto/ripple.proto ]; then
mv include/xrpl/proto/ripple.proto include/xrpl/proto/xrpl.proto
fi
# Rename inside the files.
find cmake -type f -name '*.cmake' | while read -r FILE; do
echo "Processing file: ${FILE}"
${SED_COMMAND} -i 's/Rippled/Xrpld/g' "${FILE}"
${SED_COMMAND} -i 's/Ripple/Xrpl/g' "${FILE}"
${SED_COMMAND} -i 's/rippled/xrpld/g' "${FILE}"
${SED_COMMAND} -i 's/ripple/xrpl/g' "${FILE}"
done
${SED_COMMAND} -i -E 's/Rippled?/Xrpl/g' CMakeLists.txt
${SED_COMMAND} -i 's/ripple/xrpl/g' CMakeLists.txt
${SED_COMMAND} -i 's/include(xrpl_add_test)/include(XrplAddTest)/' src/tests/libxrpl/CMakeLists.txt
${SED_COMMAND} -i 's/ripple.pb.h/xrpl.pb.h/' include/xrpl/protocol/messages.h
${SED_COMMAND} -i 's/ripple.pb.h/xrpl.pb.h/' BUILD.md
# Restore the name of the validator keys repository.
${SED_COMMAND} -i 's@xrpl/validator-keys-tool@ripple/validator-keys-tool@' cmake/XrplValidatorKeys.cmake
# Ensure the name of the binary and config remain 'rippled' for now.
${SED_COMMAND} -i -E 's/xrpld(-example)?\.cfg/rippled\1.cfg/g' cmake/XrplInstall.cmake
if grep -q '"xrpld"' cmake/XrplCore.cmake; then
# The script has been rerun, so just restore the name of the binary.
${SED_COMMAND} -i 's/"xrpld"/"rippled"/' cmake/XrplCore.cmake
elif ! grep -q '"rippled"' cmake/XrplCore.cmake; then
${HEAD_COMMAND} -n -1 cmake/XrplCore.cmake > cmake.tmp
echo ' # For the time being, we will keep the name of the binary as it was.' >> cmake.tmp
echo ' set_target_properties(xrpld PROPERTIES OUTPUT_NAME "rippled")' >> cmake.tmp
tail -1 cmake/XrplCore.cmake >> cmake.tmp
mv cmake.tmp cmake/XrplCore.cmake
fi
popd
echo "Renaming complete."


@@ -1,106 +0,0 @@
#!/bin/bash
# Exit the script as soon as an error occurs.
set -e
# On macOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
if ! command -v gsed &> /dev/null; then
echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
exit 1
fi
SED_COMMAND=gsed
fi
# This script removes superfluous copyright notices in source and header files
# in this project. Specifically, it removes all notices referencing Ripple,
# XRPLF, and certain individual contributors upon mutual agreement, so the one
# in the LICENSE.md file applies throughout. Copyright notices referencing
# external contributions, e.g. from Bitcoin, remain as-is.
# Usage: .github/scripts/rename/copyright.sh <repository directory>
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <repository directory>"
exit 1
fi
DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
echo "Error: Directory '${DIRECTORY}' does not exist."
exit 1
fi
pushd ${DIRECTORY}
# Prevent sed and echo from removing newlines and tabs in string literals by
# temporarily replacing them with placeholders. This only affects one file.
PLACEHOLDER_NEWLINE="__NEWLINE__"
PLACEHOLDER_TAB="__TAB__"
${SED_COMMAND} -i -E "s@\\\n@${PLACEHOLDER_NEWLINE}@g" src/test/rpc/ValidatorInfo_test.cpp
${SED_COMMAND} -i -E "s@\\\t@${PLACEHOLDER_TAB}@g" src/test/rpc/ValidatorInfo_test.cpp
# Process the include/ and src/ directories.
DIRECTORIES=("include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
echo "Processing directory: ${DIRECTORY}"
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.macro" \) | while read -r FILE; do
echo "Processing file: ${FILE}"
# Handle the cases where the copyright notice is enclosed in /* ... */
# and usually surrounded by //---- and //======.
${SED_COMMAND} -z -i -E 's@^//-------+\n+@@' "${FILE}"
${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}"
${SED_COMMAND} -z -i -E 's@^//=======+\n+@@' "${FILE}"
# Handle the cases where the copyright notice is commented out with //.
${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}"
done
done
# Restore copyright notices that were removed from specific files, without
# restoring the verbiage that is already present in LICENSE.md. Ensure that if
# the script is run multiple times, duplicate notices are not added.
if ! grep -q 'Raw Material Software' include/xrpl/beast/core/CurrentThreadName.h; then
echo -e "// Portions of this file are from JUCE (http://www.juce.com).\n// Copyright (c) 2013 - Raw Material Software Ltd.\n// Please visit http://www.juce.com\n\n$(cat include/xrpl/beast/core/CurrentThreadName.h)" > include/xrpl/beast/core/CurrentThreadName.h
fi
if ! grep -q 'Dev Null' src/test/app/NetworkID_test.cpp; then
echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/NetworkID_test.cpp)" > src/test/app/NetworkID_test.cpp
fi
if ! grep -q 'Dev Null' src/test/app/tx/apply_test.cpp; then
echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/tx/apply_test.cpp)" > src/test/app/tx/apply_test.cpp
fi
if ! grep -q 'Dev Null' src/test/app/NetworkOPs_test.cpp; then
echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/NetworkOPs_test.cpp)" > src/test/app/NetworkOPs_test.cpp
fi
if ! grep -q 'Dev Null' src/test/rpc/ManifestRPC_test.cpp; then
echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/rpc/ManifestRPC_test.cpp)" > src/test/rpc/ManifestRPC_test.cpp
fi
if ! grep -q 'Dev Null' src/test/rpc/ValidatorInfo_test.cpp; then
echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/rpc/ValidatorInfo_test.cpp)" > src/test/rpc/ValidatorInfo_test.cpp
fi
if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/DoManifest.cpp; then
echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/DoManifest.cpp)" > src/xrpld/rpc/handlers/DoManifest.cpp
fi
if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/ValidatorInfo.cpp; then
echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/ValidatorInfo.cpp)" > src/xrpld/rpc/handlers/ValidatorInfo.cpp
fi
if ! grep -q 'Bougalis' include/xrpl/basics/SlabAllocator.h; then
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h
fi
if ! grep -q 'Bougalis' include/xrpl/basics/spinlock.h; then
echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h
fi
if ! grep -q 'Bougalis' include/xrpl/basics/tagged_integer.h; then
echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h
fi
if ! grep -q 'Ritchford' include/xrpl/beast/utility/Zero.h; then
echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h
fi
# Restore newlines and tabs in string literals in the affected file.
${SED_COMMAND} -i -E "s@${PLACEHOLDER_NEWLINE}@\\\n@g" src/test/rpc/ValidatorInfo_test.cpp
${SED_COMMAND} -i -E "s@${PLACEHOLDER_TAB}@\\\t@g" src/test/rpc/ValidatorInfo_test.cpp
popd
echo "Removal complete."


@@ -1,42 +0,0 @@
#!/bin/bash
# Exit the script as soon as an error occurs.
set -e
# On macOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
if ! command -v gsed &> /dev/null; then
echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
exit 1
fi
SED_COMMAND=gsed
fi
# This script renames definitions, such as include guards, in this project.
# Specifically, it renames "RIPPLED_XXX" and "RIPPLE_XXX" to "XRPL_XXX" by
# scanning all cmake, header, and source files in the specified directory and
# its subdirectories.
# Usage: .github/scripts/rename/definitions.sh <repository directory>
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <repository directory>"
exit 1
fi
DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
echo "Error: Directory '${DIRECTORY}' does not exist."
exit 1
fi
find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" \) | while read -r FILE; do
echo "Processing file: ${FILE}"
${SED_COMMAND} -i -E 's@#(define|endif|if|ifdef|ifndef)(.*)(RIPPLED_|RIPPLE_)([A-Z0-9_]+)@#\1\2XRPL_\4@g' "${FILE}"
done
find "${DIRECTORY}" -type f \( -name "*.cmake" -o -name "*.txt" \) | while read -r FILE; do
echo "Processing file: ${FILE}"
${SED_COMMAND} -i -E 's@(RIPPLED_|RIPPLE_)([A-Z0-9_]+)@XRPL_\2@g' "${FILE}"
done
echo "Renaming complete."


@@ -1,197 +0,0 @@
#!/usr/bin/env python3
import argparse
import itertools
import json
from pathlib import Path
from dataclasses import dataclass
THIS_DIR = Path(__file__).parent.resolve()
@dataclass
class Config:
architecture: list[dict]
os: list[dict]
build_type: list[str]
cmake_args: list[str]
'''
Generate a strategy matrix for GitHub Actions CI.
On each PR commit we build a selection of Debian, RHEL, Ubuntu, macOS, and
Windows configurations, while upon merge into the develop, release, or master
branches we build all configurations and test most of them.
We will further set additional CMake arguments as follows:
- All builds will have the `tests`, `werr`, and `xrpld` options.
- All builds will have the `wextra` option except for GCC 12 and Clang 16.
- All release builds will have the `assert` option.
- Certain Debian Bookworm configurations will change the reference fee, enable
codecov, and enable voidstar in PRs.
'''
def generate_strategy_matrix(all: bool, config: Config) -> list:
configurations = []
for architecture, os, build_type, cmake_args in itertools.product(config.architecture, config.os, config.build_type, config.cmake_args):
# The default CMake target is 'all' for Linux and macOS and 'install'
# for Windows, but it can get overridden for certain configurations.
cmake_target = 'install' if os["distro_name"] == 'windows' else 'all'
# We build and test all configurations by default, except for Windows in
# Debug, because it is too slow, as well as when code coverage is
# enabled as that mode already runs the tests.
build_only = False
if os['distro_name'] == 'windows' and build_type == 'Debug':
build_only = True
# Only generate a subset of configurations in PRs.
if not all:
# Debian:
# - Bookworm using GCC 13: Release and Unity on linux/amd64, set
# the reference fee to 500.
# - Bookworm using GCC 15: Debug and no Unity on linux/amd64, enable
# code coverage (which will be done below).
# - Bookworm using Clang 16: Debug and no Unity on linux/arm64,
# enable voidstar.
# - Bookworm using Clang 17: Release and no Unity on linux/amd64,
# set the reference fee to 1000.
# - Bookworm using Clang 20: Debug and Unity on linux/amd64.
if os['distro_name'] == 'debian':
skip = True
if os['distro_version'] == 'bookworm':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-13' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}'
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-16' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64':
cmake_args = f'-Dvoidstar=ON {cmake_args}'
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-17' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
cmake_args = f'-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}'
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if skip:
continue
# RHEL:
# - 9 using GCC 12: Debug and Unity on linux/amd64.
# - 10 using Clang: Release and no Unity on linux/amd64.
if os['distro_name'] == 'rhel':
skip = True
if os['distro_version'] == '9':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
elif os['distro_version'] == '10':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if skip:
continue
# Ubuntu:
# - Jammy using GCC 12: Debug and no Unity on linux/arm64.
# - Noble using GCC 14: Release and Unity on linux/amd64.
# - Noble using Clang 18: Debug and no Unity on linux/amd64.
# - Noble using Clang 19: Release and Unity on linux/arm64.
if os['distro_name'] == 'ubuntu':
skip = True
if os['distro_version'] == 'jammy':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/arm64':
skip = False
elif os['distro_version'] == 'noble':
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-14' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-18' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
skip = False
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-19' and build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/arm64':
skip = False
if skip:
continue
# macOS:
# - Debug and no Unity on macos/arm64.
if os['distro_name'] == 'macos' and not (build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'macos/arm64'):
continue
# Windows:
# - Release and Unity on windows/amd64.
if os['distro_name'] == 'windows' and not (build_type == 'Release' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'windows/amd64'):
continue
# Additional CMake arguments.
cmake_args = f'{cmake_args} -Dtests=ON -Dwerr=ON -Dxrpld=ON'
if f'{os['compiler_name']}-{os['compiler_version']}' not in ['gcc-12', 'clang-16']:
cmake_args = f'{cmake_args} -Dwextra=ON'
if build_type == 'Release':
cmake_args = f'{cmake_args} -Dassert=ON'
# We skip all RHEL on arm64 due to a build failure that needs further
# investigation.
if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
continue
# We skip all clang-20 on arm64 due to a Boost 1.86 build error.
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
continue
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
# Unity on linux/amd64
if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-15' and build_type == 'Debug' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
cmake_args = f'-Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0 {cmake_args}'
cmake_target = 'coverage'
build_only = True
# Generate a unique name for the configuration, e.g. macos-arm64-debug
# or debian-bookworm-gcc-12-amd64-release-unity.
config_name = os['distro_name']
if (n := os['distro_version']) != '':
config_name += f'-{n}'
if (n := os['compiler_name']) != '':
config_name += f'-{n}'
if (n := os['compiler_version']) != '':
config_name += f'-{n}'
config_name += f'-{architecture['platform'][architecture['platform'].find('/')+1:]}'
config_name += f'-{build_type.lower()}'
if '-Dunity=ON' in cmake_args:
config_name += '-unity'
# Add the configuration to the list, with the most unique fields first,
# so that they are easier to identify in the GitHub Actions UI, as long
# names get truncated.
configurations.append({
'config_name': config_name,
'cmake_args': cmake_args,
'cmake_target': cmake_target,
'build_only': build_only,
'build_type': build_type,
'os': os,
'architecture': architecture,
})
return configurations
def read_config(file: Path) -> Config:
config = json.loads(file.read_text())
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
return Config(**config)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true")
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=False, type=Path)
args = parser.parse_args()
matrix = []
if args.config is None or args.config == '':
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "linux.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "macos.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "windows.json"))
else:
matrix += generate_strategy_matrix(args.all, read_config(args.config))
# Generate the strategy matrix.
print(f'matrix={json.dumps({"include": matrix})}')
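For reference, when run with `--all`, a single entry in the emitted matrix might look like the sketch below (fields assembled per the logic above: gcc-12 omits `-Dwextra=ON`, Release adds `-Dassert=ON`, and the script prints the whole list under `matrix={"include": [...]}`):

```json
{
  "config_name": "debian-bookworm-gcc-12-amd64-release-unity",
  "cmake_args": "-Dunity=ON -Dtests=ON -Dwerr=ON -Dxrpld=ON -Dassert=ON",
  "cmake_target": "all",
  "build_only": false,
  "build_type": "Release",
  "os": {
    "distro_name": "debian",
    "distro_version": "bookworm",
    "compiler_name": "gcc",
    "compiler_version": "12",
    "image_sha": "97ba375"
  },
  "architecture": {
    "platform": "linux/amd64",
    "runner": ["self-hosted", "Linux", "X64", "heavy"]
  }
}
```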


@@ -1,184 +0,0 @@
{
"architecture": [
{
"platform": "linux/amd64",
"runner": ["self-hosted", "Linux", "X64", "heavy"]
},
{
"platform": "linux/arm64",
"runner": ["self-hosted", "Linux", "ARM64", "heavy-arm64"]
}
],
"os": [
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "97ba375"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "jammy",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "97ba375"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "97ba375"
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}


@@ -1,22 +0,0 @@
{
"architecture": [
{
"platform": "macos/arm64",
"runner": ["self-hosted", "macOS", "ARM64", "mac-runner-m1"]
}
],
"os": [
{
"distro_name": "macos",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
}
],
"build_type": ["Debug", "Release"],
"cmake_args": [
"-Dunity=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5",
"-Dunity=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5"
]
}


@@ -1,19 +0,0 @@
{
"architecture": [
{
"platform": "windows/amd64",
"runner": ["self-hosted", "Windows", "devbox"]
}
],
"os": [
{
"distro_name": "windows",
"distro_version": "",
"compiler_name": "",
"compiler_version": "",
"image_sha": ""
}
],
"build_type": ["Debug", "Release"],
"cmake_args": ["-Dunity=OFF", "-Dunity=ON"]
}

.github/workflows/clang-format.yml (new file)

@@ -0,0 +1,64 @@
name: clang-format
on:
push:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
jobs:
check:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
runs-on: ubuntu-24.04
container: ghcr.io/xrplf/ci/tools-rippled-clang-format
steps:
# For jobs running in containers, $GITHUB_WORKSPACE and ${{ github.workspace }} might not be the
# same directory. The actions/checkout step is *supposed* to check out into $GITHUB_WORKSPACE and
# then add it to safe.directory (see instructions at https://github.com/actions/checkout),
# but that apparently does not happen for some container images. We can't be sure what is actually
# happening, so let's pre-emptively add both directories to safe.directory. There's a
# GitHub issue opened in 2022 and still unresolved in 2025: https://github.com/actions/runner/issues/2058 ¯\_(ツ)_/¯
- run: |
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory ${{ github.workspace }}
- uses: actions/checkout@v4
- name: Format first-party sources
run: |
clang-format --version
find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format -i {} +
- name: Check for differences
id: assert
shell: bash
run: |
set -o pipefail
git diff --exit-code | tee "clang-format.patch"
- name: Upload patch
if: failure() && steps.assert.outcome == 'failure'
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: clang-format.patch
if-no-files-found: ignore
path: clang-format.patch
- name: What happened?
if: failure() && steps.assert.outcome == 'failure'
env:
PREAMBLE: |
If you are reading this, you are looking at a failed Github Actions
job. That means you pushed one or more files that did not conform
to the formatting specified in .clang-format. That may be because
you neglected to run 'git clang-format' or 'clang-format' before
committing, or that your version of clang-format has an
incompatibility with the one on this
machine, which is:
SUGGESTION: |
To fix it, you can do one of two things:
1. Download and apply the patch generated as an artifact of this
job to your repo, commit, and push.
2. Run 'git-clang-format --extensions cpp,h,hpp,ipp develop'
in your repo, commit, and push.
run: |
echo "${PREAMBLE}"
clang-format --version
echo "${SUGGESTION}"
exit 1

.github/workflows/doxygen.yml (new file)

@@ -0,0 +1,37 @@
name: Build and publish Doxygen documentation
# To test this workflow, push your changes to your fork's `develop` branch.
on:
push:
branches:
- develop
- doxygen
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
documentation:
runs-on: ubuntu-latest
permissions:
contents: write
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
steps:
- name: checkout
uses: actions/checkout@v4
- name: check environment
run: |
echo ${PATH} | tr ':' '\n'
cmake --version
doxygen --version
env | sort
- name: build
run: |
mkdir build
cd build
cmake -Donly_docs=TRUE ..
cmake --build . --target docs --parallel $(nproc)
- name: publish
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: build/docs/html

.github/workflows/levelization.yml (new file)

@@ -0,0 +1,53 @@
name: levelization
on:
push:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
jobs:
check:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
runs-on: ubuntu-latest
env:
CLANG_VERSION: 10
steps:
- uses: actions/checkout@v4
- name: Check levelization
run: Builds/levelization/levelization.sh
- name: Check for differences
id: assert
run: |
set -o pipefail
git diff --exit-code | tee "levelization.patch"
- name: Upload patch
if: failure() && steps.assert.outcome == 'failure'
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: levelization.patch
if-no-files-found: ignore
path: levelization.patch
- name: What happened?
if: failure() && steps.assert.outcome == 'failure'
env:
MESSAGE: |
If you are reading this, you are looking at a failed Github
Actions job. That means you changed the dependency relationships
between the modules in rippled. That may be an improvement or a
regression. This check doesn't judge.
A rule of thumb, though, is that if your changes caused
something to be removed from loops.txt, that's probably an
improvement. If something was added, it's probably a regression.
To fix it, you can do one of two things:
1. Download and apply the patch generated as an artifact of this
job to your repo, commit, and push.
2. Run './Builds/levelization/levelization.sh' in your repo,
commit, and push.
See Builds/levelization/README.md for more info.
run: |
echo "${MESSAGE}"
exit 1

.github/workflows/libxrpl.yml (new file)

@@ -0,0 +1,91 @@
name: Check libXRPL compatibility with Clio
env:
CONAN_REMOTE_URL: https://conan.ripplex.io
CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }}
on:
pull_request:
paths:
- "src/libxrpl/protocol/BuildInfo.cpp"
- ".github/workflows/libxrpl.yml"
types: [opened, reopened, synchronize, ready_for_review]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
publish:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
name: Publish libXRPL
outputs:
outcome: ${{ steps.upload.outputs.outcome }}
version: ${{ steps.version.outputs.version }}
channel: ${{ steps.channel.outputs.channel }}
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
steps:
- name: Wait for essential checks to succeed
uses: lewagon/wait-on-check-action@v1.3.4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
running-workflow-name: wait-for-check-regexp
check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes
repo-token: ${{ secrets.GITHUB_TOKEN }}
wait-interval: 10
- name: Checkout
uses: actions/checkout@v4
- name: Generate channel
id: channel
shell: bash
run: |
echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT}
- name: Export new package
shell: bash
run: |
conan export . ${{ steps.channel.outputs.channel }}
- name: Add Conan remote
shell: bash
run: |
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force
echo "Listing Conan remotes."
conan remote list
- name: Parse new version
id: version
shell: bash
run: |
echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
| awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
- name: Try to authenticate to Conan remote
id: remote
shell: bash
run: |
# `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
# https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
# https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
# https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
echo outcome=$(conan user --remote xrplf --password >&2 \
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
- name: Upload new package
id: upload
if: (steps.remote.outputs.outcome == 'success')
shell: bash
run: |
echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}"
echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote xrplf --confirm >&2 \
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
notify_clio:
name: Notify Clio
runs-on: ubuntu-latest
needs: publish
env:
GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
steps:
- name: Notify Clio about new version
if: (needs.publish.outputs.outcome == 'success')
shell: bash
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" \
-F "client_payload[pr]=${{ github.event.pull_request.number }}"

.github/workflows/macos.yml (new file)

@@ -0,0 +1,112 @@
name: macos
on:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
push:
# If the branches list is ever changed, be sure to change it on all
# build/test jobs (nix, macos, windows, instrumentation)
branches:
# Always build the package branches
- develop
- release
- master
# Branches that opt-in to running
- "ci/**"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
CONAN_REMOTE_URL: https://conan.ripplex.io
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
# This part of the Conan configuration is specific to this workflow only; we
# do not want to pollute the 'conan/profiles' directory with settings that
# might not work for other workflows.
CONAN_GLOBAL_CONF: |
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose
jobs:
test:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
strategy:
matrix:
platform:
- macos
generator:
- Ninja
configuration:
- Release
runs-on: [self-hosted, macOS, mac-runner-m1]
env:
# The `build` action requires these variables.
build_dir: .build
NUM_PROCESSORS: 12
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: install Conan
run: |
brew install conan
- name: install Ninja
if: matrix.generator == 'Ninja'
run: brew install ninja
- name: install python
run: |
if which python > /dev/null 2>&1; then
echo "Python executable exists"
else
brew install python@3.13
ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python
fi
- name: install cmake
run: |
if which cmake > /dev/null 2>&1; then
echo "cmake executable exists"
else
brew install cmake
fi
- name: install nproc
run: |
brew install coreutils
- name: check environment
run: |
env | sort
echo ${PATH} | tr ':' '\n'
python --version
conan --version
cmake --version
nproc --version
echo -n "nproc returns: "
nproc
system_profiler SPHardwareDataType
sysctl -n hw.logicalcpu
clang --version
- name: configure Conan
run: |
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan profile show
- name: build dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration }}
- name: build
uses: ./.github/actions/build
with:
generator: ${{ matrix.generator }}
configuration: ${{ matrix.configuration }}
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
- name: test
run: |
n=$(nproc)
echo "Using $n test jobs"
cd ${build_dir}
./rippled --unittest --unittest-jobs $n
ctest -j $n --output-on-failure

.github/workflows/missing-commits.yml (new file)

@@ -0,0 +1,60 @@
name: missing-commits
on:
push:
branches:
# Only check that the branches are up to date when updating the
# relevant branches.
- develop
- release
jobs:
up_to_date:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Check for missing commits
id: commits
env:
SUGGESTION: |
If you are reading this, then the commits indicated above are
missing from "develop" and/or "release". Do a reverse-merge
as soon as possible. See CONTRIBUTING.md for instructions.
run: |
set -o pipefail
# Branches ordered by how "canonical" they are. Every commit in
# one branch should be in all the branches behind it
order=( master release develop )
branches=()
for branch in "${order[@]}"
do
# Check that the branches exist so that this job will work on
# forked repos, which don't necessarily have master and
# release branches.
if git ls-remote --exit-code --heads origin \
refs/heads/${branch} > /dev/null
then
branches+=( origin/${branch} )
fi
done
prior=()
for branch in "${branches[@]}"
do
if [[ ${#prior[@]} -ne 0 ]]
then
echo "Checking ${prior[@]} for commits missing from ${branch}"
git log --oneline --no-merges "${prior[@]}" \
^$branch | tee -a "missing-commits.txt"
echo
fi
prior+=( "${branch}" )
done
if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]]
then
echo "${SUGGESTION}"
exit 1
fi

.github/workflows/nix.yml (new file)

@@ -0,0 +1,422 @@
name: nix
on:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
push:
# If the branches list is ever changed, be sure to change it on all
# build/test jobs (nix, macos, windows)
branches:
# Always build the package branches
- develop
- release
- master
# Branches that opt-in to running
- "ci/**"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
CONAN_REMOTE_URL: https://conan.ripplex.io
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
# This part of the Conan configuration is specific to this workflow only; we
# do not want to pollute the 'conan/profiles' directory with settings that
# might not work for other workflows.
CONAN_GLOBAL_CONF: |
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose
# This workflow has multiple job matrixes.
# They can be considered phases because most of the matrices ("test",
# "coverage", "conan") depend on the first ("dependencies").
#
# The first phase has a job in the matrix for each combination of
# variables that affects dependency ABI:
# platform, compiler, and configuration.
# It creates a GitHub artifact holding the Conan profile,
# and builds and caches binaries for all the dependencies.
# If an Artifactory remote is configured, they are cached there.
# If not, they are added to the GitHub artifact.
# GitHub's "cache" action has a size limit (10 GB) that is too small
# to hold the binaries if they are built locally.
# We must use the "{upload,download}-artifact" actions instead.
#
# The remaining phases have a job in the matrix for each test
# configuration. They install dependency binaries from the cache,
# whichever was used, and build and test rippled.
#
# "instrumentation" is independent, but is included here because it also
# builds on linux in the same "on:" conditions.
jobs:
dependencies:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
strategy:
fail-fast: false
matrix:
platform:
- linux
compiler:
- gcc
- clang
configuration:
- Debug
- Release
include:
- compiler: gcc
compiler_version: 12
distro: ubuntu
codename: jammy
- compiler: clang
compiler_version: 16
distro: debian
codename: bookworm
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }}
env:
build_dir: .build
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: check environment
run: |
echo ${PATH} | tr ':' '\n'
lsb_release -a || true
${{ matrix.compiler }}-${{ matrix.compiler_version }} --version
conan --version
cmake --version
env | sort
- name: configure Conan
run: |
echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan profile show
- name: archive profile
# Create this archive before dependencies are added to the local cache.
run: tar -czf conan.tar.gz -C ${CONAN_HOME} .
- name: build dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration }}
- name: upload archive
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
path: conan.tar.gz
if-no-files-found: error
test:
strategy:
fail-fast: false
matrix:
platform:
- linux
compiler:
- gcc
- clang
configuration:
- Debug
- Release
include:
- compiler: gcc
compiler_version: 12
distro: ubuntu
codename: jammy
- compiler: clang
compiler_version: 16
distro: debian
codename: bookworm
cmake-args:
-
- "-Dunity=ON"
needs: dependencies
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/ci/${{ matrix.distro }}-${{ matrix.codename }}:${{ matrix.compiler }}-${{ matrix.compiler_version }}
env:
build_dir: .build
steps:
- name: download cache
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
with:
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
- name: extract cache
run: |
mkdir -p ${CONAN_HOME}
tar -xzf conan.tar.gz -C ${CONAN_HOME}
- name: check environment
run: |
env | sort
echo ${PATH} | tr ':' '\n'
conan --version
cmake --version
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration }}
- name: build
uses: ./.github/actions/build
with:
generator: Ninja
configuration: ${{ matrix.configuration }}
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
- name: check linking
run: |
cd ${build_dir}
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: test
run: |
cd ${build_dir}
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
reference-fee-test:
strategy:
fail-fast: false
matrix:
platform:
- linux
compiler:
- gcc
configuration:
- Debug
cmake-args:
- "-DUNIT_TEST_REFERENCE_FEE=200"
- "-DUNIT_TEST_REFERENCE_FEE=1000"
needs: dependencies
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
env:
build_dir: .build
steps:
- name: download cache
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
with:
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
- name: extract cache
run: |
mkdir -p ${CONAN_HOME}
tar -xzf conan.tar.gz -C ${CONAN_HOME}
- name: check environment
run: |
env | sort
echo ${PATH} | tr ':' '\n'
conan --version
cmake --version
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration }}
- name: build
uses: ./.github/actions/build
with:
generator: Ninja
configuration: ${{ matrix.configuration }}
cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
- name: test
run: |
cd ${build_dir}
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
coverage:
strategy:
fail-fast: false
matrix:
platform:
- linux
compiler:
- gcc
configuration:
- Debug
needs: dependencies
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
env:
build_dir: .build
steps:
- name: download cache
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
with:
name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
- name: extract cache
run: |
mkdir -p ${CONAN_HOME}
tar -xzf conan.tar.gz -C ${CONAN_HOME}
- name: check environment
run: |
echo ${PATH} | tr ':' '\n'
conan --version
cmake --version
gcovr --version
env | sort
ls ${CONAN_HOME}
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration }}
- name: build
uses: ./.github/actions/build
with:
generator: Ninja
configuration: ${{ matrix.configuration }}
cmake-args: >-
-Dassert=TRUE
-Dwerr=TRUE
-Dcoverage=ON
-Dcoverage_format=xml
-DCODE_COVERAGE_VERBOSE=ON
-DCMAKE_CXX_FLAGS="-O0"
-DCMAKE_C_FLAGS="-O0"
cmake-target: coverage
- name: move coverage report
shell: bash
run: |
mv "${build_dir}/coverage.xml" ./
- name: archive coverage report
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: coverage.xml
path: coverage.xml
retention-days: 30
- name: upload coverage report
uses: wandalen/wretry.action@v1.4.10
with:
action: codecov/codecov-action@v4.5.0
with: |
files: coverage.xml
fail_ci_if_error: true
disable_search: true
verbose: true
plugin: noop
token: ${{ secrets.CODECOV_TOKEN }}
attempt_limit: 5
attempt_delay: 210000 # in milliseconds
conan:
needs: dependencies
runs-on: [self-hosted, heavy]
container:
image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
env:
build_dir: .build
platform: linux
compiler: gcc
compiler_version: 12
configuration: Release
steps:
- name: download cache
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
with:
name: ${{ env.platform }}-${{ env.compiler }}-${{ env.configuration }}
- name: extract cache
run: |
mkdir -p ${CONAN_HOME}
tar -xzf conan.tar.gz -C ${CONAN_HOME}
- name: check environment
run: |
env | sort
echo ${PATH} | tr ':' '\n'
conan --version
cmake --version
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ env.configuration }}
- name: export
run: |
conan export . --version head
- name: build
run: |
cd tests/conan
mkdir ${build_dir} && cd ${build_dir}
conan install .. \
--settings:all build_type=${configuration} \
--output-folder . \
--build missing
cmake .. \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE=${configuration}
cmake --build .
./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
instrumentation-build:
needs: dependencies
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/ci/debian-bookworm:clang-16
env:
build_dir: .build
steps:
- name: download cache
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
with:
name: linux-clang-Debug
- name: extract cache
run: |
mkdir -p ${CONAN_HOME}
tar -xzf conan.tar.gz -C ${CONAN_HOME}
- name: check environment
run: |
echo ${PATH} | tr ':' '\n'
conan --version
cmake --version
env | sort
ls ${CONAN_HOME}
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: dependencies
uses: ./.github/actions/dependencies
with:
configuration: Debug
- name: prepare environment
run: |
mkdir -p ${build_dir}
echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV
echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV
- name: build with instrumentation
run: |
cd ${BUILD_DIR}
cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \
-Dvoidstar=ON \
-Dtests=ON \
-Dxrpld=ON \
-DCMAKE_BUILD_TYPE=Debug \
-DSECP256K1_BUILD_BENCHMARK=OFF \
-DSECP256K1_BUILD_TESTS=OFF \
-DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \
-DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake
cmake --build . --parallel $(nproc)
- name: verify instrumentation enabled
run: |
cd ${BUILD_DIR}
./rippled --version | grep libvoidstar
- name: run unit tests
run: |
cd ${BUILD_DIR}
./rippled -u --unittest-jobs $(( $(nproc)/4 ))
ctest -j $(nproc) --output-on-failure
View File
@@ -1,140 +0,0 @@
# This workflow runs all workflows to check, build and test the project on
# various Linux flavors, as well as on macOS and Windows, on every push to a
# user branch. However, it will not run if the pull request is a draft unless it
# has the 'DraftRunCI' label.
name: PR
on:
merge_group:
types:
- checks_requested
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# This job determines whether the rest of the workflow should run. It runs
# when the PR is not a draft (which should also cover merge-group) or
# has the 'DraftRunCI' label.
should-run:
if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Determine changed files
# This step checks whether any files have changed that should
# cause the next jobs to run. We do it this way rather than
# using `paths` in the `on:` section, because all required
# checks must pass, even for changes that do not modify anything
# that affects those checks. We would therefore like to make the
# checks required only if the job runs, but GitHub does not
# support that directly. By always executing the workflow on new
# commits and by using the changed-files action below, we ensure
# that GitHub considers any skipped jobs to have passed, and in
# turn the required checks as well.
id: changes
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: |
# These paths are unique to `on-pr.yml`.
.github/scripts/levelization/**
.github/scripts/rename/**
.github/workflows/reusable-check-levelization.yml
.github/workflows/reusable-check-rename.yml
.github/workflows/reusable-notify-clio.yml
.github/workflows/on-pr.yml
# Keep the paths below in sync with those in `on-trigger.yml`.
.github/actions/build-deps/**
.github/actions/build-test/**
.github/actions/setup-conan/**
.github/scripts/strategy-matrix/**
.github/workflows/reusable-build.yml
.github/workflows/reusable-build-test-config.yml
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.codecov.yml
cmake/**
conan/**
external/**
include/**
src/**
tests/**
CMakeLists.txt
conanfile.py
conan.lock
- name: Check whether to run
# This step determines whether the rest of the workflow should
# run. The rest of the workflow will run if this job runs AND at
# least one of:
# * Any of the files checked in the `changes` step were modified
# * The PR is NOT a draft and is labeled "Ready to merge"
# * The workflow is running from the merge queue
id: go
env:
FILES: ${{ steps.changes.outputs.any_changed }}
DRAFT: ${{ github.event.pull_request.draft }}
READY: ${{ contains(github.event.pull_request.labels.*.name, 'Ready to merge') }}
MERGE: ${{ github.event_name == 'merge_group' }}
run: |
echo "go=${{ (env.DRAFT != 'true' && env.READY == 'true') || env.FILES == 'true' || env.MERGE == 'true' }}" >> "${GITHUB_OUTPUT}"
cat "${GITHUB_OUTPUT}"
outputs:
go: ${{ steps.go.outputs.go == 'true' }}
check-levelization:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-levelization.yml
check-rename:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-rename.yml
build-test:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-build-test.yml
strategy:
fail-fast: false
matrix:
os: [linux, macos, windows]
with:
os: ${{ matrix.os }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
notify-clio:
needs:
- should-run
- build-test
if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-notify-clio.yml
secrets:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
passed:
if: failure() || cancelled()
needs:
- build-test
- check-levelization
runs-on: ubuntu-latest
steps:
- name: Fail
run: false
View File
@@ -1,80 +0,0 @@
# This workflow runs all workflows to build the dependencies required for the
# project on various Linux flavors, as well as on macOS and Windows, on a
# scheduled basis, on merge into the 'develop', 'release', or 'master' branches,
# or manually. The missing commits check is only run when the code is merged
# into the 'develop' or 'release' branches, and the documentation is built when
# the code is merged into the 'develop' branch.
name: Trigger
on:
push:
branches:
- "develop"
- "release*"
- "master"
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/reusable-check-missing-commits.yml"
- ".github/workflows/on-trigger.yml"
- ".github/workflows/publish-docs.yml"
# Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**"
- ".github/actions/build-test/**"
- ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
- "external/**"
- "include/**"
- "src/**"
- "tests/**"
- "CMakeLists.txt"
- "conanfile.py"
- "conan.lock"
# Run at 06:32 UTC on every day of the week from Monday through Friday. This
# will force all dependencies to be rebuilt, which is useful to verify that
# all dependencies can be built successfully. Only the dependencies that
# are actually missing from the remote will be uploaded.
schedule:
- cron: "32 6 * * 1-5"
# Run when manually triggered via the GitHub UI or API.
workflow_dispatch:
concurrency:
# When a PR is merged into the develop branch it will be assigned a unique
# group identifier, so execution will continue even if another PR is merged
# while it is still running. In all other cases the group identifier is shared
# per branch, so that any in-progress runs are cancelled when a new commit is
# pushed.
group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-check-missing-commits.yml
build-test:
uses: ./.github/workflows/reusable-build-test.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
os: [linux, macos, windows]
with:
os: ${{ matrix.os }}
strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
View File
@@ -1,15 +0,0 @@
name: Run pre-commit hooks
on:
pull_request:
push:
branches: [develop, release, master]
workflow_dispatch:
jobs:
# Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
with:
runs_on: ubuntu-latest
container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'
View File
@@ -1,72 +0,0 @@
# This workflow builds the documentation for the repository, and publishes it to
# GitHub Pages when changes are merged into the default branch.
name: Build and publish documentation
on:
push:
paths:
- ".github/workflows/publish-docs.yml"
- "*.md"
- "**/*.md"
- "docs/**"
- "include/**"
- "src/libxrpl/**"
- "src/xrpld/**"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
env:
BUILD_DIR: .build
NPROC_SUBTRACT: 2
jobs:
publish:
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Check configuration
run: |
echo 'Checking path.'
echo ${PATH} | tr ':' '\n'
echo 'Checking environment variables.'
env | sort
echo 'Checking CMake version.'
cmake --version
echo 'Checking Doxygen version.'
doxygen --version
- name: Build documentation
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
cmake -Donly_docs=ON ..
cmake --build . --target docs --parallel ${BUILD_NPROC}
- name: Publish documentation
if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ${{ env.BUILD_DIR }}/docs/html
View File
@@ -1,77 +0,0 @@
name: Build and test configuration
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_only:
description: 'Whether to only build or to build and test the code ("true", "false").'
required: true
type: boolean
build_type:
description: 'The build type to use ("Debug", "Release").'
type: string
required: true
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
type: string
default: ""
cmake_target:
description: "The CMake target to build."
type: string
required: true
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The configuration string (used for naming artifacts and such)."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: false
type: number
default: 2
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
build:
uses: ./.github/workflows/reusable-build.yml
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ inputs.build_type }}
cmake_args: ${{ inputs.cmake_args }}
cmake_target: ${{ inputs.cmake_target }}
runs_on: ${{ inputs.runs_on }}
image: ${{ inputs.image }}
config_name: ${{ inputs.config_name }}
nproc_subtract: ${{ inputs.nproc_subtract }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
test:
needs: build
uses: ./.github/workflows/reusable-test.yml
with:
run_tests: ${{ !inputs.build_only }}
verify_voidstar: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
runs_on: ${{ inputs.runs_on }}
image: ${{ inputs.image }}
config_name: ${{ inputs.config_name }}
nproc_subtract: ${{ inputs.nproc_subtract }}
View File
@@ -1,58 +0,0 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test
# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: true
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary for each configuration.
build-test-config:
needs:
- generate-matrix
uses: ./.github/workflows/reusable-build-test-config.yml
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
with:
build_dir: ${{ inputs.build_dir }}
build_only: ${{ matrix.build_only }}
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
config_name: ${{ matrix.config_name }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
View File
@@ -1,154 +0,0 @@
name: Build rippled
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: true
type: string
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
type: string
cmake_args:
description: "Additional arguments to pass to CMake."
required: true
type: string
cmake_target:
description: "The CMake target to build."
required: true
type: string
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The name of the configuration."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: true
type: number
secrets:
CODECOV_TOKEN:
description: "The Codecov token to use for uploading coverage reports."
required: true
defaults:
run:
shell: bash
jobs:
build:
name: Build ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 60
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ inputs.build_type }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Configure CMake
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_ARGS: ${{ inputs.cmake_args }}
run: |
cmake \
-G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
${CMAKE_ARGS} \
..
- name: Build the binary
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
BUILD_TYPE: ${{ inputs.build_type }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
cmake \
--build . \
--config "${BUILD_TYPE}" \
--parallel ${BUILD_NPROC} \
--target "${CMAKE_TARGET}"
- name: Put built binaries in one location
shell: bash
working-directory: ${{ inputs.build_dir }}
env:
BUILD_TYPE_DIR: ${{ runner.os == 'Windows' && inputs.build_type || '' }}
CMAKE_TARGET: ${{ inputs.cmake_target }}
run: |
mkdir -p ./binaries/doctest/
cp ./${BUILD_TYPE_DIR}/rippled* ./binaries/
if [ "${CMAKE_TARGET}" != 'coverage' ]; then
cp ./src/tests/libxrpl/${BUILD_TYPE_DIR}/xrpl.test.* ./binaries/doctest/
fi
- name: Upload rippled artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
env:
BUILD_DIR: ${{ inputs.build_dir }}
with:
name: rippled-${{ inputs.config_name }}
path: ${{ env.BUILD_DIR }}/binaries/
retention-days: 3
if-no-files-found: error
- name: Upload coverage report
if: ${{ github.repository_owner == 'XRPLF' && inputs.cmake_target == 'coverage' }}
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
disable_search: true
disable_telem: true
fail_ci_if_error: true
files: ${{ inputs.build_dir }}/coverage.xml
plugins: noop
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
View File
@@ -1,46 +0,0 @@
# This workflow checks if the dependencies between the modules are correctly
# indexed.
name: Check levelization
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-levelization
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
levelization:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Check levelization
run: .github/scripts/levelization/generate.sh
- name: Check for differences
env:
MESSAGE: |
The dependency relationships between the modules in rippled have
changed, which may be an improvement or a regression.
A rule of thumb is that if your changes caused something to be
removed from loops.txt, it's probably an improvement, while if
something was added, it's probably a regression.
Run '.github/scripts/levelization/generate.sh' in your repo, commit
and push the changes. See .github/scripts/levelization/README.md for
more info.
run: |
DIFF=$(git status --porcelain)
if [ -n "${DIFF}" ]; then
# Print the differences to give the contributor a hint about what to
# expect when running levelization on their own machine.
git diff
echo "${MESSAGE}"
exit 1
fi
View File
@@ -1,62 +0,0 @@
# This workflow checks that all commits in the "master" branch are also in the
# "release" and "develop" branches, and that all commits in the "release" branch
# are also in the "develop" branch.
name: Check for missing commits
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-missing-commits
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
fetch-depth: 0
- name: Check for missing commits
env:
MESSAGE: |
If you are reading this, then the commits indicated above are missing
from the "develop" and/or "release" branch. Do a reverse-merge as soon
as possible. See CONTRIBUTING.md for instructions.
run: |
set -o pipefail
# Branches are ordered by how "canonical" they are. Every commit in one
# branch should be in all the branches behind it.
order=(master release develop)
branches=()
for branch in "${order[@]}"; do
# Check that the branches exist so that this job will work on forked
# repos, which don't necessarily have master and release branches.
echo "Checking if ${branch} exists."
if git ls-remote --exit-code --heads origin \
refs/heads/${branch} > /dev/null; then
branches+=(origin/${branch})
fi
done
prior=()
for branch in "${branches[@]}"; do
if [[ ${#prior[@]} -ne 0 ]]; then
echo "Checking ${prior[@]} for commits missing from ${branch}."
git log --oneline --no-merges "${prior[@]}" \
^$branch | tee -a "missing-commits.txt"
echo
fi
prior+=("${branch}")
done
if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then
echo "${MESSAGE}"
exit 1
fi
View File
@@ -1,46 +0,0 @@
# This workflow checks if the codebase is properly renamed, see more info in
# .github/scripts/rename/README.md.
name: Check rename
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-rename
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
rename:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Check definitions
run: .github/scripts/rename/definitions.sh .
- name: Check copyright notices
run: .github/scripts/rename/copyright.sh .
- name: Check CMake configs
run: .github/scripts/rename/cmake.sh .
- name: Check for differences
env:
MESSAGE: |
One or more files contain changes that do not adhere to new naming
conventions.
Run the scripts in '.github/scripts/rename/' in your repo, commit
and push the changes. See .github/scripts/rename/README.md for
more info.
run: |
DIFF=$(git status --porcelain)
if [ -n "${DIFF}" ]; then
# Print the differences to give the contributor a hint about what to
# expect when running the renaming scripts on their own machine.
git diff
echo "${MESSAGE}"
exit 1
fi
View File
@@ -1,91 +0,0 @@
# This workflow exports the built libxrpl package to the Conan remote on a
# channel named after the pull request, and notifies the Clio repository about
# the new version so it can check for compatibility.
name: Notify Clio
# This workflow can only be triggered by other workflows.
on:
workflow_call:
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
type: string
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
type: string
default: https://conan.ripplex.io
secrets:
clio_notify_token:
description: "The GitHub token to notify Clio about new versions."
required: true
conan_remote_username:
description: "The username for logging into the Conan remote."
required: true
conan_remote_password:
description: "The password for logging into the Conan remote."
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-clio
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
upload:
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
runs-on: ubuntu-latest
container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Generate outputs
id: generate
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
echo 'Generating user and channel.'
echo "user=clio" >> "${GITHUB_OUTPUT}"
echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
echo 'Extracting version.'
echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
- name: Calculate conan reference
id: conan_ref
run: |
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
- name: Log into Conan remote
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package
env:
CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
run: |
conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
outputs:
conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}
notify:
needs: upload
runs-on: ubuntu-latest
steps:
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.clio_notify_token }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
View File
@@ -1,41 +0,0 @@
name: Generate strategy matrix
on:
workflow_call:
inputs:
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: false
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
outputs:
matrix:
description: "The generated strategy matrix."
value: ${{ jobs.generate-matrix.outputs.matrix }}
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
View File
@@ -1,111 +0,0 @@
name: Test rippled
on:
workflow_call:
inputs:
verify_voidstar:
description: "Whether to verify the presence of voidstar instrumentation."
required: true
type: boolean
run_tests:
description: "Whether to run unit tests"
required: true
type: boolean
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: true
type: string
config_name:
description: "The name of the configuration."
required: true
type: string
nproc_subtract:
description: "The number of processors to subtract when calculating parallelism."
required: true
type: number
jobs:
test:
name: Test ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
timeout-minutes: 30
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ inputs.nproc_subtract }}
- name: Download rippled artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: rippled-${{ inputs.config_name }}
- name: Make binary executable (Linux and macOS)
shell: bash
if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
run: |
chmod +x ./rippled
- name: Check linking (Linux)
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
ldd ./rippled
if [ "$(ldd ./rippled | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
echo 'The binary is statically linked.'
else
echo 'The binary is dynamically linked.'
exit 1
fi
- name: Verifying presence of instrumentation
if: ${{ inputs.verify_voidstar }}
shell: bash
run: |
./rippled --version | grep libvoidstar
- name: Run the embedded tests
if: ${{ inputs.run_tests }}
shell: bash
env:
BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
run: |
./rippled --unittest --unittest-jobs ${BUILD_NPROC}
- name: Run the separate tests
if: ${{ inputs.run_tests }}
env:
EXT: ${{ runner.os == 'Windows' && '.exe' || '' }}
shell: bash
run: |
for test_file in ./doctest/*${EXT}; do
echo "Executing $test_file"
chmod +x "$test_file"
if [[ "${{ runner.os }}" == "Windows" && "$test_file" == "./doctest/xrpl.test.net.exe" ]]; then
echo "Skipping $test_file on Windows"
else
"$test_file"
fi
done
- name: Debug failure (Linux)
if: ${{ failure() && runner.os == 'Linux' && inputs.run_tests }}
shell: bash
run: |
echo "IPv4 local port range:"
cat /proc/sys/net/ipv4/ip_local_port_range
echo "Netstat:"
netstat -an
View File
@@ -1,107 +0,0 @@
name: Upload Conan Dependencies
on:
schedule:
- cron: "0 3 * * 2-6"
workflow_dispatch:
inputs:
force_source_build:
description: "Force source build of all dependencies"
required: false
default: false
type: boolean
force_upload:
description: "Force upload of all dependencies"
required: false
default: false
type: boolean
pull_request:
branches: [develop]
paths:
# This allows testing changes to the upload workflow in a PR
- .github/workflows/upload-conan-deps.yml
push:
branches: [develop]
paths:
- .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml
- .github/actions/setup-conan/action.yml
- ".github/scripts/strategy-matrix/**"
- conanfile.py
- conan.lock
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
NPROC_SUBTRACT: 2
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
# Build and upload the dependencies for each configuration.
run-upload-conan-deps:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
with:
disable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
id: nproc
with:
subtract: ${{ env.NPROC_SUBTRACT }}
- name: Setup Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: .build
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
# Set the verbosity to "quiet" for Windows to avoid an excessive
# amount of logs. For other OSes, the "verbose" logs are more useful.
log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
- name: Log into Conan remote
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
env:
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}
.github/workflows/windows.yml vendored Normal file
View File
@@ -0,0 +1,106 @@
name: windows
on:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
push:
# If the branches list is ever changed, be sure to change it on all
# build/test jobs (nix, macos, windows, instrumentation)
branches:
# Always build the package branches
- develop
- release
- master
# Branches that opt-in to running
- "ci/**"
# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
CONAN_REMOTE_URL: https://conan.ripplex.io
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
# This part of the Conan configuration is specific to this workflow only; we
# do not want to pollute the 'conan/profiles' directory with settings that
# might not work for other workflows.
CONAN_GLOBAL_CONF: |
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
tools.build:jobs=24
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose
jobs:
test:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
strategy:
fail-fast: false
matrix:
version:
- generator: Visual Studio 17 2022
runs-on: windows-2022
configuration:
- type: Release
tests: true
- type: Debug
# Skip running unit tests on debug builds, because they
# take an unreasonable amount of time
tests: false
runtime: d
runs-on: ${{ matrix.version.runs-on }}
env:
build_dir: .build
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: choose Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065
with:
python-version: 3.13
- name: learn Python cache directory
id: pip-cache
shell: bash
run: |
python -m pip install --upgrade pip
echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT}
- name: restore Python cache directory
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }}
- name: install Conan
run: pip install wheel conan
- name: check environment
run: |
dir env:
$env:PATH -split ';'
python --version
conan --version
cmake --version
- name: configure Conan
shell: bash
run: |
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan profile show
- name: build dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration.type }}
- name: build
uses: ./.github/actions/build
with:
generator: "${{ matrix.version.generator }}"
configuration: ${{ matrix.configuration.type }}
# Hard code for now. Move to the matrix if varied options are needed
cmake-args: "-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON"
cmake-target: install
- name: test
shell: bash
if: ${{ matrix.configuration.tests }}
run: |
cd ${build_dir}/${{ matrix.configuration.type }}
./rippled --unittest --unittest-jobs $(nproc)
ctest -j $(nproc) --output-on-failure
.gitignore vendored
View File
@@ -37,9 +37,10 @@ Release/*.*
*.gcov
# Levelization checking
.github/scripts/levelization/results/*
!.github/scripts/levelization/results/loops.txt
!.github/scripts/levelization/results/ordering.txt
Builds/levelization/results/rawincludes.txt
Builds/levelization/results/paths.txt
Builds/levelization/results/includes/
Builds/levelization/results/includedby/
# Ignore tmp directory.
tmp
@@ -110,4 +111,4 @@ bld.rippled/
.vscode
# Suggested in-tree build directory
/.build*/
/.build/
View File
@@ -1,39 +1,6 @@
# To run pre-commit hooks, first install pre-commit:
# - `pip install pre-commit==${PRE_COMMIT_VERSION}`
#
# Then, run the following command to install the git hook scripts:
# - `pre-commit install`
# You can run all configured hooks against all files with:
# - `pre-commit run --all-files`
# To manually run a specific hook, use:
# - `pre-commit run <hook_id> --all-files`
# To run the hooks against only the staged files, use:
# - `pre-commit run`
# .pre-commit-config.yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: mixed-line-ending
- id: check-merge-conflict
args: [--assume-in-merge]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8
rev: v18.1.8
hooks:
- id: clang-format
args: [--style=file]
"types_or": [c++, c, proto]
- repo: https://github.com/rbubley/mirrors-prettier
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
hooks:
- id: prettier
exclude: |
(?x)^(
external/.*|
.github/scripts/levelization/results/.*\.txt|
conan\.lock
)$
View File
@@ -1 +0,0 @@
external
View File
@@ -39,12 +39,17 @@ found here](./docs/build/environment.md).
- [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/), or higher
- [CMake 3.22](https://cmake.org/download/)[^2], or higher
[^1]:
It is possible to build with Conan 1.60+, but the instructions are
significantly different, which is why we are not recommending it.
[^2]:
CMake 4 is not yet supported by all dependencies required by this project.
If you are affected by this issue, follow the [Conan workaround for CMake
4](#workaround-for-cmake-4).
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:
@@ -127,7 +132,7 @@ higher index than the default Conan Center remote, so it is consulted first. You
can do this by running:
```bash
conan remote add --index 0 xrplf https://conan.ripplex.io
conan remote add --index 0 xrplf "https://conan.ripplex.io"
```
Alternatively, you can pull the patched recipes into the repository and use them
@@ -153,10 +158,6 @@ updated dependencies with the newer version. However, if we switch to a newer
version that no longer requires a patch, no action is required on your part, as
the new recipe will be automatically pulled from the official Conan Center.
> [!NOTE]
> You might need to add `--lockfile=""` to your `conan install` command
> to avoid automatic use of the existing `conan.lock` file when you run `conan export` manually on your machine.
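For instance, a minimal sketch of that flow (the `external/snappy` recipe path
is hypothetical and depends on which patched recipe you are exporting):

```bash
# Export a patched recipe manually...
conan export external/snappy
# ...then install without the repository lockfile, which would otherwise
# pin the recipe revisions recorded in conan.lock:
conan install . --lockfile="" --build missing
```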
### Conan profile tweaks
#### Missing compiler version
@@ -277,6 +278,21 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
```
#### Workaround for CMake 4
If your system CMake is version 4 rather than 3, you may have to configure the
Conan profile to use CMake version 3 for dependencies by adding the following
two lines to your profile:
```text
[tool_requires]
!cmake/*: cmake/[>=3 <4]
```
This will force Conan to download and use a locally cached CMake 3 version, and
is needed because some of the dependencies used by this project do not support
CMake 4.
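If you prefer not to edit the profile file by hand, a minimal sketch (assuming
the default profile; adjust the path if you use a custom one):

```bash
# Append the tool_requires section to the default Conan profile.
cat >> "$(conan config home)/profiles/default" <<'EOF'

[tool_requires]
!cmake/*: cmake/[>=3 <4]
EOF
```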
#### Clang workaround for grpc
If your compiler is clang, version 19 or later, or apple-clang, version 17 or
@@ -450,33 +466,6 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
The location of the `rippled` binary in your build directory depends on your
CMake generator. Pass `--help` to see the rest of the command line options.
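As a rough illustration (assuming the conventional `.build` directory used
elsewhere in this guide):

```bash
# Single-config generators (e.g. Ninja, Unix Makefiles) put the binary
# directly in the build directory:
./.build/rippled --help
# Multi-config generators (e.g. Visual Studio) nest it under the build type:
./.build/Release/rippled.exe --help
```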
#### Conan lockfile
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).
The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is implicitly used when running `conan` commands, so you don't need to specify it.
You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.
> [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.
To create a new lockfile, run the following commands in the repository root:
```bash
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is consulted first
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True'
```
> [!NOTE]
> If some dependencies are exclusive to a particular OS, you may need to re-run the last command for them, adding `--profile:all <PROFILE>`.
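A sketch of that variant (the `windows` profile name is only an example; use
whichever profile covers the OS-exclusive dependencies):

```bash
# Merge OS-specific dependencies into the existing lockfile.
conan lock create . -o '&:jemalloc=True' -o '&:rocksdb=True' \
  --profile:all windows --lockfile=conan.lock --lockfile-out=conan.lock
```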
## Coverage report
The coverage report is intended for developers using compilers GCC
@@ -575,13 +564,7 @@ After any updates or changes to dependencies, you may need to do the following:
```
3. Re-run [conan export](#patched-recipes) if needed.
4. [Regenerate lockfile](#conan-lockfile).
5. Re-run [conan install](#build-and-test).
#### ERROR: Package not resolved
If you're seeing an error like `ERROR: Package 'snappy/1.1.10' not resolved: Unable to find 'snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246' in remotes.`,
please add the `xrplf` remote or re-run `conan export` for [patched recipes](#patched-recipes).
4. Re-run [conan install](#build-and-test).
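A minimal sketch of the two fixes for that error (the recipe path is
hypothetical and depends on which package failed to resolve):

```bash
# Either make sure the xrplf remote is consulted first...
conan remote add --force --index 0 xrplf https://conan.ripplex.io
# ...or re-export the patched recipe locally:
conan export external/snappy --version 1.1.10
```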
### `protobuf/port_def.inc` file not found
@@ -590,7 +573,7 @@ you might have generated CMake files for a different `build_type` than the
`CMAKE_BUILD_TYPE` you passed to Conan.
```
/rippled/.build/pb-xrpl.libpb/xrpl/proto/xrpl.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found
/rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found
10 | #include <google/protobuf/port_def.inc>
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1 error generated.
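A minimal recovery sketch, assuming a single-config generator and the
conventional `.build` directory: wipe the build directory and regenerate with a
matching build type.

```bash
rm -rf .build && mkdir .build && cd .build
# Use the same build type for both Conan and CMake.
conan install .. --output-folder . --build missing --settings:all build_type=Debug
cmake .. \
  -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
  -DCMAKE_BUILD_TYPE=Debug
```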
View File
@@ -50,7 +50,7 @@ that `test` code should _never_ be included in `ripple` code.)
## Validation
The [levelization](generate.sh) script takes no parameters,
The [levelization.sh](levelization.sh) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
@@ -72,15 +72,15 @@ It generates many files of [results](results):
desired as described above. In a perfect repo, this file will be
empty.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../workflows/reusable-check-levelization.yml) to validate
Github workflow](../../.github/workflows/levelization.yml) to validate
that nothing changed.
- [`ordering.txt`](results/ordering.txt): A list showing relationships
between modules where there are no loops as they actually exist, as
opposed to how they are desired as described above.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../workflows/reusable-check-levelization.yml) to validate
Github workflow](../../.github/workflows/levelization.yml) to validate
that nothing changed.
- [`levelization.yml`](../../workflows/reusable-check-levelization.yml)
- [`levelization.yml`](../../.github/workflows/levelization.yml)
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
@@ -111,4 +111,4 @@ get those details locally.
1. Run `levelization.sh`
2. Grep the modules in `paths.txt`.
- For example, if a cycle is found `A ~= B`, simply `grep -w
A .github/scripts/levelization/results/paths.txt | grep -w B`
A Builds/levelization/results/paths.txt | grep -w B`
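A compact end-to-end sketch of that check (paths follow the newer
`.github/scripts` layout; swap in `Builds/levelization` on older branches):

```bash
# Regenerate the levelization results from the repository root.
.github/scripts/levelization/generate.sh
# The committed results should be unchanged; any diff here is what the
# CI workflow would flag.
git status --porcelain .github/scripts/levelization/results/
```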
View File
@@ -1,6 +1,6 @@
#!/bin/bash
# Usage: generate.sh
# Usage: levelization.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.
@@ -19,7 +19,7 @@ export LANG=C
rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
pushd ../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
grep -v boost | tee ${includes}
View File
@@ -7,6 +7,9 @@ Loop: test.jtx test.unit_test
Loop: xrpld.app xrpld.core
xrpld.app > xrpld.core
Loop: xrpld.app xrpld.ledger
xrpld.app > xrpld.ledger
Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app
@@ -17,7 +20,7 @@ Loop: xrpld.app xrpld.rpc
xrpld.rpc > xrpld.app
Loop: xrpld.app xrpld.shamap
xrpld.shamap ~= xrpld.app
xrpld.app > xrpld.shamap
Loop: xrpld.core xrpld.perflog
xrpld.perflog == xrpld.core
View File
@@ -2,16 +2,8 @@ libxrpl.basics > xrpl.basics
libxrpl.crypto > xrpl.basics
libxrpl.json > xrpl.basics
libxrpl.json > xrpl.json
libxrpl.ledger > xrpl.basics
libxrpl.ledger > xrpl.json
libxrpl.ledger > xrpl.ledger
libxrpl.ledger > xrpl.protocol
libxrpl.net > xrpl.basics
libxrpl.net > xrpl.net
libxrpl.nodestore > xrpl.basics
libxrpl.nodestore > xrpl.json
libxrpl.nodestore > xrpl.nodestore
libxrpl.nodestore > xrpl.protocol
libxrpl.protocol > xrpl.basics
libxrpl.protocol > xrpl.json
libxrpl.protocol > xrpl.protocol
@@ -22,9 +14,6 @@ libxrpl.server > xrpl.basics
libxrpl.server > xrpl.json
libxrpl.server > xrpl.protocol
libxrpl.server > xrpl.server
libxrpl.shamap > xrpl.basics
libxrpl.shamap > xrpl.protocol
libxrpl.shamap > xrpl.shamap
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
@@ -32,11 +21,11 @@ test.app > test.unit_test
test.app > xrpl.basics
test.app > xrpld.app
test.app > xrpld.core
test.app > xrpld.ledger
test.app > xrpld.nodestore
test.app > xrpld.overlay
test.app > xrpld.rpc
test.app > xrpl.json
test.app > xrpl.ledger
test.app > xrpl.nodestore
test.app > xrpl.protocol
test.app > xrpl.resource
test.basics > test.jtx
@@ -55,8 +44,8 @@ test.consensus > test.unit_test
test.consensus > xrpl.basics
test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpld.ledger
test.consensus > xrpl.json
test.consensus > xrpl.ledger
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
@@ -74,9 +63,9 @@ test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.ledger
test.jtx > xrpld.rpc
test.jtx > xrpl.json
test.jtx > xrpl.ledger
test.jtx > xrpl.net
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
@@ -86,14 +75,15 @@ test.ledger > test.toplevel
test.ledger > xrpl.basics
test.ledger > xrpld.app
test.ledger > xrpld.core
test.ledger > xrpl.ledger
test.ledger > xrpld.ledger
test.ledger > xrpl.protocol
test.nodestore > test.jtx
test.nodestore > test.toplevel
test.nodestore > test.unit_test
test.nodestore > xrpl.basics
test.nodestore > xrpld.core
test.nodestore > xrpl.nodestore
test.nodestore > xrpld.nodestore
test.nodestore > xrpld.unity
test.overlay > test.jtx
test.overlay > test.toplevel
test.overlay > test.unit_test
@@ -101,8 +91,8 @@ test.overlay > xrpl.basics
test.overlay > xrpld.app
test.overlay > xrpld.overlay
test.overlay > xrpld.peerfinder
test.overlay > xrpld.shamap
test.overlay > xrpl.protocol
test.overlay > xrpl.shamap
test.peerfinder > test.beast
test.peerfinder > test.unit_test
test.peerfinder > xrpl.basics
@@ -137,21 +127,15 @@ test.server > xrpl.json
test.server > xrpl.server
test.shamap > test.unit_test
test.shamap > xrpl.basics
test.shamap > xrpl.nodestore
test.shamap > xrpld.nodestore
test.shamap > xrpld.shamap
test.shamap > xrpl.protocol
test.shamap > xrpl.shamap
test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
xrpl.net > xrpl.basics
xrpl.nodestore > xrpl.basics
xrpl.nodestore > xrpl.protocol
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
xrpl.resource > xrpl.basics
@@ -160,21 +144,16 @@ xrpl.resource > xrpl.protocol
xrpl.server > xrpl.basics
xrpl.server > xrpl.json
xrpl.server > xrpl.protocol
xrpl.shamap > xrpl.basics
xrpl.shamap > xrpl.nodestore
xrpl.shamap > xrpl.protocol
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpld.conditions
xrpld.app > xrpld.consensus
xrpld.app > xrpld.nodestore
xrpld.app > xrpld.perflog
xrpld.app > xrpl.json
xrpld.app > xrpl.ledger
xrpld.app > xrpl.net
xrpld.app > xrpl.nodestore
xrpld.app > xrpl.protocol
xrpld.app > xrpl.resource
xrpld.app > xrpl.shamap
xrpld.conditions > xrpl.basics
xrpld.conditions > xrpl.protocol
xrpld.consensus > xrpl.basics
@@ -184,6 +163,14 @@ xrpld.core > xrpl.basics
xrpld.core > xrpl.json
xrpld.core > xrpl.net
xrpld.core > xrpl.protocol
xrpld.ledger > xrpl.basics
xrpld.ledger > xrpl.json
xrpld.ledger > xrpl.protocol
xrpld.nodestore > xrpl.basics
xrpld.nodestore > xrpld.core
xrpld.nodestore > xrpld.unity
xrpld.nodestore > xrpl.json
xrpld.nodestore > xrpl.protocol
xrpld.overlay > xrpl.basics
xrpld.overlay > xrpld.core
xrpld.overlay > xrpld.peerfinder
@@ -199,11 +186,13 @@ xrpld.perflog > xrpl.basics
xrpld.perflog > xrpl.json
xrpld.rpc > xrpl.basics
xrpld.rpc > xrpld.core
xrpld.rpc > xrpld.ledger
xrpld.rpc > xrpld.nodestore
xrpld.rpc > xrpl.json
xrpld.rpc > xrpl.ledger
xrpld.rpc > xrpl.net
xrpld.rpc > xrpl.nodestore
xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.resource
xrpld.rpc > xrpl.server
xrpld.shamap > xrpl.shamap
xrpld.shamap > xrpl.basics
xrpld.shamap > xrpld.nodestore
xrpld.shamap > xrpl.protocol
View File
@@ -18,11 +18,11 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
# GCC-specific fixes
add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage)
add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage -Wno-error=deprecated-declarations)
# -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
# Clang-specific fixes
add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options
add_compile_options(-Wno-unknown-warning-option -Wno-error=deprecated-declarations) # Ignore unknown warning options
elseif(MSVC)
# MSVC-specific fixes
add_compile_options(/wd4068) # Ignore unknown pragmas
@@ -49,7 +49,7 @@ if(Git_FOUND)
endif() #git
if(thread_safety_analysis)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DXRPL_ENABLE_THREAD_SAFETY_ANNOTATIONS)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
add_compile_options("-stdlib=libc++")
add_link_options("-stdlib=libc++")
endif()
@@ -62,9 +62,9 @@ if (target)
message (FATAL_ERROR "The target option has been removed - use native cmake options to control build")
endif ()
include(XrplSanity)
include(XrplVersion)
include(XrplSettings)
include(RippledSanity)
include(RippledVersion)
include(RippledSettings)
# this check has to remain in the top-level cmake
# because of the early return statement
if (packages_only)
@@ -73,11 +73,11 @@ if (packages_only)
endif()
return ()
endif ()
include(XrplCompiler)
include(XrplInterface)
include(RippledCompiler)
include(RippledInterface)
option(only_docs "Include only the docs target?" FALSE)
include(XrplDocs)
include(RippledDocs)
if(only_docs)
return()
endif()
@@ -85,7 +85,7 @@ endif()
###
include(deps/Boost)
find_package(OpenSSL 1.1.1 REQUIRED)
find_package(OpenSSL 3.6.0 REQUIRED)
set_target_properties(OpenSSL::SSL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
@@ -112,17 +112,16 @@ option(rocksdb "Enable RocksDB" ON)
if(rocksdb)
find_package(RocksDB REQUIRED)
set_target_properties(RocksDB::rocksdb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1
INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1
)
target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
target_link_libraries(ripple_libs INTERFACE RocksDB::rocksdb)
endif()
find_package(nudb REQUIRED)
find_package(date REQUIRED)
find_package(wasm-xrplf REQUIRED)
find_package(xxHash REQUIRED)
target_link_libraries(xrpl_libs INTERFACE
target_link_libraries(ripple_libs INTERFACE
ed25519::ed25519
lz4::lz4
OpenSSL::Crypto
@@ -140,16 +139,16 @@ elseif(TARGET NuDB::nudb)
else()
message(FATAL_ERROR "unknown nudb target")
endif()
target_link_libraries(xrpl_libs INTERFACE ${nudb})
target_link_libraries(ripple_libs INTERFACE ${nudb})
if(coverage)
include(XrplCov)
include(RippledCov)
endif()
set(PROJECT_EXPORT_SET XrplExports)
include(XrplCore)
include(XrplInstall)
include(XrplValidatorKeys)
set(PROJECT_EXPORT_SET RippleExports)
include(RippledCore)
include(RippledInstall)
include(RippledValidatorKeys)
if(tests)
include(CTest)
View File
@@ -81,7 +81,7 @@ If you create new source files, they must be organized as follows:
The source must be formatted according to the style guide below.
Header includes must be [levelized](.github/scripts/levelization).
Header includes must be [levelized](./Builds/levelization).
Changes should usually be squashed down into a single commit.
Some larger or more complicated change sets make more sense,
View File
@@ -1,7 +1,7 @@
ISC License
Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant.
Copyright (c) 2012-2025, the XRP Ledger developers.
Copyright (c) 2012-2020, the XRP Ledger developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
View File
@@ -6,7 +6,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe
## XRP
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
## rippled
@@ -23,26 +23,26 @@ If you are interested in running an **API Server** (including a **Full History S
- **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds.
- **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction fees.
- **[Responsible Software Governance][]:** A team of full-time developers at Ripple & other organizations maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs.
- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide.
- **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances.
- **[Modern Features][]:** Features like Escrow, Checks, and Payment Channels support financial applications atop of the XRP Ledger. This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP.
[Censorship-Resistant Transaction Processing]: https://xrpl.org/transaction-censorship-detection.html#transaction-censorship-detection
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/consensus-research.html#consensus-research
[Finite XRP Supply]: https://xrpl.org/what-is-xrp.html
[Responsible Software Governance]: https://xrpl.org/contribute-code.html#contribute-code-to-the-xrp-ledger
[Secure, Adaptable Cryptography]: https://xrpl.org/cryptographic-keys.html#cryptographic-keys
[Modern Features]: https://xrpl.org/use-specialized-payment-types.html
[On-Ledger Decentralized Exchange]: https://xrpl.org/decentralized-exchange.html#decentralized-exchange
[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm
[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply
[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance
[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
## Source Code
Here are some good places to start learning the source code:
- Read the markdown files in the source tree: `src/ripple/**/*.md`.
- Read [the levelization document](.github/scripts/levelization) to get an idea of the internal dependency graph.
- Read [the levelization document](./Builds/levelization) to get an idea of the internal dependency graph.
- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component.
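  To make the pattern concrete, here is a minimal C++ sketch (illustrative only; `ExampleComponent` is a hypothetical name, not a class from this codebase):

    // Sketch of the constructor-injection pattern described above.
    class Application
    {
    public:
        virtual ~Application() = default;
        // accessors for the other components live on this interface
    };

    class ExampleComponent
    {
    public:
        explicit ExampleComponent(Application& app) : app_(app)
        {
        }

    private:
        Application& app_;  // lets this component reach any other component
    };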
### Repository Contents

View File

@@ -5,7 +5,7 @@ then
name=$( basename $0 )
cat <<- USAGE
Usage: $name <username>
Where <username> is the Github username of the upstream repo. e.g. XRPLF
USAGE
exit 0
@@ -83,3 +83,4 @@ fi
_run git fetch --jobs=$(nproc) upstreams
exit 0

View File

@@ -5,7 +5,7 @@ then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch user/branch [user/branch [...]]
* workbranch will be created locally from base/branch
* base/branch and user/branch may be specified as user:branch to allow
easy copying from Github PRs
@@ -66,3 +66,4 @@ git push $push HEAD:$b
git fetch $repo
-------------------------------------------------------------------
PUSH

View File

@@ -396,8 +396,8 @@
# true - enables compression
# false - disables compression [default].
#
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
# at a cost of greater CPU usage. If you enable link compression,
# The rippled server can save bandwidth by compressing its peer-to-peer communications,
# at a cost of greater CPU usage. If you enable link compression,
# the server automatically compresses communications with peer servers
# that also have link compression enabled.
# https://xrpl.org/enable-link-compression.html
@@ -975,47 +975,6 @@
# number of ledger records online. Must be greater
# than or equal to ledger_history.
#
# Optional keys for NuDB only:
#
# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
# Must be a power of 2 between 4096 and 32768. Default is 4096.
#
# This parameter controls the fundamental storage unit
# size for NuDB's internal data structures. The choice
# of block size can significantly impact performance
# depending on your storage hardware and filesystem:
#
# - 4096 bytes: Optimal for most standard SSDs and
# traditional filesystems (ext4, NTFS, HFS+).
# Provides good balance of performance and storage
# efficiency. Recommended for most deployments.
# Minimizes memory footprint and provides consistent
# low-latency access patterns across diverse hardware.
#
# - 8192-16384 bytes: May improve performance on
# high-end NVMe SSDs and copy-on-write filesystems
# like ZFS or Btrfs that benefit from larger block
# alignment. Can reduce metadata overhead for large
# databases. Offers better sequential throughput and
# reduced I/O operations at the cost of higher memory
# usage per operation.
#
# - 32768 bytes (32K): Maximum supported block size
# for high-performance scenarios with very fast
# storage. May increase memory usage and reduce
# efficiency for smaller databases. Best suited for
# enterprise environments with abundant RAM.
#
# Performance testing is recommended before deploying
# any non-default block size in production environments.
#
# Note: This setting cannot be changed after database
# creation without rebuilding the entire database.
# Choose carefully based on your hardware and expected
# database size.
#
# Example: nudb_block_size=4096
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#
@@ -1052,7 +1011,7 @@
# that rippled is still in sync with the network,
# and that the validated ledger is less than
# 'age_threshold_seconds' old. If not, then continue
# sleeping for this number of seconds and
# sleeping for this number of seconds and
# checking until healthy.
# Default is 5.
#
@@ -1154,7 +1113,7 @@
# page_size Valid values: integer (MUST be power of 2 between 512 and 65536)
# The default is 4096 bytes. This setting determines
# the size of a page in the transaction.db file.
# See https://www.sqlite.org/pragma.html#pragma_page_size
# See https://www.sqlite.org/pragma.html#pragma_page_size
# for more details about the available options.
#
# journal_size_limit Valid values: integer
@@ -1512,7 +1471,6 @@ secure_gateway = 127.0.0.1
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
nudb_block_size=4096
online_delete=512
advisory_delete=0

View File

@@ -101,14 +101,6 @@
# 2025-05-12, Jingchen Wu
# - add -fprofile-update=atomic to ensure atomic profile generation
#
# 2025-08-28, Bronek Kozicki
# - fix "At least one COMMAND must be given" CMake warning from policy CMP0175
#
# 2025-09-03, Jingchen Wu
# - remove the unused functions append_coverage_compiler_flags and append_coverage_compiler_flags_to_target
# - add a new function add_code_coverage_to_target
# - remove some unused code
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
@@ -117,8 +109,10 @@
# using a CMake option() to enable it just optionally):
# include(CodeCoverage)
#
# 3. Append necessary compiler flags and linker flags for all supported source files:
# add_code_coverage_to_target(<target> <PRIVATE|PUBLIC|INTERFACE>)
# 3. Append necessary compiler flags for all supported source files:
# append_coverage_compiler_flags()
# Or for specific target:
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
#
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
#
@@ -207,69 +201,67 @@ endforeach()
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
CACHE INTERNAL "")
set(COVERAGE_CXX_COMPILER_FLAGS "")
set(COVERAGE_C_COMPILER_FLAGS "")
set(COVERAGE_CXX_LINKER_FLAGS "")
set(COVERAGE_C_LINKER_FLAGS "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
include(CheckLinkerFlag)
set(COVERAGE_CXX_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_C_COMPILER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_CXX_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
set(COVERAGE_C_LINKER_FLAGS ${COVERAGE_COMPILER_FLAGS})
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
if(HAVE_cxx_fprofile_abs_path)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-abs-path")
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
if(HAVE_c_fprofile_abs_path)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-abs-path")
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
check_linker_flag(CXX -fprofile-abs-path HAVE_cxx_linker_fprofile_abs_path)
if(HAVE_cxx_linker_fprofile_abs_path)
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-abs-path")
endif()
check_linker_flag(C -fprofile-abs-path HAVE_c_linker_fprofile_abs_path)
if(HAVE_c_linker_fprofile_abs_path)
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-abs-path")
endif()
check_cxx_compiler_flag(-fprofile-update=atomic HAVE_cxx_fprofile_update)
check_cxx_compiler_flag(-fprofile-update HAVE_cxx_fprofile_update)
if(HAVE_cxx_fprofile_update)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_CXX_COMPILER_FLAGS} -fprofile-update=atomic")
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
endif()
check_c_compiler_flag(-fprofile-update=atomic HAVE_c_fprofile_update)
check_c_compiler_flag(-fprofile-update HAVE_c_fprofile_update)
if(HAVE_c_fprofile_update)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_C_COMPILER_FLAGS} -fprofile-update=atomic")
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-update=atomic")
endif()
check_linker_flag(CXX -fprofile-update=atomic HAVE_cxx_linker_fprofile_update)
if(HAVE_cxx_linker_fprofile_update)
set(COVERAGE_CXX_LINKER_FLAGS "${COVERAGE_CXX_LINKER_FLAGS} -fprofile-update=atomic")
endif()
check_linker_flag(C -fprofile-update=atomic HAVE_c_linker_fprofile_update)
if(HAVE_c_linker_fprofile_update)
set(COVERAGE_C_LINKER_FLAGS "${COVERAGE_C_LINKER_FLAGS} -fprofile-update=atomic")
endif()
endif()
set(CMAKE_Fortran_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the Fortran compiler during coverage builds."
FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE )
set(CMAKE_C_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C compiler during coverage builds."
FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE )
mark_as_advanced(
CMAKE_Fortran_FLAGS_COVERAGE
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_C_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE )
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
link_libraries(gcov)
endif()
# Defines a target for running and collection code coverage information
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
@@ -454,24 +446,23 @@ function(setup_target_for_coverage_gcovr)
# Show info where to find the report
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
COMMAND echo
COMMAND ;
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction() # setup_target_for_coverage_gcovr
function(add_code_coverage_to_target name scope)
separate_arguments(COVERAGE_CXX_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_COMPILER_FLAGS}")
separate_arguments(COVERAGE_C_COMPILER_FLAGS NATIVE_COMMAND "${COVERAGE_C_COMPILER_FLAGS}")
separate_arguments(COVERAGE_CXX_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_CXX_LINKER_FLAGS}")
separate_arguments(COVERAGE_C_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_C_LINKER_FLAGS}")
function(append_coverage_compiler_flags)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags
# Add compiler options to the target
target_compile_options(${name} ${scope}
$<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_link_libraries (${name} ${scope}
$<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS} gcov>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS} gcov>
)
endfunction() # add_code_coverage_to_target
# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} PRIVATE ${_flag_list})
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
target_link_libraries(${name} PRIVATE gcov)
endif()
endfunction()
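A minimal usage sketch for the two APIs above (the target name `example_lib` is a placeholder, not a target from this repository; a real build would use whichever helper its revision provides, not both):

    add_library(example_lib STATIC example.cpp)
    # new API from this change: attach coverage flags with an explicit scope
    add_code_coverage_to_target(example_lib PRIVATE)
    # legacy helper from the old revision: per-target flags plus gcov linkage
    append_coverage_compiler_flags_to_target(example_lib)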

View File

@@ -45,7 +45,7 @@ if (static OR APPLE OR MSVC)
set (OPENSSL_USE_STATIC_LIBS ON)
endif ()
set (OPENSSL_MSVC_STATIC_RT ON)
find_dependency (OpenSSL REQUIRED)
find_dependency (OpenSSL 1.1.1 REQUIRED)
find_dependency (ZLIB)
find_dependency (date)
if (TARGET ZLIB::ZLIB)
@@ -53,4 +53,4 @@ if (TARGET ZLIB::ZLIB)
INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
endif ()
include ("${CMAKE_CURRENT_LIST_DIR}/XrplTargets.cmake")
include ("${CMAKE_CURRENT_LIST_DIR}/RippleTargets.cmake")

View File

@@ -7,25 +7,22 @@
toolchain file, especially the ABI-impacting ones
#]=========================================================]
add_library (common INTERFACE)
add_library (Xrpl::common ALIAS common)
add_library (Ripple::common ALIAS common)
# add a single global dependency on this interface lib
link_libraries (Xrpl::common)
link_libraries (Ripple::common)
set_target_properties (common
PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions (common
INTERFACE
$<$<CONFIG:Debug>:DEBUG _DEBUG>
#[===[
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
explicitly except for the special case of (profile ON) and (assert OFF).
Presumably this is because we don't want profile builds asserting unless
asserts were specifically requested.
]===]
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
OPENSSL_SUPPRESS_DEPRECATED
)
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
# defined, so no need to add it explicitly except for
# this special case of (profile ON) and (assert OFF)
# -- presumably this is because we don't want profile
# builds asserting unless asserts were specifically
# requested
if (MSVC)
# remove existing exception flag since we set it to -EHa

View File

@@ -13,7 +13,7 @@ set_target_properties(xrpl.libpb PROPERTIES UNITY_BUILD OFF)
target_protobuf_sources(xrpl.libpb xrpl/proto
LANGUAGE cpp
IMPORT_DIRS include/xrpl/proto
PROTOS include/xrpl/proto/xrpl.proto
PROTOS include/xrpl/proto/ripple.proto
)
file(GLOB_RECURSE protos "include/xrpl/proto/org/*.proto")
@@ -53,26 +53,20 @@ add_library(xrpl.imports.main INTERFACE)
target_link_libraries(xrpl.imports.main
INTERFACE
LibArchive::LibArchive
OpenSSL::Crypto
Ripple::boost
Ripple::opts
Ripple::syslibs
absl::random_random
date::date
ed25519::ed25519
LibArchive::LibArchive
OpenSSL::Crypto
Xrpl::boost
Xrpl::libs
Xrpl::opts
Xrpl::syslibs
secp256k1::secp256k1
wasm-xrplf::wasm-xrplf
xrpl.libpb
xxHash::xxhash
$<$<BOOL:${voidstar}>:antithesis-sdk-cpp>
)
if (WIN32)
target_link_libraries(xrpl.imports.main INTERFACE ntdll)
endif()
include(add_module)
include(target_link_modules)
@@ -107,7 +101,7 @@ target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
# Level 06
add_module(xrpl net)
target_link_libraries(xrpl.libxrpl.net PUBLIC
target_link_libraries(xrpl.libxrpl.net PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
@@ -117,27 +111,6 @@ target_link_libraries(xrpl.libxrpl.net PUBLIC
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
add_module(xrpl nodestore)
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
)
add_module(xrpl shamap)
target_link_libraries(xrpl.libxrpl.shamap PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.crypto
xrpl.libxrpl.protocol
xrpl.libxrpl.nodestore
)
add_module(xrpl ledger)
target_link_libraries(xrpl.libxrpl.ledger PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
)
add_library(xrpl.libxrpl)
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)
@@ -157,10 +130,7 @@ target_link_modules(xrpl PUBLIC
protocol
resource
server
nodestore
shamap
net
ledger
)
# All headers in libxrpl are in modules.
@@ -173,14 +143,14 @@ target_link_modules(xrpl PUBLIC
# $<INSTALL_INTERFACE:include>)
if(xrpld)
add_executable(xrpld)
add_executable(rippled)
if(tests)
target_compile_definitions(xrpld PUBLIC ENABLE_TESTS)
target_compile_definitions(xrpld PRIVATE
target_compile_definitions(rippled PUBLIC ENABLE_TESTS)
target_compile_definitions(rippled PRIVATE
UNIT_TEST_REFERENCE_FEE=${UNIT_TEST_REFERENCE_FEE}
)
endif()
target_include_directories(xrpld
target_include_directories(rippled
PRIVATE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
)
@@ -188,36 +158,36 @@ if(xrpld)
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
"${CMAKE_CURRENT_SOURCE_DIR}/src/xrpld/*.cpp"
)
target_sources(xrpld PRIVATE ${sources})
target_sources(rippled PRIVATE ${sources})
if(tests)
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
"${CMAKE_CURRENT_SOURCE_DIR}/src/test/*.cpp"
)
target_sources(xrpld PRIVATE ${sources})
target_sources(rippled PRIVATE ${sources})
endif()
target_link_libraries(xrpld
Xrpl::boost
Xrpl::opts
Xrpl::libs
target_link_libraries(rippled
Ripple::boost
Ripple::opts
Ripple::libs
xrpl.libxrpl
)
exclude_if_included(xrpld)
exclude_if_included(rippled)
# define a macro for tests that might need to
# be exluded or run differently in CI environment
if(is_ci)
target_compile_definitions(xrpld PRIVATE XRPL_RUNNING_IN_CI)
target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI)
endif ()
if(voidstar)
target_compile_options(xrpld
target_compile_options(rippled
PRIVATE
-fsanitize-coverage=trace-pc-guard
)
# xrpld requires access to antithesis-sdk-cpp implementation file
# rippled requires access to antithesis-sdk-cpp implementation file
# antithesis_instrumentation.h, which is not exported as INTERFACE
target_include_directories(xrpld
target_include_directories(rippled
PRIVATE
${CMAKE_SOURCE_DIR}/external/antithesis-sdk
)
@@ -231,6 +201,4 @@ if(xrpld)
src/test/ledger/Invariants_test.cpp
PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE)
endif()
# For the time being, we will keep the name of the binary as it was.
set_target_properties(xrpld PROPERTIES OUTPUT_NAME "rippled")
endif()

View File

@@ -31,10 +31,8 @@ list(APPEND GCOVR_ADDITIONAL_ARGS
setup_target_for_coverage_gcovr(
NAME coverage
FORMAT ${coverage_format}
EXECUTABLE xrpld
EXECUTABLE rippled
EXECUTABLE_ARGS --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log
EXCLUDE "src/test" "src/tests" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES xrpld
EXCLUDE "src/test" "include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb"
DEPENDENCIES rippled
)
add_code_coverage_to_target(opts INTERFACE)

View File

@@ -8,25 +8,21 @@ install (
TARGETS
common
opts
xrpl_boost
xrpl_libs
xrpl_syslibs
ripple_syslibs
ripple_boost
xrpl.imports.main
xrpl.libpb
xrpl.libxrpl
xrpl.libxrpl.basics
xrpl.libxrpl.beast
xrpl.libxrpl.crypto
xrpl.libxrpl.json
xrpl.libxrpl.ledger
xrpl.libxrpl.net
xrpl.libxrpl.nodestore
xrpl.libxrpl.protocol
xrpl.libxrpl.resource
xrpl.libxrpl.server
xrpl.libxrpl.shamap
xrpl.libxrpl.net
xrpl.libxrpl
antithesis-sdk-cpp
EXPORT XrplExports
EXPORT RippleExports
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
RUNTIME DESTINATION bin
@@ -41,22 +37,22 @@ install(CODE "
set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
include(create_symbolic_link)
create_symbolic_link(xrpl \
\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/xrpl)
\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple)
")
install (EXPORT XrplExports
FILE XrplTargets.cmake
NAMESPACE Xrpl::
DESTINATION lib/cmake/xrpl)
install (EXPORT RippleExports
FILE RippleTargets.cmake
NAMESPACE Ripple::
DESTINATION lib/cmake/ripple)
include (CMakePackageConfigHelpers)
write_basic_package_version_file (
XrplConfigVersion.cmake
VERSION ${xrpld_version}
RippleConfigVersion.cmake
VERSION ${rippled_version}
COMPATIBILITY SameMajorVersion)
if (is_root_project AND TARGET xrpld)
install (TARGETS xrpld RUNTIME DESTINATION bin)
set_target_properties(xrpld PROPERTIES INSTALL_RPATH_USE_LINK_PATH ON)
if (is_root_project AND TARGET rippled)
install (TARGETS rippled RUNTIME DESTINATION bin)
set_target_properties(rippled PROPERTIES INSTALL_RPATH_USE_LINK_PATH ON)
# sample configs should not overwrite existing files
# install if-not-exists workaround as suggested by
# https://cmake.org/Bug/view.php?id=12646
@@ -74,13 +70,13 @@ if (is_root_project AND TARGET xrpld)
install(CODE "
set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
include(create_symbolic_link)
create_symbolic_link(xrpld${suffix} \
\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix})
create_symbolic_link(rippled${suffix} \
\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix})
")
endif ()
install (
FILES
${CMAKE_CURRENT_SOURCE_DIR}/cmake/XrplConfig.cmake
${CMAKE_CURRENT_BINARY_DIR}/XrplConfigVersion.cmake
DESTINATION lib/cmake/xrpl)
${CMAKE_CURRENT_SOURCE_DIR}/cmake/RippleConfig.cmake
${CMAKE_CURRENT_BINARY_DIR}/RippleConfigVersion.cmake
DESTINATION lib/cmake/ripple)

View File

@@ -1,9 +1,9 @@
#[===================================================================[
xrpld compile options/settings via an interface library
rippled compile options/settings via an interface library
#]===================================================================]
add_library (opts INTERFACE)
add_library (Xrpl::opts ALIAS opts)
add_library (Ripple::opts ALIAS opts)
target_compile_definitions (opts
INTERFACE
BOOST_ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS
@@ -21,18 +21,22 @@ target_compile_definitions (opts
>
$<$<BOOL:${beast_no_unit_test_inline}>:BEAST_NO_UNIT_TEST_INLINE=1>
$<$<BOOL:${beast_disable_autolink}>:BEAST_DONT_AUTOLINK_TO_WIN32_LIBRARIES=1>
$<$<BOOL:${single_io_service_thread}>:XRPL_SINGLE_IO_SERVICE_THREAD=1>
$<$<BOOL:${single_io_service_thread}>:RIPPLE_SINGLE_IO_SERVICE_THREAD=1>
$<$<BOOL:${voidstar}>:ENABLE_VOIDSTAR>)
target_compile_options (opts
INTERFACE
$<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized>
$<$<BOOL:${perf}>:-fno-omit-frame-pointer>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
target_link_libraries (opts
INTERFACE
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
$<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
@@ -59,12 +63,12 @@ if (san)
endif ()
#[===================================================================[
xrpld transitive library deps via an interface library
rippled transitive library deps via an interface library
#]===================================================================]
add_library (xrpl_syslibs INTERFACE)
add_library (Xrpl::syslibs ALIAS xrpl_syslibs)
target_link_libraries (xrpl_syslibs
add_library (ripple_syslibs INTERFACE)
add_library (Ripple::syslibs ALIAS ripple_syslibs)
target_link_libraries (ripple_syslibs
INTERFACE
$<$<BOOL:${MSVC}>:
legacy_stdio_definitions.lib
@@ -89,9 +93,9 @@ target_link_libraries (xrpl_syslibs
if (NOT MSVC)
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)
target_link_libraries (xrpl_syslibs INTERFACE Threads::Threads)
target_link_libraries (ripple_syslibs INTERFACE Threads::Threads)
endif ()
add_library (xrpl_libs INTERFACE)
add_library (Xrpl::libs ALIAS xrpl_libs)
target_link_libraries (xrpl_libs INTERFACE Xrpl::syslibs)
add_library (ripple_libs INTERFACE)
add_library (Ripple::libs ALIAS ripple_libs)
target_link_libraries (ripple_libs INTERFACE Ripple::syslibs)

View File

@@ -61,8 +61,8 @@ if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
endif ()
if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
message (FATAL_ERROR "Xrpld requires a 64 bit target architecture.\n"
"The most likely cause of this warning is trying to build xrpld with a 32-bit OS.")
message (FATAL_ERROR "Rippled requires a 64 bit target architecture.\n"
"The most likely cause of this warning is trying to build rippled with a 32-bit OS.")
endif ()
if (APPLE AND NOT HOMEBREW)

View File

@@ -49,7 +49,7 @@ else()
set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
endif()
if(is_linux)
option(BUILD_SHARED_LIBS "build shared xrpl libraries" OFF)
option(BUILD_SHARED_LIBS "build shared ripple libraries" OFF)
option(static "link protobuf, openssl, libc++, and boost statically" ON)
option(perf "Enables flags that assist with perf recording" OFF)
option(use_gold "enables detection of gold (binutils) linker" ON)
@@ -58,7 +58,7 @@ else()
# we are not ready to allow shared-libs on windows because it would require
# export declarations. On macos it's more feasible, but static openssl
# produces odd linker errors, thus we disable shared lib builds for now.
set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared xrpl libraries - OFF for win/macos" FORCE)
set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared ripple libraries - OFF for win/macos" FORCE)
set(static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
set(perf OFF CACHE BOOL "perf flags, linux only" FORCE)
set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
@@ -118,7 +118,7 @@ option(beast_no_unit_test_inline
"Prevents unit test definitions from being inserted into global table"
OFF)
option(single_io_service_thread
"Restricts the number of threads calling io_context::run to one. \
"Restricts the number of threads calling io_service::run to one. \
This can be useful when debugging."
OFF)
option(boost_show_deprecated

View File

@@ -0,0 +1,22 @@
option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF)
if (validator_keys)
git_branch (current_branch)
# default to tracking VK master branch unless we are on release
if (NOT (current_branch STREQUAL "release"))
set (current_branch "master")
endif ()
message (STATUS "tracking ValidatorKeys branch: ${current_branch}")
FetchContent_Declare (
validator_keys_src
GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
GIT_TAG "${current_branch}"
)
FetchContent_GetProperties (validator_keys_src)
if (NOT validator_keys_src_POPULATED)
message (STATUS "Pausing to download ValidatorKeys...")
FetchContent_Populate (validator_keys_src)
endif ()
add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys)
endif ()

View File

@@ -5,11 +5,11 @@
file(STRINGS src/libxrpl/protocol/BuildInfo.cpp BUILD_INFO)
foreach(line_ ${BUILD_INFO})
if(line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
set(xrpld_version ${CMAKE_MATCH_1})
set(rippled_version ${CMAKE_MATCH_1})
endif()
endforeach()
if(xrpld_version)
message(STATUS "xrpld version: ${xrpld_version}")
if(rippled_version)
message(STATUS "rippled version: ${rippled_version}")
else()
message(FATAL_ERROR "unable to determine xrpld version")
message(FATAL_ERROR "unable to determine rippled version")
endif()

View File

@@ -1,20 +0,0 @@
option (validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF)
if (validator_keys)
git_branch (current_branch)
# default to tracking VK master branch unless we are on release
if (NOT (current_branch STREQUAL "release"))
set (current_branch "master")
endif ()
message (STATUS "Tracking ValidatorKeys branch: ${current_branch}")
FetchContent_Declare (
validator_keys
GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
GIT_TAG "${current_branch}"
)
FetchContent_MakeAvailable(validator_keys)
set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
endif ()

View File

@@ -12,10 +12,16 @@ find_package(Boost 1.82 REQUIRED
thread
)
add_library(xrpl_boost INTERFACE)
add_library(Xrpl::boost ALIAS xrpl_boost)
add_library(ripple_boost INTERFACE)
add_library(Ripple::boost ALIAS ripple_boost)
if(XCODE)
target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
else()
target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
endif()
target_link_libraries(xrpl_boost
target_link_libraries(ripple_boost
INTERFACE
Boost::headers
Boost::chrono
@@ -24,13 +30,12 @@ target_link_libraries(xrpl_boost
Boost::date_time
Boost::filesystem
Boost::json
Boost::process
Boost::program_options
Boost::regex
Boost::system
Boost::thread)
if(Boost_COMPILER)
target_link_libraries(xrpl_boost INTERFACE Boost::disable_autolinking)
target_link_libraries(ripple_boost INTERFACE Boost::disable_autolinking)
endif()
if(san AND is_clang)
# TODO: gcc does not support -fsanitize-blacklist...can we do something else

View File

@@ -7,7 +7,7 @@ function(xrpl_add_test name)
"${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
)
add_executable(${target} ${ARGN} ${sources})
add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
isolate_headers(
${target}

View File

@@ -1,57 +0,0 @@
{
"version": "0.5",
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"wasm-xrplf/2.4.1-xrplf#dc67c558e283593ef0edd7eb00e9fa0d%1759862247.891",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
"rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1759746684.671",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
],
"build_requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"strawberryperl/5.32.1.1#707032463aa0620fa17ec0d887f5fe41%1756234281.733",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"nasm/2.16.01#31e26f2ee3c4346ecd347911bd126904%1756234232.901",
"msys2/cci.latest#5b73b10144f73cc5bfe0572ed9be39e1%1751977009.857",
"m4/1.4.19#b38ced39a01e31fef5435bc634461fd2%1700758725.451",
"cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
"b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28",
"automake/1.16.5#b91b7c384c3deaa9d535be02da14d04f%1755524470.56",
"autoconf/2.71#51077f068e61700d65bb05541ea1e4b0%1731054366.86"
],
"python_requires": [],
"overrides": {
"protobuf/3.21.12": [
null,
"protobuf/3.21.12"
],
"lz4/1.9.4": [
"lz4/1.10.0"
],
"boost/1.83.0": [
"boost/1.88.0"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"
]
},
"config_requires": []
}

View File

@@ -1,5 +0,0 @@
# Global configuration for Conan. This is used to set the number of parallel
# downloads and uploads.
core:non_interactive=True
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}

View File

@@ -21,11 +21,14 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
[conf]
{% if compiler == "clang" and compiler_version >= 19 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %}
grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags+=['-Wno-restrict']
tools.build:cxxflags=['-Wno-restrict']
{% endif %}
[tool_requires]
!cmake/*: cmake/[>=3 <4]

View File

@@ -1,7 +1,7 @@
from conan import ConanFile, __version__ as conan_version
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
import re
import subprocess
class Xrpl(ConanFile):
name = 'xrpl'
@@ -28,9 +28,8 @@ class Xrpl(ConanFile):
'grpc/1.50.1',
'libarchive/3.8.1',
'nudb/2.0.9',
'openssl/3.5.4',
'openssl/3.6.0',
'soci/4.0.3',
'wasm-xrplf/2.4.1-xrplf',
'zlib/1.3.1',
]
@@ -91,6 +90,7 @@ class Xrpl(ConanFile):
}
def set_version(self):
subprocess.run(["conan", "export", f'{self.recipe_folder}/external/openssl-3.6.0/'])
if self.version is None:
path = f'{self.recipe_folder}/src/libxrpl/protocol/BuildInfo.cpp'
regex = r'versionString\s?=\s?\"(.*)\"'
@@ -102,13 +102,11 @@ class Xrpl(ConanFile):
def configure(self):
if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global'
if self.settings.compiler in ['clang', 'gcc']:
self.options['boost'].without_cobalt = True
def requirements(self):
# Conan 2 requires transitive headers to be specified
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
self.requires('boost/1.88.0', force=True, **transitive_headers_opt)
self.requires('boost/1.86.0', force=True, **transitive_headers_opt)
self.requires('date/3.0.4', **transitive_headers_opt)
self.requires('lz4/1.10.0', force=True)
self.requires('protobuf/3.21.12', force=True)
@@ -135,7 +133,6 @@ class Xrpl(ConanFile):
self.folders.generators = 'build/generators'
generators = 'CMakeDeps'
def generate(self):
tc = CMakeToolchain(self)
tc.variables['tests'] = self.options.tests
@@ -180,7 +177,6 @@ class Xrpl(ConanFile):
'boost::filesystem',
'boost::json',
'boost::program_options',
'boost::process',
'boost::regex',
'boost::system',
'boost::thread',
@@ -193,7 +189,6 @@ class Xrpl(ConanFile):
'protobuf::libprotobuf',
'soci::soci',
'sqlite3::sqlite',
'wasm-xrplf::wasm-xrplf',
'xxhash::xxhash',
'zlib::zlib',
]

View File

@@ -5,8 +5,8 @@ skinparam roundcorner 20
skinparam maxmessagesize 160
actor "Rippled Start" as RS
participant "Timer" as T
participant "NetworkOPs" as NOP
participant "Timer" as T
participant "NetworkOPs" as NOP
participant "ValidatorList" as VL #lightgreen
participant "Consensus" as GC
participant "ConsensusAdaptor" as CA #lightgreen
@@ -20,7 +20,7 @@ VL -> NOP
NOP -> VL: update trusted validators
activate VL
VL -> VL: re-calculate quorum
hnote over VL#lightgreen: ignore negative listed validators\nwhen calculating quorum
hnote over VL#lightgreen: ignore negative listed validators\nwhen calculating quorum
VL -> NOP
deactivate VL
NOP -> GC: start round
@@ -36,14 +36,14 @@ activate GC
end
alt phase == OPEN
alt should close ledger
alt should close ledger
GC -> GC: phase = ESTABLISH
GC -> CA: onClose
activate CA
alt sqn%256==0
alt sqn%256==0
CA -[#green]> RM: <font color=green>getValidations
CA -[#green]> CA: <font color=green>create UNLModify Tx
hnote over CA#lightgreen: use validations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
CA -[#green]> CA: <font color=green>create UNLModify Tx
hnote over CA#lightgreen: use validations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet.
end
CA -> GC
GC -> CA: propose
@@ -61,14 +61,14 @@ else phase == ESTABLISH
CA -> CA : build LCL
hnote over CA #lightgreen: copy negative UNL from parent ledger
alt sqn%256==0
CA -[#green]> CA: <font color=green>Adjust negative UNL
CA -[#green]> CA: <font color=green>Adjust negative UNL
CA -[#green]> CA: <font color=green>apply UNLModify Tx
end
CA -> CA : validate and send validation message
activate NOP
CA -> NOP : end consensus and\n<b>begin next consensus round
deactivate NOP
deactivate CA
deactivate CA
hnote over RM: receive validations
end
else phase == ACCEPTED
@@ -76,4 +76,4 @@ else phase == ACCEPTED
end
deactivate GC
@enduml
@enduml

View File

@@ -4,7 +4,7 @@ class TimeoutCounter {
#app_ : Application&
}
TimeoutCounter o-- "1" Application
TimeoutCounter o-- "1" Application
': app_
Stoppable <.. Application
@@ -14,13 +14,13 @@ class Application {
-m_inboundLedgers : uptr<InboundLedgers>
}
Application *-- "1" LedgerReplayer
Application *-- "1" LedgerReplayer
': m_ledgerReplayer
Application *-- "1" InboundLedgers
Application *-- "1" InboundLedgers
': m_inboundLedgers
Stoppable <.. InboundLedgers
Application "1" --o InboundLedgers
Application "1" --o InboundLedgers
': app_
class InboundLedgers {
@@ -28,9 +28,9 @@ class InboundLedgers {
}
Stoppable <.. LedgerReplayer
InboundLedgers "1" --o LedgerReplayer
InboundLedgers "1" --o LedgerReplayer
': inboundLedgers_
Application "1" --o LedgerReplayer
Application "1" --o LedgerReplayer
': app_
class LedgerReplayer {
@@ -42,17 +42,17 @@ class LedgerReplayer {
-skipLists_ : hash_map<u256, wptr<SkipListAcquire>>
}
LedgerReplayer *-- LedgerReplayTask
LedgerReplayer *-- LedgerReplayTask
': tasks_
LedgerReplayer o-- LedgerDeltaAcquire
LedgerReplayer o-- LedgerDeltaAcquire
': deltas_
LedgerReplayer o-- SkipListAcquire
LedgerReplayer o-- SkipListAcquire
': skipLists_
TimeoutCounter <.. LedgerReplayTask
InboundLedgers "1" --o LedgerReplayTask
InboundLedgers "1" --o LedgerReplayTask
': inboundLedgers_
LedgerReplayer "1" --o LedgerReplayTask
LedgerReplayer "1" --o LedgerReplayTask
': replayer_
class LedgerReplayTask {
@@ -63,15 +63,15 @@ class LedgerReplayTask {
+addDelta(sptr<LedgerDeltaAcquire>)
}
LedgerReplayTask *-- "1" SkipListAcquire
LedgerReplayTask *-- "1" SkipListAcquire
': skipListAcquirer_
LedgerReplayTask *-- LedgerDeltaAcquire
LedgerReplayTask *-- LedgerDeltaAcquire
': deltas_
TimeoutCounter <.. SkipListAcquire
InboundLedgers "1" --o SkipListAcquire
InboundLedgers "1" --o SkipListAcquire
': inboundLedgers_
LedgerReplayer "1" --o SkipListAcquire
LedgerReplayer "1" --o SkipListAcquire
': replayer_
LedgerReplayTask --o SkipListAcquire : implicit via callback
@@ -83,9 +83,9 @@ class SkipListAcquire {
}
TimeoutCounter <.. LedgerDeltaAcquire
InboundLedgers "1" --o LedgerDeltaAcquire
InboundLedgers "1" --o LedgerDeltaAcquire
': inboundLedgers_
LedgerReplayer "1" --o LedgerDeltaAcquire
LedgerReplayer "1" --o LedgerDeltaAcquire
': replayer_
LedgerReplayTask --o LedgerDeltaAcquire : implicit via callback
@@ -95,4 +95,4 @@ class LedgerDeltaAcquire {
-replayer_ : LedgerReplayer&
-dataReadyCallbacks_ : vector<callback>
}
@enduml
@enduml

View File

@@ -38,7 +38,7 @@ deactivate lr
loop
lr -> lda : make_shared(ledgerId, ledgerSeq)
return delta
lr -> lrt : addDelta(delta)
lr -> lrt : addDelta(delta)
lrt -> lda : addDataCallback(callback)
return
return
@@ -62,7 +62,7 @@ deactivate peer
lr -> lda : processData(ledgerHeader, txns)
lda -> lda : notify()
note over lda: call the callbacks added by\naddDataCallback(callback).
lda -> lrt : callback(ledgerId)
lda -> lrt : callback(ledgerId)
lrt -> lrt : deltaReady(ledgerId)
lrt -> lrt : tryAdvance()
loop as long as child can be built
@@ -82,4 +82,4 @@ deactivate peer
deactivate peer
@enduml
@enduml

3
external/README.md vendored
View File

@@ -1,6 +1,7 @@
# External Conan recipes
The subdirectories in this directory contain external libraries used by rippled.
The subdirectories in this directory contain copies of external libraries used
by rippled.
| Folder | Upstream | Description |
| :--------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------- |

View File

@@ -1,3 +1,3 @@
---
DisableFormat: true
SortIncludes: Never
SortIncludes: false

View File

@@ -1,12 +1,12 @@
[ed25519](http://ed25519.cr.yp.to/) is an
[Elliptic Curve Digital Signature Algorithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
developed by [Dan Bernstein](http://cr.yp.to/djb.html),
[Niels Duif](http://www.nielsduif.nl/),
[Tanja Lange](http://hyperelliptic.org/tanja),
[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
[ed25519](http://ed25519.cr.yp.to/) is an
[Elliptic Curve Digital Signature Algorithm](http://en.wikipedia.org/wiki/Elliptic_Curve_DSA),
developed by [Dan Bernstein](http://cr.yp.to/djb.html),
[Niels Duif](http://www.nielsduif.nl/),
[Tanja Lange](http://hyperelliptic.org/tanja),
[Peter Schwabe](http://www.cryptojedi.org/users/peter/),
and [Bo-Yin Yang](http://www.iis.sinica.edu.tw/pages/byyang/).
This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
This project provides performant, portable 32-bit & 64-bit implementations. All implementations are
of course constant time in regard to secret data.
#### Performance
@@ -52,35 +52,35 @@ are made.
#### Compilation
No configuration is needed **if you are compiling against OpenSSL**.
No configuration is needed **if you are compiling against OpenSSL**.
##### Hash Options
If you are not compiling against OpenSSL, you will need a hash function.
To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
To use a simple/**slow** implementation of SHA-512, use `-DED25519_REFHASH` when compiling `ed25519.c`.
This should never be used except to verify the code works when OpenSSL is not available.
To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
To use a custom hash function, use `-DED25519_CUSTOMHASH` when compiling `ed25519.c` and put your
custom hash implementation in ed25519-hash-custom.h. The hash must have a 512-bit digest and implement
struct ed25519_hash_context;
struct ed25519_hash_context;
void ed25519_hash_init(ed25519_hash_context *ctx);
void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
void ed25519_hash_init(ed25519_hash_context *ctx);
void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen);
void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash);
void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen);
##### Random Options
If you are not compiling against OpenSSL, you will need a random function for batch verification.
To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
To use a custom random function, use `-DED25519_CUSTOMRANDOM` when compiling `ed25519.c` and put your
custom hash implementation in ed25519-randombytes-custom.h. The random function must implement:
void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
void ED25519_FN(ed25519_randombytes_unsafe) (void *p, size_t len);
Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
Use `-DED25519_TEST` when compiling `ed25519.c` to use a deterministically seeded, non-thread safe CSPRNG
variant of Bob Jenkins [ISAAC](http://en.wikipedia.org/wiki/ISAAC_%28cipher%29)
##### Minor options
@@ -91,80 +91,79 @@ Use `-DED25519_FORCE_32BIT` to force the use of 32 bit routines even when compil
##### 32-bit
gcc ed25519.c -m32 -O3 -c
gcc ed25519.c -m32 -O3 -c
##### 64-bit
gcc ed25519.c -m64 -O3 -c
gcc ed25519.c -m64 -O3 -c
##### SSE2
gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
gcc ed25519.c -m32 -O3 -c -DED25519_SSE2 -msse2
gcc ed25519.c -m64 -O3 -c -DED25519_SSE2
clang and icc are also supported
#### Usage
To use the code, link against `ed25519.o -mbits` and:
#include "ed25519.h"
#include "ed25519.h"
Add `-lssl -lcrypto` when using OpenSSL (Some systems don't need -lcrypto? It might be trial and error).
To generate a private key, simply generate 32 bytes from a secure
cryptographic source:
ed25519_secret_key sk;
randombytes(sk, sizeof(ed25519_secret_key));
ed25519_secret_key sk;
randombytes(sk, sizeof(ed25519_secret_key));
To generate a public key:
ed25519_public_key pk;
ed25519_publickey(sk, pk);
ed25519_public_key pk;
ed25519_publickey(sk, pk);
To sign a message:
ed25519_signature sig;
ed25519_sign(message, message_len, sk, pk, signature);
ed25519_signature sig;
ed25519_sign(message, message_len, sk, pk, signature);
To verify a signature:
int valid = ed25519_sign_open(message, message_len, pk, signature) == 0;
int valid = ed25519_sign_open(message, message_len, pk, signature) == 0;
To batch verify signatures:
const unsigned char *mp[num] = {message1, message2..}
size_t ml[num] = {message_len1, message_len2..}
const unsigned char *pkp[num] = {pk1, pk2..}
const unsigned char *sigp[num] = {signature1, signature2..}
int valid[num]
const unsigned char *mp[num] = {message1, message2..}
size_t ml[num] = {message_len1, message_len2..}
const unsigned char *pkp[num] = {pk1, pk2..}
const unsigned char *sigp[num] = {signature1, signature2..}
int valid[num]
/* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
/* valid[i] will be set to 1 if the individual signature was valid, 0 otherwise */
int all_valid = ed25519_sign_open_batch(mp, ml, pkp, sigp, num, valid) == 0;
**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
`ed25519-randombytes.h`, to generate random scalars for the verification code.
**Note**: Batch verification uses `ed25519_randombytes_unsafe`, implemented in
`ed25519-randombytes.h`, to generate random scalars for the verification code.
The default implementation now uses OpenSSL's `RAND_bytes`.
Unlike the [SUPERCOP](http://bench.cr.yp.to/supercop.html) version, signatures are
not appended to messages, and there is no need for padding in front of messages.
Additionally, the secret key does not contain a copy of the public key, so it is
not appended to messages, and there is no need for padding in front of messages.
Additionally, the secret key does not contain a copy of the public key, so it is
32 bytes instead of 64 bytes, and the public key must be provided to the signing
function.
##### Curve25519
Curve25519 public keys can be generated thanks to
[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
Curve25519 public keys can be generated thanks to
[Adam Langley](http://www.imperialviolet.org/2013/05/10/fastercurve25519.html)
leveraging Ed25519's precomputed basepoint scalar multiplication.
curved25519_key sk, pk;
randombytes(sk, sizeof(curved25519_key));
curved25519_scalarmult_basepoint(pk, sk);
curved25519_key sk, pk;
randombytes(sk, sizeof(curved25519_key));
curved25519_scalarmult_basepoint(pk, sk);
Note the name is curved25519, a combination of curve and ed25519, to prevent
Note the name is curved25519, a combination of curve and ed25519, to prevent
name clashes. Performance is slightly faster than short message ed25519
signing due to both using the same code for the scalar multiply.
@@ -180,4 +179,4 @@ with extreme values to ensure they function correctly. SSE2 is now supported.
#### Papers
[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)
[Available on the Ed25519 website](http://ed25519.cr.yp.to/papers.html)

View File

@@ -1,78 +1,78 @@
This code fuzzes ed25519-donna (and optionally ed25519-donna-sse2) against the ref10 implementations of
[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
[curve25519](https://github.com/floodyberry/supercop/tree/master/crypto_scalarmult/curve25519/ref10) and
[ed25519](https://github.com/floodyberry/supercop/tree/master/crypto_sign/ed25519/ref10).
Curve25519 tests that generating a public key from a secret key produces matching results across the implementations.
# Building
## *nix + PHP
## \*nix + PHP
`php build-nix.php (required parameters) (optional parameters)`
Required parameters:
* `--function=[curve25519,ed25519]`
* `--bits=[32,64]`
- `--function=[curve25519,ed25519]`
- `--bits=[32,64]`
Optional parameters:
* `--with-sse2`
- `--with-sse2`
Also fuzz against ed25519-donna-sse2
* `--with-openssl`
Also fuzz against ed25519-donna-sse2
Build with OpenSSL's SHA-512.
- `--with-openssl`
Default: Reference SHA-512 implementation (slow!)
Build with OpenSSL's SHA-512.
* `--compiler=[gcc,clang,icc]`
Default: Reference SHA-512 implementation (slow!)
Default: gcc
- `--compiler=[gcc,clang,icc]`
* `--no-asm`
Default: gcc
Do not use platform specific assembler
- `--no-asm`
Do not use platform specific assembler
example:
php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
php build-nix.php --bits=64 --function=ed25519 --with-sse2 --compiler=icc
## Windows
Create a project with access to the ed25519 files.
If you are not using OpenSSL, add the `ED25519_REFHASH` define to the project's
If you are not using OpenSSL, add the `ED25519_REFHASH` define to the project's
"Properties/Preprocessor/Preprocessor Definitions" option
Add the following files to the project:
* `fuzz/curve25519-ref10.c`
* `fuzz/ed25519-ref10.c`
* `fuzz/ed25519-donna.c`
* `fuzz/ed25519-donna-sse2.c` (optional)
* `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
- `fuzz/curve25519-ref10.c`
- `fuzz/ed25519-ref10.c`
- `fuzz/ed25519-donna.c`
- `fuzz/ed25519-donna-sse2.c` (optional)
- `fuzz-[curve25519/ed25519].c` (depending on which you want to fuzz)
If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
If you are also fuzzing against ed25519-donna-sse2, add the `ED25519_SSE2` define for `fuzz-[curve25519/ed25519].c` under
its "Properties/Preprocessor/Preprocessor Definitions" option.
# Running
If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
If everything agrees, the program will only output occasional status dots (every 0x1000 passes)
and a 64-bit progress count (every 0x20000 passes):
fuzzing: ref10 curved25519 curved25519-sse2
................................ [0000000000020000]
................................ [0000000000040000]
................................ [0000000000060000]
................................ [0000000000080000]
................................ [00000000000a0000]
................................ [00000000000c0000]
If any of the implementations do not agree with the ref10 implementation, the program will dump
the random data that was used, the data generated by the ref10 implementation, and diffs of the
the random data that was used, the data generated by the ref10 implementation, and diffs of the
ed25519-donna data against the ref10 data.
## Example errors
@@ -83,21 +83,21 @@ These are example error dumps (with intentionally introduced errors).
Random data:
* sk, or Secret Key
* m, or Message
- sk, or Secret Key
- m, or Message
Generated data:
* pk, or Public Key
* sig, or Signature
* valid, or if the signature of the message is valid with the public key
- pk, or Public Key
- sig, or Signature
- valid, or if the signature of the message is valid with the public key
Dump:
sk:
0x3b,0xb7,0x17,0x7a,0x66,0xdc,0xb7,0x9a,0x90,0x25,0x07,0x99,0x96,0xf3,0x92,0xef,
0x78,0xf8,0xad,0x6c,0x35,0x87,0x81,0x67,0x03,0xe6,0x95,0xba,0x06,0x18,0x7c,0x9c,
m:
0x7c,0x8d,0x3d,0xe1,0x92,0xee,0x7a,0xb8,0x4d,0xc9,0xfb,0x02,0x34,0x1e,0x5a,0x91,
0xee,0x01,0xa6,0xb8,0xab,0x37,0x3f,0x3d,0x6d,0xa2,0x47,0xe3,0x27,0x93,0x7c,0xb7,
@@ -107,67 +107,66 @@ Dump:
0x63,0x14,0xe0,0x81,0x52,0xec,0xcd,0xcf,0x70,0x54,0x7d,0xa3,0x49,0x8b,0xf0,0x89,
0x70,0x07,0x12,0x2a,0xd9,0xaa,0x16,0x01,0xb2,0x16,0x3a,0xbb,0xfc,0xfa,0x13,0x5b,
0x69,0x83,0x92,0x70,0x95,0x76,0xa0,0x8e,0x16,0x79,0xcc,0xaa,0xb5,0x7c,0xf8,0x7a,
ref10:
pk:
0x71,0xb0,0x5e,0x62,0x1b,0xe3,0xe7,0x36,0x91,0x8b,0xc0,0x13,0x36,0x0c,0xc9,0x04,
0x16,0xf5,0xff,0x48,0x0c,0x83,0x6b,0x88,0x53,0xa2,0xc6,0x0f,0xf7,0xac,0x42,0x04,
sig:
0x3e,0x05,0xc5,0x37,0x16,0x0b,0x29,0x30,0x89,0xa3,0xe7,0x83,0x08,0x16,0xdd,0x96,
0x02,0xfa,0x0d,0x44,0x2c,0x43,0xaa,0x80,0x93,0x04,0x58,0x22,0x09,0xbf,0x11,0xa5,
0xcc,0xa5,0x3c,0x9f,0xa0,0xa4,0x64,0x5a,0x4a,0xdb,0x20,0xfb,0xc7,0x9b,0xfd,0x3f,
0x08,0xae,0xc4,0x3c,0x1e,0xd8,0xb6,0xb4,0xd2,0x6d,0x80,0x92,0xcb,0x71,0xf3,0x02,
valid: yes
ed25519-donna:
pk diff:
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
sig diff:
0x2c,0xb9,0x25,0x14,0xd0,0x94,0xeb,0xfe,0x46,0x02,0xc2,0xe8,0xa3,0xeb,0xbf,0xb5,
0x72,0x84,0xbf,0xc1,0x8a,0x32,0x30,0x99,0xf7,0x58,0xfe,0x06,0xa8,0xdc,0xdc,0xab,
0xb5,0x57,0x03,0x33,0x87,0xce,0x54,0x55,0x6a,0x69,0x8a,0xc4,0xb7,0x2a,0xed,0x97,
0xb4,0x68,0xe7,0x52,0x7a,0x07,0x55,0x3b,0xa2,0x94,0xd6,0x5e,0xa1,0x61,0x80,0x08,
valid: no
In this case, the generated public key matches, but the generated signature is completely
In this case, the generated public key matches, but the generated signature is completely
different and does not validate.
### Curve25519
Random data:
* sk, or Secret Key
- sk, or Secret Key
Generated data:
* pk, or Public Key
- pk, or Public Key
Dump:
sk:
0x44,0xec,0x0b,0x0e,0xa2,0x0e,0x9c,0x5b,0x8c,0xce,0x7b,0x1d,0x68,0xae,0x0f,0x9e,
0x81,0xe2,0x04,0x76,0xda,0x87,0xa4,0x9e,0xc9,0x4f,0x3b,0xf9,0xc3,0x89,0x63,0x70,
ref10:
0x24,0x55,0x55,0xc0,0xf9,0x80,0xaf,0x02,0x43,0xee,0x8c,0x7f,0xc1,0xad,0x90,0x95,
0x57,0x91,0x14,0x2e,0xf2,0x14,0x22,0x80,0xdd,0x4e,0x3c,0x85,0x71,0x84,0x8c,0x62,
curved25519 diff:
0x12,0xd1,0x61,0x2b,0x16,0xb3,0xd8,0x29,0xf8,0xa3,0xba,0x70,0x4e,0x49,0x4f,0x43,
0xa1,0x3c,0x6b,0x42,0x11,0x61,0xcc,0x30,0x87,0x73,0x46,0xfb,0x85,0xc7,0x9a,0x35,
curved25519-sse2 diff:
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,____,
In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
implementation.
In this case, curved25519 is totally wrong, while curved25519-sse2 matches the reference
implementation.

28
external/openssl-3.6.0/conandata.yml vendored Normal file
View File

@@ -0,0 +1,28 @@
# Versions to keep:
# - The last patch version of the supported releases, as per https://openssl-library.org/source/
# - The last patch version of the current LTS release
# - The most recent FIPS validated version
# Note: please do not maintain multiple patch versions of the same minor,
# unless required by the above.
sources:
3.6.0: # pre-release
url: "https://github.com/openssl/openssl/releases/download/openssl-3.6.0-beta1/openssl-3.6.0-beta1.tar.gz"
sha256: 1cfcda5da5d7221861749113b5090038588784e82a3ba5f893e0c347e5bb1626
3.5.2: # LTS: keep until the next LTS is designated
url: "https://github.com/openssl/openssl/releases/download/openssl-3.5.2/openssl-3.5.2.tar.gz"
sha256: c53a47e5e441c930c3928cf7bf6fb00e5d129b630e0aa873b08258656e7345ec
3.4.2:
url: "https://github.com/openssl/openssl/releases/download/openssl-3.4.2/openssl-3.4.2.tar.gz"
sha256: 17b02459fc28be415470cccaae7434f3496cac1306b86b52c83886580e82834c
3.3.4:
url: "https://github.com/openssl/openssl/releases/download/openssl-3.3.4/openssl-3.3.4.tar.gz"
sha256: 8d1a5fc323d3fd351dc05458457fd48f78652d2a498e1d70ffea07b4d0eb3fa8
3.2.5:
url: "https://github.com/openssl/openssl/releases/download/openssl-3.2.5/openssl-3.2.5.tar.gz"
sha256: b36347d024a0f5bd09fefcd6af7a58bb30946080eb8ce8f7be78562190d09879
3.1.2: # Most recent FIPS validated as of 2025-03-14
url: "https://github.com/openssl/openssl/releases/download/openssl-3.1.2/openssl-3.1.2.tar.gz"
sha256: a0ce69b8b97ea6a35b96875235aa453b966ba3cba8af2de23657d8b6767d6539
3.0.17: # LTS: 3.5.1 should be used, but this is supported by OpenSSL until 2026-09-07
url: "https://github.com/openssl/openssl/releases/download/openssl-3.0.17/openssl-3.0.17.tar.gz"
sha256: dfdd77e4ea1b57ff3a6dbde6b0bdc3f31db5ac99e7fdd4eaf9e1fbb6ec2db8ce
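The version key selected at install time drives the download: the recipe's `source()` step looks the version up in this table and lets Conan verify the checksum. A minimal sketch of that flow, mirroring the `source()` method in the recipe below:

```python
# Minimal sketch, mirroring the recipe below: conandata.yml placed next to
# the conanfile is loaded automatically into self.conan_data, and get()
# downloads the tarball, verifies its sha256, and unpacks it; strip_root
# drops the top-level openssl-x.y.z/ directory from the archive.
from conan import ConanFile
from conan.tools.files import get

class Example(ConanFile):
    name = "openssl"

    def source(self):
        # self.version picks one url/sha256 pair, e.g. "3.6.0" above.
        get(self, **self.conan_data["sources"][self.version], strip_root=True)
```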
682
external/openssl-3.6.0/conanfile.py vendored Normal file
@@ -0,0 +1,682 @@
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name, is_apple_os, XCRun
from conan.tools.build import build_jobs
from conan.tools.files import chdir, copy, get, replace_in_file, rm, rmdir, save
from conan.tools.gnu import AutotoolsToolchain
from conan.tools.layout import basic_layout
from conan.tools.microsoft import is_msvc, msvc_runtime_flag, unix_path
from conan.tools.scm import Version
import fnmatch
import os
import textwrap
required_conan_version = ">=1.57.0"
class OpenSSLConan(ConanFile):
name = "openssl"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/openssl/openssl"
license = "Apache-2.0"
topics = ("ssl", "tls", "encryption", "security")
description = "A toolkit for the Transport Layer Security (TLS) and Secure Sockets Layer (SSL) protocols"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_weak_ssl_ciphers": [True, False],
"386": [True, False],
"capieng_dialog": [True, False],
"enable_capieng": [True, False],
"enable_trace": [True, False],
"no_aria": [True, False],
"no_apps": [True, False],
"no_autoload_config": [True, False],
"no_asm": [True, False],
"no_async": [True, False],
"no_blake2": [True, False],
"no_bf": [True, False],
"no_camellia": [True, False],
"no_chacha": [True, False],
"no_cms": [True, False],
"no_comp": [True, False],
"no_ct": [True, False],
"no_cast": [True, False],
"no_deprecated": [True, False],
"no_des": [True, False],
"no_dgram": [True, False],
"no_dh": [True, False],
"no_dsa": [True, False],
"no_dso": [True, False],
"no_ec": [True, False],
"no_ecdh": [True, False],
"no_ecdsa": [True, False],
"no_engine": [True, False],
"no_filenames": [True, False],
"no_fips": [True, False],
"no_gost": [True, False],
"no_idea": [True, False],
"no_legacy": [True, False],
"no_md2": [True, False],
"no_md4": [True, False],
"no_mdc2": [True, False],
"no_module": [True, False],
"no_ocsp": [True, False],
"no_pinshared": [True, False],
"no_rc2": [True, False],
"no_rc4": [True, False],
"no_rc5": [True, False],
"no_rfc3779": [True, False],
"no_rmd160": [True, False],
"no_sm2": [True, False],
"no_sm3": [True, False],
"no_sm4": [True, False],
"no_srp": [True, False],
"no_srtp": [True, False],
"no_sse2": [True, False],
"no_ssl": [True, False],
"no_stdio": [True, False],
"no_seed": [True, False],
"no_sock": [True, False],
"no_ssl3": [True, False],
"no_threads": [True, False],
"no_tls1": [True, False],
"no_ts": [True, False],
"no_whirlpool": [True, False],
"no_zlib": [True, False],
"openssldir": [None, "ANY"],
"tls_security_level": [None, 0, 1, 2, 3, 4, 5],
}
default_options = {key: False for key in options.keys()}
default_options["fPIC"] = True
default_options["no_md2"] = True
default_options["openssldir"] = None
default_options["tls_security_level"] = None
@property
def _is_clang_cl(self):
return self.settings.os == "Windows" and self.settings.compiler == "clang" and \
self.settings.compiler.get_safe("runtime")
@property
def _is_mingw(self):
return self.settings.os == "Windows" and self.settings.compiler == "gcc"
@property
def _use_nmake(self):
return self._is_clang_cl or is_msvc(self)
def config_options(self):
if self.settings.os != "Windows":
self.options.rm_safe("capieng_dialog")
self.options.rm_safe("enable_capieng")
else:
self.options.rm_safe("fPIC")
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
def layout(self):
basic_layout(self, src_folder="src")
def requirements(self):
if not self.options.no_zlib:
self.requires("zlib/[>=1.2.11 <2]")
def set_version(self):
if self.version:
return
self.version = "3.6.0"
def validate(self):
if self.settings.os == "iOS" and self.options.shared:
raise ConanInvalidConfiguration("OpenSSL 3 does not support building shared libraries for iOS")
def build_requirements(self):
if self.settings_build.os == "Windows":
if self.conf.get("user.openssl:windows_use_jom", False):
self.tool_requires("jom/[*]")
if not self.options.no_asm and self.settings.arch in ["x86", "x86_64"]:
self.tool_requires("nasm/2.16.01")
if self._use_nmake:
self.tool_requires("strawberryperl/5.32.1.1")
else:
self.win_bash = True
if not self.conf.get("tools.microsoft.bash:path", check_type=str):
self.tool_requires("msys2/cci.latest")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
@property
def _target(self):
target = f"conan-{self.settings.build_type}-{self.settings.os}-{self.settings.arch}-{self.settings.compiler}-{self.settings.compiler.version}"
if self._use_nmake:
target = f"VC-{target}" # VC- prefix is important as it's checked by Configure
if self._is_mingw:
target = f"mingw-{target}"
return target
@property
def _perlasm_scheme(self):
# right now, we need to tweak this for iOS & Android only, as they inherit from generic targets
if self.settings.os in ("iOS", "watchOS", "tvOS"):
return {
"armv7": "ios32",
"armv7s": "ios32",
"armv8": "ios64",
"armv8_32": "ios64",
"armv8.3": "ios64",
"armv7k": "ios32",
}.get(str(self.settings.arch), None)
elif self.settings.os == "Android":
return {
"armv7": "void",
"armv8": "linux64",
"mips": "o32",
"mips64": "64",
"x86": "android",
"x86_64": "elf",
}.get(str(self.settings.arch), None)
return None
@property
def _asm_target(self):
if self.settings.os in ("Android", "iOS", "watchOS", "tvOS"):
return {
"x86": "x86_asm" if self.settings.os == "Android" else None,
"x86_64": "x86_64_asm" if self.settings.os == "Android" else None,
"armv5el": "armv4_asm",
"armv5hf": "armv4_asm",
"armv6": "armv4_asm",
"armv7": "armv4_asm",
"armv7hf": "armv4_asm",
"armv7s": "armv4_asm",
"armv7k": "armv4_asm",
"armv8": "aarch64_asm",
"armv8_32": "aarch64_asm",
"armv8.3": "aarch64_asm",
"mips": "mips32_asm",
"mips64": "mips64_asm",
"sparc": "sparcv8_asm",
"sparcv9": "sparcv9_asm",
"ia64": "ia64_asm",
"ppc32be": "ppc32_asm",
"ppc32": "ppc32_asm",
"ppc64le": "ppc64_asm",
"ppc64": "ppc64_asm",
"s390": "s390x_asm",
"s390x": "s390x_asm"
            }.get(str(self.settings.arch), None)
@property
def _targets(self):
is_cygwin = self.settings.get_safe("os.subsystem") == "cygwin"
return {
"Linux-x86-clang": "linux-x86-clang",
"Linux-x86_64-clang": "linux-x86_64-clang",
"Linux-x86-*": "linux-x86",
"Linux-x86_64-*": "linux-x86_64",
"Linux-armv4-*": "linux-armv4",
"Linux-armv4i-*": "linux-armv4",
"Linux-armv5el-*": "linux-armv4",
"Linux-armv5hf-*": "linux-armv4",
"Linux-armv6-*": "linux-armv4",
"Linux-armv7-*": "linux-armv4",
"Linux-armv7hf-*": "linux-armv4",
"Linux-armv7s-*": "linux-armv4",
"Linux-armv7k-*": "linux-armv4",
"Linux-armv8-*": "linux-aarch64",
"Linux-armv8.3-*": "linux-aarch64",
"Linux-armv8-32-*": "linux-arm64ilp32",
"Linux-mips-*": "linux-mips32",
"Linux-mips64-*": "linux-mips64",
"Linux-ppc32-*": "linux-ppc32",
"Linux-ppc32le-*": "linux-pcc32",
"Linux-ppc32be-*": "linux-ppc32",
"Linux-ppc64-*": "linux-ppc64",
"Linux-ppc64le-*": "linux-ppc64le",
"Linux-pcc64be-*": "linux-pcc64",
"Linux-s390x-*": "linux64-s390x",
"Linux-e2k-*": "linux-generic64",
"Linux-sparc-*": "linux-sparcv8",
"Linux-sparcv9-*": "linux64-sparcv9",
"Linux-*-*": "linux-generic32",
"Macos-x86-*": "darwin-i386-cc",
"Macos-x86_64-*": "darwin64-x86_64-cc",
"Macos-ppc32-*": "darwin-ppc-cc",
"Macos-ppc32be-*": "darwin-ppc-cc",
"Macos-ppc64-*": "darwin64-ppc-cc",
"Macos-ppc64be-*": "darwin64-ppc-cc",
"Macos-armv8-*": "darwin64-arm64-cc",
"Macos-*-*": "darwin-common",
"iOS-x86_64-*": "darwin64-x86_64-cc",
"iOS-*-*": "iphoneos-cross",
"watchOS-*-*": "iphoneos-cross",
"tvOS-*-*": "iphoneos-cross",
# Android targets are very broken, see https://github.com/openssl/openssl/issues/7398
"Android-armv7-*": "linux-generic32",
"Android-armv7hf-*": "linux-generic32",
"Android-armv8-*": "linux-generic64",
"Android-x86-*": "linux-x86-clang",
"Android-x86_64-*": "linux-x86_64-clang",
"Android-mips-*": "linux-generic32",
"Android-mips64-*": "linux-generic64",
"Android-*-*": "linux-generic32",
"Windows-x86-gcc": "Cygwin-x86" if is_cygwin else "mingw",
"Windows-x86_64-gcc": "Cygwin-x86_64" if is_cygwin else "mingw64",
"Windows-*-gcc": "Cygwin-common" if is_cygwin else "mingw-common",
"Windows-ia64-Visual Studio": "VC-WIN64I", # Itanium
"Windows-x86-Visual Studio": "VC-WIN32",
"Windows-x86_64-Visual Studio": "VC-WIN64A",
"Windows-armv7-Visual Studio": "VC-WIN32-ARM",
"Windows-armv8-Visual Studio": "VC-WIN64-CLANGASM-ARM",
"Windows-*-Visual Studio": "VC-noCE-common",
"Windows-ia64-clang": "VC-WIN64I", # Itanium
"Windows-x86-clang": "VC-WIN32",
"Windows-x86_64-clang": "VC-WIN64A",
"Windows-armv7-clang": "VC-WIN32-ARM",
"Windows-armv8-clang": "VC-WIN64-ARM",
"Windows-*-clang": "VC-noCE-common",
"WindowsStore-x86-*": "VC-WIN32-UWP",
"WindowsStore-x86_64-*": "VC-WIN64A-UWP",
"WindowsStore-armv7-*": "VC-WIN32-ARM-UWP",
"WindowsStore-armv8-*": "VC-WIN64-ARM-UWP",
"WindowsStore-*-*": "VC-WIN32-ONECORE",
"WindowsCE-*-*": "VC-CE",
"SunOS-x86-gcc": "solaris-x86-gcc",
"SunOS-x86_64-gcc": "solaris64-x86_64-gcc",
"SunOS-sparc-gcc": "solaris-sparcv8-gcc",
"SunOS-sparcv9-gcc": "solaris64-sparcv9-gcc",
"SunOS-x86-suncc": "solaris-x86-cc",
"SunOS-x86_64-suncc": "solaris64-x86_64-cc",
"SunOS-sparc-suncc": "solaris-sparcv8-cc",
"SunOS-sparcv9-suncc": "solaris64-sparcv9-cc",
"SunOS-*-*": "solaris-common",
"*BSD-x86-*": "BSD-x86",
"*BSD-x86_64-*": "BSD-x86_64",
"*BSD-ia64-*": "BSD-ia64",
"*BSD-sparc-*": "BSD-sparcv8",
"*BSD-sparcv9-*": "BSD-sparcv9",
"*BSD-armv8-*": "BSD-generic64",
"*BSD-mips64-*": "BSD-generic64",
"*BSD-ppc64-*": "BSD-generic64",
"*BSD-ppc64le-*": "BSD-generic64",
"*BSD-ppc64be-*": "BSD-generic64",
"AIX-ppc32-gcc": "aix-gcc",
"AIX-ppc64-gcc": "aix64-gcc",
"AIX-pcc32-*": "aix-cc",
"AIX-ppc64-*": "aix64-cc",
"AIX-*-*": "aix-common",
"*BSD-*-*": "BSD-generic32",
"Emscripten-*-*": "cc",
"Neutrino-*-*": "BASE_unix",
}
@property
def _ancestor_target(self):
if "CONAN_OPENSSL_CONFIGURATION" in os.environ:
return os.environ["CONAN_OPENSSL_CONFIGURATION"]
compiler = "Visual Studio" if self.settings.compiler == "msvc" else self.settings.compiler
query = f"{self.settings.os}-{self.settings.arch}-{compiler}"
ancestor = next((self._targets[i] for i in self._targets if fnmatch.fnmatch(query, i)), None)
if not ancestor:
raise ConanInvalidConfiguration(
f"Unsupported configuration ({self.settings.os}/{self.settings.arch}/{self.settings.compiler}).\n"
f"Please open an issue at {self.url}.\n"
f"Alternatively, set the CONAN_OPENSSL_CONFIGURATION environment variable into your conan profile."
)
return ancestor
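The lookup above flattens the profile into an `os-arch-compiler` query string and takes the first wildcard key that matches, so the more specific entries in `_targets` must precede the catch-alls. A standalone illustration of that matching, with a trimmed-down table:

```python
# Standalone illustration of the fnmatch-based lookup; not part of the recipe.
import fnmatch

targets = {
    "Linux-x86_64-clang": "linux-x86_64-clang",
    "Linux-x86_64-*": "linux-x86_64",
    "Linux-armv8-*": "linux-aarch64",
    "Linux-*-*": "linux-generic32",
}

for query in ("Linux-x86_64-clang", "Linux-x86_64-gcc", "Linux-armv8-gcc"):
    # First pattern wins; dict insertion order is what keeps this correct.
    ancestor = next((v for k, v in targets.items() if fnmatch.fnmatch(query, k)), None)
    print(f"{query} -> {ancestor}")
# Linux-x86_64-clang -> linux-x86_64-clang
# Linux-x86_64-gcc   -> linux-x86_64
# Linux-armv8-gcc    -> linux-aarch64
```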
def _get_default_openssl_dir(self):
if self.settings.os == "Linux":
return "/etc/ssl"
return os.path.join(self.package_folder, "res")
def _adjust_path(self, path):
if self._use_nmake:
return path.replace("\\", "/")
return unix_path(self, path)
@property
def _configure_args(self):
openssldir = self.options.openssldir or self._get_default_openssl_dir()
openssldir = unix_path(self, openssldir) if self.win_bash else openssldir
args = [
f'"{self._target}"',
"shared" if self.options.shared else "no-shared",
"--debug" if self.settings.build_type == "Debug" else "--release",
"--prefix=/",
"--libdir=lib",
f"--openssldir=\"{openssldir}\"",
"no-threads" if self.options.no_threads else "threads",
f"PERL={self._perl}",
"no-unit-test",
"no-tests",
]
if self.settings.os == "Android":
args.append(f" -D__ANDROID_API__={str(self.settings.os.api_level)}") # see NOTES.ANDROID
if self.settings.os == "Windows":
if self.options.enable_capieng:
args.append("enable-capieng")
if self.options.capieng_dialog:
args.append("-DOPENSSL_CAPIENG_DIALOG=1")
else:
args.append("-fPIC" if self.options.get_safe("fPIC", True) else "no-pic")
args.append("no-fips" if self.options.get_safe("no_fips", True) else "enable-fips")
args.append("no-md2" if self.options.get_safe("no_md2", True) else "enable-md2")
if str(self.options.tls_security_level) != "None":
args.append(f"-DOPENSSL_TLS_SECURITY_LEVEL={self.options.tls_security_level}")
if self.options.get_safe("enable_trace"):
args.append("enable-trace")
if self.settings.os == "Neutrino":
args.append("no-asm -lsocket -latomic")
if not self.options.no_zlib:
zlib_cpp_info = self.dependencies["zlib"].cpp_info.aggregated_components()
include_path = self._adjust_path(zlib_cpp_info.includedirs[0])
is_shared_zlib = self.dependencies["zlib"].options.shared
# the --with-zlib-lib flag takes a different value depending on platform and if ZLIB is shared
# From https://github.com/openssl/openssl/blob/openssl-3.4.1/INSTALL.md#with-zlib-lib
# On Unix: the directory where the zlib library is (for -L flag)
# On Windows with static zlib: the path to the static library to link (assumed)
# On Windows with shared zlib: the leaf name of the dll (its loaded with LoadLibrary)
if self._use_nmake:
                # note: consider whether this should be "if on Windows"
# zlib1 is assumed to be the name of the zlib1.dll for all windows configurations
lib_path = self._adjust_path(os.path.join(zlib_cpp_info.libdirs[0], f"{zlib_cpp_info.libs[0]}.lib"))
zlib_lib_flag = "zlib1" if is_shared_zlib else lib_path
else:
# Just path, GNU like compilers will find the right file
zlib_lib_flag = self._adjust_path(zlib_cpp_info.libdirs[0])
zlib_configure_arg = "zlib-dynamic" if is_shared_zlib else "zlib"
args.append(zlib_configure_arg)
args.extend([
f'--with-zlib-include="{include_path}"',
f'--with-zlib-lib="{zlib_lib_flag}"',
])
for option_name in self.default_options.keys():
if self.options.get_safe(option_name, False) and option_name not in ("shared", "fPIC", "openssldir", "tls_security_level", "capieng_dialog", "enable_capieng", "zlib", "no_fips", "no_md2"):
self.output.info(f"Activated option: {option_name}")
args.append(option_name.replace("_", "-"))
return args
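For orientation, on a hypothetical Linux x86_64, gcc 13, Release profile with the default options, the list above renders to roughly the following `Configure` invocation (zlib paths abbreviated; illustrative only, the exact flags depend on the profile):

```python
# Illustrative reconstruction, not produced by the recipe verbatim.
example_configure = (
    'perl ./Configure "conan-Release-Linux-x86_64-gcc-13" no-shared '
    '--release --prefix=/ --libdir=lib --openssldir="/etc/ssl" threads '
    'PERL=perl no-unit-test no-tests -fPIC enable-fips no-md2 '
    'zlib --with-zlib-include="..." --with-zlib-lib="..."'
)
```

Note that the defaults yield `enable-fips` (since `no_fips` defaults to `False`), which is why `package()` later copies the FIPS provider module into `lib/ossl-modules`.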
def generate(self):
tc = AutotoolsToolchain(self)
env = tc.environment()
env.define_path("PERL", self._perl)
if self.settings.compiler == "apple-clang":
xcrun = XCRun(self)
env.define_path("CROSS_SDK", os.path.basename(xcrun.sdk_path))
env.define_path("CROSS_TOP", os.path.dirname(os.path.dirname(xcrun.sdk_path)))
if is_apple_os(self) and self.options.shared:
# Inject -headerpad_max_install_names for shared library, otherwise fix_apple_shared_install_name() may fail.
# See https://github.com/conan-io/conan-center-index/issues/27424
tc.extra_ldflags.append("-headerpad_max_install_names")
self._create_targets(tc.cflags, tc.cxxflags, tc.defines, tc.ldflags)
tc.generate(env)
def _create_targets(self, cflags, cxxflags, defines, ldflags):
config_template = textwrap.dedent("""\
{targets} = (
"{target}" => {{
inherit_from => {ancestor},
cflags => add("{cflags}"),
cxxflags => add("{cxxflags}"),
{defines}
lflags => add("{lflags}"),
{shared_target}
{shared_cflag}
{shared_extension}
{perlasm_scheme}
}},
);
""")
perlasm_scheme = ""
if self._perlasm_scheme:
perlasm_scheme = f'perlasm_scheme => "{self._perlasm_scheme}",'
defines = '", "'.join(defines)
defines = 'defines => add("%s"),' % defines if defines else ""
targets = "my %targets"
if self._asm_target:
ancestor = f'[ "{self._ancestor_target}", asm("{self._asm_target}") ]'
else:
ancestor = f'[ "{self._ancestor_target}" ]'
shared_cflag = ""
shared_extension = ""
shared_target = ""
if self.settings.os == "Neutrino":
if self.options.shared:
shared_extension = r'shared_extension => ".so.\$(SHLIB_VERSION_NUMBER)",'
shared_target = 'shared_target => "gnu-shared",'
if self.options.get_safe("fPIC", True):
shared_cflag = 'shared_cflag => "-fPIC",'
if self.settings.os in ["iOS", "tvOS", "watchOS"] and self.conf.get("tools.apple:enable_bitcode", check_type=bool):
cflags.append("-fembed-bitcode")
cxxflags.append("-fembed-bitcode")
config = config_template.format(
targets=targets,
target=self._target,
ancestor=ancestor,
cflags=" ".join(cflags),
cxxflags=" ".join(cxxflags),
defines=defines,
perlasm_scheme=perlasm_scheme,
shared_target=shared_target,
shared_extension=shared_extension,
shared_cflag=shared_cflag,
lflags=" ".join(ldflags)
)
self.output.info(f"using target: {self._target} -> {self._ancestor_target}")
self.output.info(config)
save(self, os.path.join(self.source_folder, "Configurations", "20-conan.conf"), config)
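For the same hypothetical Linux x86_64 gcc Release target, the template renders to a Perl fragment along these lines (flag values are assumed here and depend on the profile; the empty `defines`/`shared_*` slots render as blank lines, omitted below), which `Configure` merges with OpenSSL's built-in target table:

```python
# Illustrative rendering of Configurations/20-conan.conf; flag values assumed.
rendered = """\
my %targets = (
    "conan-Release-Linux-x86_64-gcc-13" => {
        inherit_from => [ "linux-x86_64" ],
        cflags => add("-O3"),
        cxxflags => add("-O3"),
        lflags => add(""),
    },
);
"""
```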
def _run_make(self, targets=None, parallel=True, install=False):
command = [self._make_program]
if install:
command.append(f"DESTDIR={self._adjust_path(self.package_folder)}")
if targets:
command.extend(targets)
if self._make_program in ["make", "jom"]:
command.append(f"-j{build_jobs(self)}" if parallel else "-j1")
self.run(" ".join(command), env="conanbuild")
@property
def _perl(self):
if self._use_nmake:
return self.dependencies.build["strawberryperl"].conf_info.get("user.strawberryperl:perl", check_type=str)
return "perl"
def _make(self):
with chdir(self, self.source_folder):
args = " ".join(self._configure_args)
if self._use_nmake:
self._replace_runtime_in_file(os.path.join("Configurations", "10-main.conf"))
self.run(f"{self._perl} ./Configure {args}", env="conanbuild")
if self._use_nmake:
# When `--prefix=/`, the scripts derive `\` without escaping, which
# causes issues on Windows
replace_in_file(self, "Makefile", "INSTALLTOP_dir=\\", "INSTALLTOP_dir=\\\\")
if Version(self.version) >= "3.3.0":
# replace backslashes in paths with forward slashes
mkinstallvars_pl = os.path.join(self.source_folder, "util", "mkinstallvars.pl")
if Version(self.version) >= "3.3.2":
replace_in_file(self, mkinstallvars_pl, "push @{$values{$k}}, $v;", """$v =~ s|\\\\|/|g; push @{$values{$k}}, $v;""")
replace_in_file(self, mkinstallvars_pl, "$values{$k} = $v;", """$v->[0] =~ s|\\\\|/|g; $values{$k} = $v;""")
else:
replace_in_file(self, mkinstallvars_pl, "$ENV{$k} = $v;", """$v =~ s|\\\\|/|g; $ENV{$k} = $v;""")
self._run_make()
def _make_install(self):
with chdir(self, self.source_folder):
self._run_make(targets=["install_sw"], parallel=False, install=True)
def build(self):
self._make()
configdata_pm = self._adjust_path(os.path.join(self.source_folder, "configdata.pm"))
self.run(f"{self._perl} {configdata_pm} --dump")
@property
def _make_program(self):
use_jom = self._use_nmake and self.conf.get("user.openssl:windows_use_jom", False)
if self._use_nmake:
return "jom" if use_jom else "nmake"
else:
return "make"
def _replace_runtime_in_file(self, filename):
runtime = msvc_runtime_flag(self)
for e in ["MDd", "MTd", "MD", "MT"]:
replace_in_file(self, filename, f"/{e} ", f"/{runtime} ", strict=False)
replace_in_file(self, filename, f"/{e}\"", f"/{runtime}\"", strict=False)
def package(self):
copy(self, "*LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
self._make_install()
if is_apple_os(self):
fix_apple_shared_install_name(self)
rm(self, "*.pdb", self.package_folder, "lib")
if self.options.shared:
libdir = os.path.join(self.package_folder, "lib")
for file in os.listdir(libdir):
if self._is_mingw and file.endswith(".dll.a"):
continue
if file.endswith(".a"):
os.unlink(os.path.join(libdir, file))
if not self.options.no_fips:
provdir = os.path.join(self.source_folder, "providers")
modules_dir = os.path.join(self.package_folder, "lib", "ossl-modules")
if self.settings.os == "Macos":
copy(self, "fips.dylib", src=provdir, dst=modules_dir)
elif self.settings.os == "Windows":
copy(self, "fips.dll", src=provdir, dst=modules_dir)
else:
copy(self, "fips.so", src=provdir, dst=modules_dir)
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
self._create_cmake_module_variables(
os.path.join(self.package_folder, self._module_file_rel_path)
)
def _create_cmake_module_variables(self, module_file):
content = textwrap.dedent("""\
set(OPENSSL_FOUND TRUE)
if(DEFINED OpenSSL_INCLUDE_DIR)
set(OPENSSL_INCLUDE_DIR ${OpenSSL_INCLUDE_DIR})
endif()
if(DEFINED OpenSSL_Crypto_LIBS)
set(OPENSSL_CRYPTO_LIBRARY ${OpenSSL_Crypto_LIBS})
set(OPENSSL_CRYPTO_LIBRARIES ${OpenSSL_Crypto_LIBS}
${OpenSSL_Crypto_DEPENDENCIES}
${OpenSSL_Crypto_FRAMEWORKS}
${OpenSSL_Crypto_SYSTEM_LIBS})
elseif(DEFINED openssl_OpenSSL_Crypto_LIBS_%(config)s)
set(OPENSSL_CRYPTO_LIBRARY ${openssl_OpenSSL_Crypto_LIBS_%(config)s})
set(OPENSSL_CRYPTO_LIBRARIES ${openssl_OpenSSL_Crypto_LIBS_%(config)s}
${openssl_OpenSSL_Crypto_DEPENDENCIES_%(config)s}
${openssl_OpenSSL_Crypto_FRAMEWORKS_%(config)s}
${openssl_OpenSSL_Crypto_SYSTEM_LIBS_%(config)s})
endif()
if(DEFINED OpenSSL_SSL_LIBS)
set(OPENSSL_SSL_LIBRARY ${OpenSSL_SSL_LIBS})
set(OPENSSL_SSL_LIBRARIES ${OpenSSL_SSL_LIBS}
${OpenSSL_SSL_DEPENDENCIES}
${OpenSSL_SSL_FRAMEWORKS}
${OpenSSL_SSL_SYSTEM_LIBS})
elseif(DEFINED openssl_OpenSSL_SSL_LIBS_%(config)s)
set(OPENSSL_SSL_LIBRARY ${openssl_OpenSSL_SSL_LIBS_%(config)s})
set(OPENSSL_SSL_LIBRARIES ${openssl_OpenSSL_SSL_LIBS_%(config)s}
${openssl_OpenSSL_SSL_DEPENDENCIES_%(config)s}
${openssl_OpenSSL_SSL_FRAMEWORKS_%(config)s}
${openssl_OpenSSL_SSL_SYSTEM_LIBS_%(config)s})
endif()
if(DEFINED OpenSSL_LIBRARIES)
set(OPENSSL_LIBRARIES ${OpenSSL_LIBRARIES})
endif()
if(DEFINED OpenSSL_VERSION)
set(OPENSSL_VERSION ${OpenSSL_VERSION})
endif()
"""% {"config":str(self.settings.build_type).upper()})
save(self, module_file, content)
@property
def _module_subfolder(self):
return os.path.join("lib", "cmake")
@property
def _module_file_rel_path(self):
return os.path.join(self._module_subfolder,
f"conan-official-{self.name}-variables.cmake")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "OpenSSL")
self.cpp_info.set_property("cmake_find_mode", "both")
self.cpp_info.set_property("pkg_config_name", "openssl")
self.cpp_info.set_property("cmake_build_modules", [self._module_file_rel_path])
self.cpp_info.components["ssl"].builddirs.append(self._module_subfolder)
self.cpp_info.components["ssl"].set_property("cmake_build_modules", [self._module_file_rel_path])
self.cpp_info.components["crypto"].builddirs.append(self._module_subfolder)
self.cpp_info.components["crypto"].set_property("cmake_build_modules", [self._module_file_rel_path])
if self._use_nmake:
self.cpp_info.components["ssl"].libs = ["libssl"]
self.cpp_info.components["crypto"].libs = ["libcrypto"]
else:
self.cpp_info.components["ssl"].libs = ["ssl"]
self.cpp_info.components["crypto"].libs = ["crypto"]
self.cpp_info.components["ssl"].requires = ["crypto"]
if not self.options.no_zlib:
self.cpp_info.components["crypto"].requires.append("zlib::zlib")
if self.settings.os == "Windows":
self.cpp_info.components["crypto"].system_libs.extend(["crypt32", "ws2_32", "advapi32", "user32", "bcrypt"])
elif self.settings.os == "Linux":
self.cpp_info.components["crypto"].system_libs.extend(["dl", "rt"])
self.cpp_info.components["ssl"].system_libs.append("dl")
if not self.options.no_threads:
self.cpp_info.components["crypto"].system_libs.append("pthread")
self.cpp_info.components["ssl"].system_libs.append("pthread")
elif self.settings.os == "Neutrino":
self.cpp_info.components["crypto"].system_libs.append("atomic")
self.cpp_info.components["ssl"].system_libs.append("atomic")
self.cpp_info.components["crypto"].system_libs.append("socket")
self.cpp_info.components["ssl"].system_libs.append("socket")
self.cpp_info.components["crypto"].set_property("cmake_target_name", "OpenSSL::Crypto")
self.cpp_info.components["crypto"].set_property("pkg_config_name", "libcrypto")
self.cpp_info.components["ssl"].set_property("cmake_target_name", "OpenSSL::SSL")
self.cpp_info.components["ssl"].set_property("pkg_config_name", "libssl")
openssl_modules_dir = os.path.join(self.package_folder, "lib", "ossl-modules")
self.runenv_info.define_path("OPENSSL_MODULES", openssl_modules_dir)
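With this `package_info()`, Conan's CMakeDeps generator emits config files so consumers can `find_package(OpenSSL)` and link `OpenSSL::SSL`/`OpenSSL::Crypto`, while `OPENSSL_MODULES` in the run environment points at the packaged provider modules. A hypothetical consumer conanfile (illustrative, standard Conan 2 API):

```python
# Hypothetical consumer; the CMakeLists.txt it drives would contain
# find_package(OpenSSL REQUIRED) and target_link_libraries(app OpenSSL::SSL).
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain

class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    requires = "openssl/3.6.0"

    def generate(self):
        CMakeDeps(self).generate()       # OpenSSL-config.cmake etc.
        CMakeToolchain(self).generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
```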
@@ -8,153 +8,189 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [0.6.0] - 2024-11-04
#### Added
- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See:
- Header file `include/secp256k1_musig.h` which defines the new API.
- Document `doc/musig.md` for further notes on API usage.
- Usage example `examples/musig.c`.
- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command.
- New module `musig` implements the MuSig2 multisignature scheme according to the [BIP 327 specification](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki). See:
- Header file `include/secp256k1_musig.h` which defines the new API.
- Document `doc/musig.md` for further notes on API usage.
- Usage example `examples/musig.c`.
- New CMake variable `SECP256K1_APPEND_LDFLAGS` for appending linker flags to the build command.
#### Changed
- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal.
- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++).
- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility.
- API functions now use a significantly more robust method to clear secrets from the stack before returning. However, secret clearing remains a best-effort security measure and cannot guarantee complete removal.
- Any type `secp256k1_foo` can now be forward-declared using `typedef struct secp256k1_foo secp256k1_foo;` (or also `struct secp256k1_foo;` in C++).
- Organized CMake build artifacts into dedicated directories (`bin/` for executables, `lib/` for libraries) to improve build output structure and Windows shared library compatibility.
#### Removed
- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API.
- Removed the `secp256k1_scratch_space` struct and its associated functions `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` because the scratch space was unused in the API.
#### ABI Compatibility
The symbols `secp256k1_scratch_space_create` and `secp256k1_scratch_space_destroy` were removed.
Otherwise, the library maintains backward compatibility with versions 0.3.x through 0.5.x.
## [0.5.1] - 2024-08-01
#### Added
- Added usage example for an ElligatorSwift key exchange.
- Added usage example for an ElligatorSwift key exchange.
#### Changed
- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly.
- The default size of the precomputed table for signing was changed from 22 KiB to 86 KiB. The size can be changed with the configure option `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
- "auto" is no longer an accepted value for the `--with-ecmult-window` and `--with-ecmult-gen-kb` configure options (this also applies to `SECP256K1_ECMULT_WINDOW_SIZE` and `SECP256K1_ECMULT_GEN_KB` in CMake). To achieve the same configuration as previously provided by the "auto" value, omit setting the configure option explicitly.
#### Fixed
- Fixed compilation when the extrakeys module is disabled.
- Fixed compilation when the extrakeys module is disabled.
#### ABI Compatibility
The ABI is backward compatible with versions 0.5.0, 0.4.x and 0.3.x.
## [0.5.0] - 2024-05-06
#### Added
- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order.
- New function `secp256k1_ec_pubkey_sort` that sorts public keys using lexicographic (of compressed serialization) order.
#### Changed
- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations.
- The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
- This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB).
- The implementation of the point multiplication algorithm used for signing and public key generation was changed, resulting in improved performance for those operations.
- The related configure option `--ecmult-gen-precision` was replaced with `--ecmult-gen-kb` (`SECP256K1_ECMULT_GEN_KB` for CMake).
- This changes the supported precomputed table sizes for these operations. The new supported sizes are 2 KiB, 22 KiB, or 86 KiB (while the old supported sizes were 32 KiB, 64 KiB, or 512 KiB).
#### ABI Compatibility
The ABI is backward compatible with versions 0.4.x and 0.3.x.
## [0.4.1] - 2023-12-21
#### Changed
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when handwritten x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
#### ABI Compatibility
The ABI is backward compatible with versions 0.4.0 and 0.3.x.
## [0.4.0] - 2023-09-04
#### Added
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
- Header file `include/secp256k1_ellswift.h` which defines the new API.
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
- New module `ellswift` implements ElligatorSwift encoding for public keys and x-only Diffie-Hellman key exchange for them.
ElligatorSwift permits representing secp256k1 public keys as 64-byte arrays which cannot be distinguished from uniformly random. See:
- Header file `include/secp256k1_ellswift.h` which defines the new API.
- Document `doc/ellswift.md` which explains the mathematical background of the scheme.
- The [paper](https://eprint.iacr.org/2022/759) on which the scheme is based.
- We now test the library with unreleased development snapshots of GCC and Clang. This gives us an early chance to catch miscompilations and constant-time issues introduced by the compiler (such as those that led to the previous two releases).
#### Fixed
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
- Fixed symbol visibility in Windows DLL builds, where three internal library symbols were wrongly exported.
#### Changed
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
- When consuming libsecp256k1 as a static library on Windows, the user must now define the `SECP256K1_STATIC` macro before including `secp256k1.h`.
#### ABI Compatibility
This release is backward compatible with the ABI of 0.3.0, 0.3.1, and 0.3.2. Symbol visibility is now believed to be handled properly on supported platforms and is now considered to be part of the ABI. Please report any improperly exported symbols as a bug.
## [0.3.2] - 2023-05-13
We strongly recommend updating to 0.3.2 if you use or plan to use GCC >=13 to compile libsecp256k1. When in doubt, check the GCC version using `gcc -v`.
#### Security
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
- Module `ecdh`: Fix "constant-timeness" issue with GCC 13.1 (and potentially future versions of GCC) that could leave applications using libsecp256k1's ECDH module vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow during ECDH computations when libsecp256k1 is compiled with GCC 13.1.
#### Fixed
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
- Fixed an old bug that permitted compilers to potentially output bad assembly code on x86_64. In theory, it could lead to a crash or a read of unrelated memory, but this has never been observed on any compilers so far.
#### Changed
- Various improvements and changes to CMake builds. CMake builds remain experimental.
- Made API versioning consistent with GNU Autotools builds.
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
- Added `SECP256K1_INSTALL` variable for controlling whether to install the build artefacts.
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
- Various improvements and changes to CMake builds. CMake builds remain experimental.
- Made API versioning consistent with GNU Autotools builds.
- Switched to `BUILD_SHARED_LIBS` variable for controlling whether to build a static or a shared library.
- Added `SECP256K1_INSTALL` variable for controlling whether to install the build artefacts.
- Renamed asm build option `arm` to `arm32`. Use `--with-asm=arm32` instead of `--with-asm=arm` (GNU Autotools), and `-DSECP256K1_ASM=arm32` instead of `-DSECP256K1_ASM=arm` (CMake).
#### ABI Compatibility
The ABI is compatible with versions 0.3.0 and 0.3.1.
## [0.3.1] - 2023-04-10
We strongly recommend updating to 0.3.1 if you use or plan to use Clang >=14 to compile libsecp256k1, e.g., Xcode >=14 on macOS has Clang >=14. When in doubt, check the Clang version using `clang -v`.
#### Security
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
- Fix "constant-timeness" issue with Clang >=14 that could leave applications using libsecp256k1 vulnerable to a timing side-channel attack. The fix avoids secret-dependent control flow and secret-dependent memory accesses in conditional moves of memory objects when libsecp256k1 is compiled with Clang >=14.
#### Added
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
- Added tests against [Project Wycheproof's](https://github.com/google/wycheproof/) set of ECDSA test vectors (Bitcoin "low-S" variant), a fixed set of test cases designed to trigger various edge cases.
#### Changed
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
- Increased minimum required CMake version to 3.13. CMake builds remain experimental.
#### ABI Compatibility
The ABI is compatible with version 0.3.0.
## [0.3.0] - 2023-03-08
#### Added
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
- Added experimental support for CMake builds. Traditional GNU Autotools builds (`./configure` and `make`) remain fully supported.
- Usage examples: Added a recommended method for securely clearing sensitive data, e.g., secret keys, from memory.
- Tests: Added a new test binary `noverify_tests`. This binary runs the tests without some additional checks present in the ordinary `tests` binary and is thereby closer to production binaries. The `noverify_tests` binary is automatically run as part of the `make check` target.
#### Fixed
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
- Fixed declarations of API variables for MSVC (`__declspec(dllimport)`). This fixes MSVC builds of programs which link against a libsecp256k1 DLL dynamically and use API variables (and not only API functions). Unfortunately, the MSVC linker now will emit warning `LNK4217` when trying to link against libsecp256k1 statically. Pass `/ignore:4217` to the linker to suppress this warning.
#### Changed
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
- Forbade cloning or destroying `secp256k1_context_static`. Create a new context instead of cloning the static context. (If this change breaks your code, your code is probably wrong.)
- Forbade randomizing (copies of) `secp256k1_context_static`. Randomizing a copy of `secp256k1_context_static` did not have any effect and did not provide defense-in-depth protection against side-channel attacks. Create a new context if you want to benefit from randomization.
#### Removed
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
- Removed the configuration header `src/libsecp256k1-config.h`. We recommend passing flags to `./configure` or `cmake` to set configuration options (see `./configure --help` or `cmake -LH`). If you cannot or do not want to use one of the supported build systems, pass configuration flags such as `-DSECP256K1_ENABLE_MODULE_SCHNORRSIG` manually to the compiler (see the file `configure.ac` for supported flags).
#### ABI Compatibility
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is *not* compatible with previous versions.
Due to changes in the API regarding `secp256k1_context_static` described above, the ABI is _not_ compatible with previous versions.
## [0.2.0] - 2022-12-12
#### Added
- Added usage examples for common use cases in a new `examples/` directory.
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
- Added usage examples for common use cases in a new `examples/` directory.
- Added `secp256k1_selftest`, to be used in conjunction with `secp256k1_context_static`.
- Added support for 128-bit wide multiplication on MSVC for x86_64 and arm64, giving roughly a 20% speedup on those platforms.
#### Changed
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
- Enabled modules `schnorrsig`, `extrakeys` and `ecdh` by default in `./configure`.
- The `secp256k1_nonce_function_rfc6979` nonce function, used by default by `secp256k1_ecdsa_sign`, now reduces the message hash modulo the group order to match the specification. This only affects improper use of ECDSA signing API.
#### Deprecated
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
- Deprecated context flags `SECP256K1_CONTEXT_VERIFY` and `SECP256K1_CONTEXT_SIGN`. Use `SECP256K1_CONTEXT_NONE` instead.
- Renamed `secp256k1_context_no_precomp` to `secp256k1_context_static`.
- Module `schnorrsig`: renamed `secp256k1_schnorrsig_sign` to `secp256k1_schnorrsig_sign32`.
#### ABI Compatibility
Since this is the first release, we do not compare application binary interfaces.
However, there are earlier unreleased versions of libsecp256k1 that are *not* ABI compatible with this version.
However, there are earlier unreleased versions of libsecp256k1 that are _not_ ABI compatible with this version.
## [0.1.0] - 2013-03-05 to 2021-12-25
@@ -1,5 +1,9 @@
{
"cmakeMinimumRequired": {"major": 3, "minor": 21, "patch": 0},
"cmakeMinimumRequired": {
"major": 3,
"minor": 21,
"patch": 0
},
"version": 3,
"configurePresets": [
{
@@ -12,15 +12,15 @@ The libsecp256k1 project welcomes contributions in the form of new functionality
It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable.
Contributors are recommended to provide the following in addition to the new code:
* **Specification:**
A specification can help significantly in reviewing the new code as it provides documentation and context.
It may justify various design decisions, give a motivation and outline security goals.
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
* **Security Arguments:**
In addition to defining the security goals, it should be argued that the new functionality meets these goals.
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
* **Relevance Arguments:**
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
- **Specification:**
A specification can help significantly in reviewing the new code as it provides documentation and context.
It may justify various design decisions, give a motivation and outline security goals.
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
- **Security Arguments:**
In addition to defining the security goals, it should be argued that the new functionality meets these goals.
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
- **Relevance Arguments:**
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
These are not the only factors taken into account when considering to add new functionality.
The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
@@ -44,36 +44,36 @@ The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Co
In addition, libsecp256k1 tries to maintain the following coding conventions:
* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
* As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)).
- No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
- The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
- Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
- Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
- Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
- As a rule of thumb, the default values for configuration options should target standard desktop machines and align with Bitcoin Core's defaults, and the tests should mostly exercise the default configuration (see [#1549](https://github.com/bitcoin-core/secp256k1/issues/1549#issuecomment-2200559257)).
#### Style conventions
* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
* New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
```C
void secp256k_foo(void) {
unsigned int x; /* declaration */
int y = 2*x; /* declaration */
x = 17; /* statement */
{
int a, b; /* declaration */
a = x + y; /* statement */
secp256k_bar(x, &b); /* statement */
}
}
```
* Use `unsigned int` instead of just `unsigned`.
* Use `void *ptr` instead of `void* ptr`.
* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
* User-facing comment lines in headers should be limited to 80 chars if possible.
* All identifiers in file scope should start with `secp256k1_`.
* Avoid trailing whitespace.
- Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
- New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
- The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
```C
void secp256k_foo(void) {
unsigned int x; /* declaration */
int y = 2*x; /* declaration */
x = 17; /* statement */
{
int a, b; /* declaration */
a = x + y; /* statement */
secp256k_bar(x, &b); /* statement */
}
}
```
- Use `unsigned int` instead of just `unsigned`.
- Use `void *ptr` instead of `void* ptr`.
- Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
- User-facing comment lines in headers should be limited to 80 chars if possible.
- All identifiers in file scope should start with `secp256k1_`.
- Avoid trailing whitespace.
### Tests
@@ -101,7 +101,7 @@ To create a HTML report with coloured and annotated source code:
#### Exhaustive tests
There are tests of several functions in which a small group replaces secp256k1.
These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
These tests are _exhaustive_ since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
### Benchmarks
@@ -1,5 +1,4 @@
libsecp256k1
============
# libsecp256k1
![Dependencies: None](https://img.shields.io/badge/dependencies-none-success)
[![irc.libera.chat #secp256k1](https://img.shields.io/badge/irc.libera.chat-%23secp256k1-success)](https://web.libera.chat/#secp256k1)
@@ -9,60 +8,59 @@ High-performance high-assurance C library for digital signatures and other crypt
This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.
Features:
* secp256k1 ECDSA signing/verification and key generation.
* Additive and multiplicative tweaking of secret/public keys.
* Serialization/parsing of secret keys, public keys, signatures.
* Constant time, constant memory access signing and public key generation.
* Derandomized ECDSA (via RFC6979 or with a caller provided function.)
* Very efficient implementation.
* Suitable for embedded systems.
* No runtime dependencies.
* Optional module for public key recovery.
* Optional module for ECDH key exchange.
* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
* Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
* Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki).
Implementation details
----------------------
- secp256k1 ECDSA signing/verification and key generation.
- Additive and multiplicative tweaking of secret/public keys.
- Serialization/parsing of secret keys, public keys, signatures.
- Constant time, constant memory access signing and public key generation.
- Derandomized ECDSA (via RFC6979 or with a caller provided function.)
- Very efficient implementation.
- Suitable for embedded systems.
- No runtime dependencies.
- Optional module for public key recovery.
- Optional module for ECDH key exchange.
- Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
- Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
- Optional module for MuSig2 Schnorr multi-signatures according to [BIP-327](https://github.com/bitcoin/bips/blob/master/bip-0327.mediawiki).
* General
* No runtime heap allocation.
* Extensive testing infrastructure.
* Structured to facilitate review and analysis.
* Intended to be portable to any system with a C89 compiler and uint64_t support.
* No use of floating types.
* Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
* Field operations
* Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
* Using 5 52-bit limbs
* Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
* This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
* Scalar operations
* Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
* Using 4 64-bit limbs (relying on __int128 support in the compiler).
* Using 8 32-bit limbs.
* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
* Group operations
* Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
* Use addition between points in Jacobian and affine coordinates where possible.
* Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
* Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
* Point multiplication for verification (a*P + b*G).
* Use wNAF notation for point multiplicands.
* Use a much larger window for multiples of G, using precomputed multiples.
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
* Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Point multiplication for signing
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
* Access the table with branch-free conditional moves so memory access is uniform.
* No data-dependent branches
* Optional runtime blinding which attempts to frustrate differential power analysis.
* The precomputed tables add and eventually subtract points for which no scalar (secret key) is known, preventing even an attacker with control over the secret key from controlling the data internally.
## Implementation details
Building with Autotools
-----------------------
- General
- No runtime heap allocation.
- Extensive testing infrastructure.
- Structured to facilitate review and analysis.
- Intended to be portable to any system with a C89 compiler and uint64_t support.
- No use of floating types.
- Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
- Field operations
- Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
- Using 5 52-bit limbs.
- Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
- This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
- Scalar operations
- Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
- Using 4 64-bit limbs (relying on \_\_int128 support in the compiler).
- Using 8 32-bit limbs.
- Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman).
- Group operations
- Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
- Use addition between points in Jacobian and affine coordinates where possible.
- Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
- Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
- Point multiplication for verification (a*P + b*G).
- Use wNAF notation for point multiplicands.
- Use a much larger window for multiples of G, using precomputed multiples.
- Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
- Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
- Point multiplication for signing
- Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
- Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
- Access the table with branch-free conditional moves so memory access is uniform.
- No data-dependent branches
- Optional runtime blinding which attempts to frustrate differential power analysis.
- The precomputed tables add and eventually subtract points for which no scalar (secret key) is known, preventing even an attacker with control over the secret key from controlling the data internally.
## Building with Autotools
$ ./autogen.sh
$ ./configure
@@ -72,8 +70,7 @@ Building with Autotools
To compile optional modules (such as Schnorr signatures), you need to run `./configure` with additional flags (such as `--enable-module-schnorrsig`). Run `./configure --help` to see the full list of available flags.
Building with CMake (experimental)
----------------------------------
## Building with CMake (experimental)
To maintain a pristine source tree, CMake encourages performing an out-of-source build by using a separate dedicated build tree.
@@ -109,18 +106,19 @@ In "Developer Command Prompt for VS 2022":
>cmake -G "Visual Studio 17 2022" -A x64 -S . -B build
>cmake --build build --config RelWithDebInfo
Usage examples
-----------
## Usage examples
Usage examples can be found in the [examples](examples) directory. To compile them you need to configure with `--enable-examples`.
* [ECDSA example](examples/ecdsa.c)
* [Schnorr signatures example](examples/schnorr.c)
* [Deriving a shared secret (ECDH) example](examples/ecdh.c)
* [ElligatorSwift key exchange example](examples/ellswift.c)
- [ECDSA example](examples/ecdsa.c)
- [Schnorr signatures example](examples/schnorr.c)
- [Deriving a shared secret (ECDH) example](examples/ecdh.c)
- [ElligatorSwift key exchange example](examples/ellswift.c)
To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`.
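For example, to build the examples together with both of those optional modules, the configure step might look like:

$ ./configure --enable-examples --enable-module-schnorrsig --enable-module-ecdh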
Benchmark
------------
## Benchmark
If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build.
To print the benchmark result to the command line:
@@ -131,12 +129,10 @@ To create a CSV file for the benchmark result :
$ ./bench_name | sed '2d;s/ \{1,\}//g' > bench_name.csv
Reporting a vulnerability
------------
## Reporting a vulnerability
See [SECURITY.md](SECURITY.md)
Contributing to libsecp256k1
------------
## Contributing to libsecp256k1
See [CONTRIBUTING.md](CONTRIBUTING.md)

View File

@@ -6,10 +6,10 @@ To report security issues send an email to secp256k1-security@bitcoincore.org (n
The following keys may be used to communicate sensitive information to developers:
| Name | Fingerprint |
|------|-------------|
| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 |
| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 |
| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 |
| Name | Fingerprint |
| ------------- | ------------------------------------------------- |
| Pieter Wuille | 133E AC17 9436 F14A 5CF1 B794 860F EB80 4E66 9320 |
| Jonas Nick | 36C7 1A37 C9D9 88BD E825 08D9 B1A7 0E4F 8DCD 0366 |
| Tim Ruffing | 09E0 3F87 1092 E40E 106E 902B 33BC 86AB 80FF 5516 |
You can import a key by running the following command with that individual's fingerprint: `gpg --keyserver hkps://keys.openpgp.org --recv-keys "<fingerprint>"`. Ensure that you put quotes around fingerprints containing spaces.

View File

@@ -5,17 +5,17 @@ construction in the
["SwiftEC: Shalluevan de Woestijne Indifferentiable Function To Elliptic Curves"](https://eprint.iacr.org/2022/759)
paper by Jorge Chávez-Saab, Francisco Rodríguez-Henríquez, and Mehdi Tibouchi.
* [1. Introduction](#1-introduction)
* [2. The decoding function](#2-the-decoding-function)
+ [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
* [3. The encoding function](#3-the-encoding-function)
+ [3.1 Switching to *v, w* coordinates](#31-switching-to-v-w-coordinates)
+ [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
+ [3.3 Finding the inverse](#33-finding-the-inverse)
+ [3.4 Dealing with special cases](#34-dealing-with-special-cases)
+ [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
* [4. Encoding and decoding full *(x, y)* coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
+ [4.1 Full *(x, y)* coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
- [1. Introduction](#1-introduction)
- [2. The decoding function](#2-the-decoding-function)
- [2.1 Decoding for `secp256k1`](#21-decoding-for-secp256k1)
- [3. The encoding function](#3-the-encoding-function)
- [3.1 Switching to _v, w_ coordinates](#31-switching-to-v-w-coordinates)
- [3.2 Avoiding computing all inverses](#32-avoiding-computing-all-inverses)
- [3.3 Finding the inverse](#33-finding-the-inverse)
- [3.4 Dealing with special cases](#34-dealing-with-special-cases)
- [3.5 Encoding for `secp256k1`](#35-encoding-for-secp256k1)
- [4. Encoding and decoding full _(x, y)_ coordinates](#4-encoding-and-decoding-full-x-y-coordinates)
- [4.1 Full _(x, y)_ coordinates for `secp256k1`](#41-full-x-y-coordinates-for-secp256k1)
## 1. Introduction
@@ -34,13 +34,14 @@ are taken modulo $p$), and then evaluating $F_u(t)$, which for every $u$ and $t$
x-coordinate on the curve. The functions $F_u$ will be defined in [Section 2](#2-the-decoding-function).
**Encoding** a given $x$ coordinate is conceptually done as follows:
* Loop:
* Pick a uniformly random field element $u.$
* Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to *8* elements.
* With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
* Select a uniformly random $t \in L$ and return $(u, t).$
This is the *ElligatorSwift* algorithm, here given for just x-coordinates. An extension to full
- Loop:
- Pick a uniformly random field element $u.$
- Compute the set $L = F_u^{-1}(x)$ of $t$ values for which $F_u(t) = x$, which may have up to _8_ elements.
- With probability $1 - \dfrac{\\#L}{8}$, restart the loop.
- Select a uniformly random $t \in L$ and return $(u, t).$
This is the _ElligatorSwift_ algorithm, here given for just x-coordinates. An extension to full
$(x, y)$ points will be given in [Section 4](#4-encoding-and-decoding-full-x-y-coordinates).
The algorithm finds a uniformly random $(u, t)$ among (almost all) those
for which $F_u(t) = x.$ Section 3.2 in the paper proves that the number of such encodings for
@@ -50,37 +51,40 @@ almost all x-coordinates on the curve (all but at most 39) is close to two times
## 2. The decoding function
First some definitions:
* $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
* For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
* Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
- $\mathbb{F}$ is the finite field of size $q$, of characteristic 5 or more, and $q \equiv 1 \mod 3.$
- For `secp256k1`, $q = 2^{256} - 2^{32} - 977$, which satisfies that requirement.
- Let $E$ be the elliptic curve of points $(x, y) \in \mathbb{F}^2$ for which $y^2 = x^3 + ax + b$, with $a$ and $b$
public constants, for which $\Delta_E = -16(4a^3 + 27b^2)$ is a square, and at least one of $(-b \pm \sqrt{-3 \Delta_E} / 36)/2$ is a square.
This implies that the order of $E$ is either odd, or a multiple of *4*.
This implies that the order of $E$ is either odd, or a multiple of _4_.
If $a=0$, this condition is always fulfilled.
* For `secp256k1`, $a=0$ and $b=7.$
* Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
* Let the function $h(x) = 3x^2 + 4a.$
* Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
* Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
* $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
* $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
- For `secp256k1`, $a=0$ and $b=7.$
- Let the function $g(x) = x^3 + ax + b$, so the $E$ curve equation is also $y^2 = g(x).$
- Let the function $h(x) = 3x^2 + 4a.$
- Define $V$ as the set of solutions $(x_1, x_2, x_3, z)$ to $z^2 = g(x_1)g(x_2)g(x_3).$
- Define $S_u$ as the set of solutions $(X, Y)$ to $X^2 + h(u)Y^2 = -g(u)$ and $Y \neq 0.$
- $P_u$ is a function from $\mathbb{F}$ to $S_u$ that will be defined below.
- $\psi_u$ is a function from $S_u$ to $V$ that will be defined below.
**Note**: In the paper:
* $F_u$ corresponds to $F_{0,u}$ there.
* $P_u(t)$ is called $P$ there.
* All $S_u$ sets together correspond to $S$ there.
* All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
- $F_u$ corresponds to $F_{0,u}$ there.
- $P_u(t)$ is called $P$ there.
- All $S_u$ sets together correspond to $S$ there.
- All $\psi_u$ functions together (operating on elements of $S$) correspond to $\psi$ there.
Note that for $V$, the left-hand side of the equation, $z^2$, is square, and thus the right-hand
side must also be square. As multiplying non-squares results in a square in $\mathbb{F}$,
out of the three right-hand side factors an even number must be non-squares.
This implies that exactly *1* or exactly *3* out of
This implies that exactly _1_ or exactly _3_ out of
$\\{g(x_1), g(x_2), g(x_3)\\}$ must be square, and thus that for any $(x_1,x_2,x_3,z) \in V$,
at least one of $\\{x_1, x_2, x_3\\}$ must be a valid x-coordinate on $E.$ There is one exception
to this, namely when $z=0$, but even then one of the three values is a valid x-coordinate.
**Define** the decoding function $F_u(t)$ as:
* Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
* Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
- Let $(x_1, x_2, x_3, z) = \psi_u(P_u(t)).$
- Return the first element $x$ of $(x_3, x_2, x_1)$ which is a valid x-coordinate on $E$ (i.e., $g(x)$ is square).
$P_u(t) = (X(u, t), Y(u, t))$, where:
@@ -98,12 +102,13 @@ Y(u, t) & = & \left\\{\begin{array}{ll}
$$
$P_u(t)$ is defined:
* For $a=0$, unless:
* $u = 0$ or $t = 0$ (division by zero)
* $g(u) = -t^2$ (would give $Y=0$).
* For $a \neq 0$, unless:
* $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
* $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
- For $a=0$, unless:
- $u = 0$ or $t = 0$ (division by zero)
- $g(u) = -t^2$ (would give $Y=0$).
- For $a \neq 0$, unless:
- $X_0(u) = 0$ or $h(u)t^2 = -1$ (division by zero)
- $Y_0(u) (1 - h(u)t^2) = 2X_0(u)t$ (would give $Y=0$).
The functions $X_0(u)$ and $Y_0(u)$ are defined in Appendix A of the paper, and depend on various properties of $E.$
@@ -123,20 +128,22 @@ $$
Put together and specialized for $a=0$ curves, decoding $(u, t)$ to an x-coordinate is:
**Define** $F_u(t)$ as:
* Let $X = \dfrac{u^3 + b - t^2}{2t}.$
* Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
* Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
- Let $X = \dfrac{u^3 + b - t^2}{2t}.$
- Let $Y = \dfrac{X + t}{u\sqrt{-3}}.$
- Return the first $x$ in $(u + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u}{2}, \dfrac{X}{2Y} - \dfrac{u}{2})$ for which $g(x)$ is square.
To make sure that every input decodes to a valid x-coordinate, we remap the inputs in case
$P_u$ is not defined (when $u=0$, $t=0$, or $g(u) = -t^2$):
**Define** $F_u(t)$ as:
* Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
* Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
* Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
* Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
- Let $u'=u$ if $u \neq 0$; $1$ otherwise (guaranteeing $u' \neq 0$).
- Let $t'=t$ if $t \neq 0$; $1$ otherwise (guaranteeing $t' \neq 0$).
- Let $t''=t'$ if $g(u') \neq -t'^2$; $2t'$ otherwise (guaranteeing $t'' \neq 0$ and $g(u') \neq -t''^2$).
- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
- Return the first $x$ in $(u' + 4Y^2, \dfrac{-X}{2Y} - \dfrac{u'}{2}, \dfrac{X}{2Y} - \dfrac{u'}{2})$ for which $x^3 + b$ is square.
The choices here are not strictly necessary. Just returning a fixed constant in any of the undefined cases would suffice,
but the approach here is simple enough and gives fairly uniform output even in these cases.
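For concreteness, here is a minimal Python sketch of the remapped decoder above, specialized for `secp256k1` ($a=0$, $b=7$). The constants and helpers (`Q`, `B`, `inv`, `sqrt_fe`, `SQRT_M3`) are illustrative, not part of the library API:

```python
Q = 2**256 - 2**32 - 977        # the secp256k1 field size q
B = 7                           # the curve constant b

def inv(x):
    return pow(x, Q - 2, Q)     # field inverse via Fermat's little theorem

def sqrt_fe(x):
    # q = 3 mod 4, so a candidate square root is x^((q+1)/4); returns
    # None when x is not a square.
    s = pow(x % Q, (Q + 1) // 4, Q)
    return s if s * s % Q == x % Q else None

SQRT_M3 = sqrt_fe(Q - 3)        # sqrt(-3) exists because q = 1 mod 3

def f_u(u, t):
    u = u % Q or 1                                   # remap u = 0
    t = t % Q or 1                                   # remap t = 0
    if (pow(u, 3, Q) + B + t * t) % Q == 0:
        t = 2 * t % Q                                # remap g(u) = -t^2
    X = (pow(u, 3, Q) + B - t * t) * inv(2 * t) % Q
    Y = (X + t) * inv(u * SQRT_M3) % Q               # Y != 0 after remapping
    for x in ((u + 4 * Y * Y) % Q,
              (-X * inv(2 * Y) - u * inv(2)) % Q,
              (X * inv(2 * Y) - u * inv(2)) % Q):
        if sqrt_fe(pow(x, 3, Q) + B) is not None:    # is g(x) square?
            return x
```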
@@ -150,10 +157,11 @@ in `secp256k1_ellswift_xswiftec_var` (which outputs the actual x-coordinate).
## 3. The encoding function
To implement $F_u^{-1}(x)$, the function to find the set of inverses $t$ for which $F_u(t) = x$, we have to reverse the process:
* Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
* Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
* For each of the found $t$ values, verify that $F_u(t) = x.$
* Return the remaining $t$ values.
- Find all the $(X, Y) \in S_u$ that could have given rise to $x$, through the $x_1$, $x_2$, or $x_3$ formulas in $\psi_u.$
- Map those $(X, Y)$ solutions to $t$ values using $P_u^{-1}(X, Y).$
- For each of the found $t$ values, verify that $F_u(t) = x.$
- Return the remaining $t$ values.
The function $P_u^{-1}$, which finds $t$ given $(X, Y) \in S_u$, is significantly simpler than $P_u:$
@@ -185,13 +193,14 @@ precedence over both. Because of this, the $g(-u-x)$ being square test for $x_1$
values round-trip back to the input $x$ correctly. This is the reason for choosing the $(x_3, x_2, x_1)$ precedence order in the decoder;
any order which does not place $x_3$ first requires more complicated round-trip checks in the encoder.
### 3.1 Switching to *v, w* coordinates
### 3.1 Switching to _v, w_ coordinates
Before working out the formulas for all this, we switch to different variables for $S_u.$ Let $v = (X/Y - u)/2$, and
$w = 2Y.$ Or in the other direction, $X = w(u/2 + v)$ and $Y = w/2:$
* $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
* For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
* $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
- $S_u'$ becomes the set of $(v, w)$ for which $w^2 (u^2 + uv + v^2 + a) = -g(u)$ and $w \neq 0.$
- For $a=0$ curves, $P_u^{-1}$ can be stated for $(v,w)$ as $P_u^{'-1}(v, w) = w\left(\frac{\sqrt{-3}-1}{2}u - v\right).$
- $\psi_u$ can be stated for $(v, w)$ as $\psi_u'(v, w) = (x_1, x_2, x_3, z)$, where
$$
\begin{array}{lcl}
@@ -204,34 +213,37 @@ $$
We can now write the expressions for finding $(v, w)$ given $x$ explicitly, by solving each of the $\\{x_1, x_2, x_3\\}$
expressions for $v$ or $w$, and using the $S_u'$ equation to find the other variable:
* Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
* Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
* Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
- Assuming $x = x_1$, we find $v = x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
- Assuming $x = x_2$, we find $v = -u-x$ and $w = \pm\sqrt{-g(u)/(u^2 + uv + v^2 + a)}$ (two solutions).
- Assuming $x = x_3$, we find $w = \pm\sqrt{x-u}$ and $v = -u/2 \pm \sqrt{-w^2(4g(u) + w^2h(u))}/(2w^2)$ (four solutions).
### 3.2 Avoiding computing all inverses
The *ElligatorSwift* algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
The _ElligatorSwift_ algorithm as stated in Section 1 requires the computation of $L = F_u^{-1}(x)$ (the
set of all $t$ such that $(u, t)$ decode to $x$) in full. This is unnecessary.
Observe that the procedure of restarting with probability $(1 - \frac{\\#L}{8})$ and otherwise returning a
uniformly random element from $L$ is actually equivalent to always padding $L$ with $\bot$ values up to length 8,
picking a uniformly random element from that, restarting whenever $\bot$ is picked:
**Define** *ElligatorSwift(x)* as:
* Loop:
* Pick a uniformly random field element $u.$
* Compute the set $L = F_u^{-1}(x).$
* Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
* Select a uniformly random $t \in T.$
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
**Define** _ElligatorSwift(x)_ as:
- Loop:
- Pick a uniformly random field element $u.$
- Compute the set $L = F_u^{-1}(x).$
- Let $T$ be the 8-element vector consisting of the elements of $L$, plus $8 - \\#L$ times $\\{\bot\\}.$
- Select a uniformly random $t \in T.$
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
Now notice that the order of elements in $T$ does not matter, as all we do is pick a uniformly
random element in it, so we do not need to have all $\bot$ values at the end.
As we have 8 distinct formulas for finding $(v, w)$ (taking the variants due to $\pm$ into account),
we can associate every index in $T$ with exactly one of those formulas, making sure that:
* Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
* For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
* In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
- Formulas that yield no solutions (due to division by zero or non-existing square roots) or invalid solutions are made to return $\bot.$
- For the $x_1$ and $x_2$ cases, if $g(-u-x)$ is a square, $\bot$ is returned instead (the round-trip check).
- In case multiple formulas would return the same non- $\bot$ result, all but one of those must be turned into $\bot$ to avoid biasing those.
The last condition above only occurs with negligible probability for cryptographically sized curves, but is interesting
to take into account as it allows exhaustive testing in small groups. See [Section 3.4](#34-dealing-with-special-cases)
@@ -240,12 +252,13 @@ for an analysis of all the negligible cases.
If we define $T = (G_{0,u}(x), G_{1,u}(x), \ldots, G_{7,u}(x))$, with each $G_{i,u}$ matching one of the formulas,
the loop can be simplified to only compute one of the inverses instead of all of them:
**Define** *ElligatorSwift(x)* as:
* Loop:
* Pick a uniformly random field element $u.$
* Pick a uniformly random integer $c$ in $[0,8).$
* Let $t = G_{c,u}(x).$
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
**Define** _ElligatorSwift(x)_ as:
- Loop:
- Pick a uniformly random field element $u.$
- Pick a uniformly random integer $c$ in $[0,8).$
- Let $t = G_{c,u}(x).$
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
This is implemented in `secp256k1_ellswift_xelligatorswift_var`.
@@ -256,18 +269,19 @@ Those are then repeated as $c=4$ through $c=7$ for the other sign of $w$ (noting
Ignoring the negligible cases, we get:
**Define** $G_{c,u}(x)$ as:
* If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
* If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
* If $c \in \\{0, 4\\}$ (the $x_1$ formula), let $v = x$; otherwise let $v = -u-x$ (the $x_2$ formula).
* Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
* Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
* Let $s = x-u.$
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
* Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
* Let $w = \sqrt{s}.$
* Depending on $c:$
* If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
* If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
- If $c \in \\{0, 1, 4, 5\\}$ (for $x_1$ and $x_2$ formulas):
- If $g(-u-x)$ is square, return $\bot$ (as $x_3$ would be valid and take precedence).
- If $c \in \\{0, 4\\}$ (the $x_1$ formula) let $v = x$, otherwise let $v = -u-x$ (the $x_2$ formula)
- Let $s = -g(u)/(u^2 + uv + v^2 + a)$ (using $s = w^2$ in what follows).
- Otherwise, when $c \in \\{2, 3, 6, 7\\}$ (for $x_3$ formulas):
- Let $s = x-u.$
- Let $r = \sqrt{-s(4g(u) + sh(u))}.$
- Let $v = (r/s - u)/2$ if $c \in \\{3, 7\\}$; $(-r/s - u)/2$ otherwise.
- Let $w = \sqrt{s}.$
- Depending on $c:$
- If $c \in \\{0, 1, 2, 3\\}:$ return $P_u^{'-1}(v, w).$
- If $c \in \\{4, 5, 6, 7\\}:$ return $P_u^{'-1}(v, -w).$
Whenever a square root of a non-square is taken, $\bot$ is returned; for both square roots this happens with roughly
50% probability on random inputs. Similarly, when a division by 0 would occur, $\bot$ is returned as well; this will only happen
@@ -284,20 +298,21 @@ transformation. Furthermore, that transformation has no effect on $s$ in the fir
as $u^2 + ux + x^2 + a = u^2 + u(-u-x) + (-u-x)^2 + a.$ Thus we can extract it out and move it down:
**Define** $G_{c,u}(x)$ as:
* If $c \in \\{0, 1, 4, 5\\}:$
* If $g(-u-x)$ is square, return $\bot.$
* Let $s = -g(u)/(u^2 + ux + x^2 + a).$
* Let $v = x.$
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
* Let $s = x-u.$
* Let $r = \sqrt{-s(4g(u) + sh(u))}.$
* Let $v = (r/s - u)/2.$
* Let $w = \sqrt{s}.$
* Depending on $c:$
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
* If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
* If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
- If $c \in \\{0, 1, 4, 5\\}:$
- If $g(-u-x)$ is square, return $\bot.$
- Let $s = -g(u)/(u^2 + ux + x^2 + a).$
- Let $v = x.$
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
- Let $s = x-u.$
- Let $r = \sqrt{-s(4g(u) + sh(u))}.$
- Let $v = (r/s - u)/2.$
- Let $w = \sqrt{s}.$
- Depending on $c:$
- If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w).$
- If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w).$
- If $c \in \\{4, 6\\}:$ return $P_u^{'-1}(v, -w).$
- If $c \in \\{5, 7\\}:$ return $P_u^{'-1}(-u-v, -w).$
This shows there will always be exactly 0, 4, or 8 $t$ values for a given $(u, x)$ input.
There can be 0, 1, or 2 $(v, w)$ pairs before invoking $P_u^{'-1}$, and each results in 4 distinct $t$ values.
@@ -310,58 +325,60 @@ we analyse them here. They generally fall into two categories: cases in which th
do not decode back to $x$ (or at least cannot guarantee that they do), and cases in which the encoder might produce the same
$t$ value for multiple $c$ inputs (thereby biasing that encoding):
* In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
* When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
- In the branch for $x_1$ and $x_2$ (where $c \in \\{0, 1, 4, 5\\}$):
- When $g(u) = 0$, we would have $s=w=Y=0$, which is not on $S_u.$ This is only possible on even-ordered curves.
Excluding this also removes the one condition under which the simplified check for $x_3$ on the curve
fails (namely when $g(x_1)=g(x_2)=0$ but $g(x_3)$ is not square).
This does exclude some valid encodings: when both $g(u)=0$ and $u^2+ux+x^2+a=0$ (also implying $g(x)=0$),
the $S_u'$ equation degenerates to $0 = 0$, and many valid $t$ values may exist. Yet, these cannot be targeted uniformly by the
encoder anyway as there will generally be more than 8.
* When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
- When $g(x) = 0$, the same $t$ would be produced as in the $x_3$ branch (where $c \in \\{2, 3, 6, 7\\}$) which we give precedence
as it can deal with $g(u)=0$.
This is again only possible on even-ordered curves.
* In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
* When $s=0$, a division by zero would occur.
* When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
- In the branch for $x_3$ (where $c \in \\{2, 3, 6, 7\\}$):
- When $s=0$, a division by zero would occur.
- When $v = -u-v$ and $c \in \\{3, 7\\}$, the same $t$ would be returned as in the $c \in \\{2, 6\\}$ cases.
It is equivalent to checking whether $r=0$.
This cannot occur in the $x_1$ or $x_2$ branches, as it would trigger the $g(-u-x)$ is square condition.
A similar concern for $w = -w$ does not exist, as $w=0$ is already impossible in both branches: in the first
it requires $g(u)=0$ which is already outlawed on even-ordered curves and impossible on others; in the second it would trigger division by zero.
* Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
* For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
* For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
- Curve-specific special cases also exist that need to be rejected, because they result in $(u,t)$ which is invalid to the decoder, or because of division by zero in the encoder:
- For $a=0$ curves, when $u=0$ or when $t=0$. The latter can only be reached by the encoder when $g(u)=0$, which requires an even-ordered curve.
- For $a \neq 0$ curves, when $X_0(u)=0$, when $h(u)t^2 = -1$, or when $w(u + 2v) = 2X_0(u)$ while also either $w \neq 2Y_0(u)$ or $h(u)=0$.
**Define** a version of $G_{c,u}(x)$ which deals with all these cases:
* If $a=0$ and $u=0$, return $\bot.$
* If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
* If $c \in \\{0, 1, 4, 5\\}:$
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
* If $g(-u-x)$ is square, return $\bot.$
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
* Let $v = x.$
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
* Let $s = x-u.$
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
* If $s = 0$, return $\bot.$
* Let $v = (r/s - u)/2.$
* Let $w = \sqrt{s}$; return $\bot$ if not square.
* If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
* Depending on $c:$
* If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
* If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
* If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
* If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
* If $a=0$ and $t=0$, return $\bot$ (even curves only).
* If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
* Return $t.$
- If $a=0$ and $u=0$, return $\bot.$
- If $a \neq 0$ and $X_0(u)=0$, return $\bot.$
- If $c \in \\{0, 1, 4, 5\\}:$
- If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
- If $g(-u-x)$ is square, return $\bot.$
- Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
- Let $v = x.$
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
- Let $s = x-u.$
- Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
- If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
- If $s = 0$, return $\bot.$
- Let $v = (r/s - u)/2.$
- Let $w = \sqrt{s}$; return $\bot$ if not square.
- If $a \neq 0$ and $w(u+2v) = 2X_0(u)$ and either $w \neq 2Y_0(u)$ or $h(u) = 0$, return $\bot.$
- Depending on $c:$
- If $c \in \\{0, 2\\}$, let $t = P_u^{'-1}(v, w).$
- If $c \in \\{1, 3\\}$, let $t = P_u^{'-1}(-u-v, w).$
- If $c \in \\{4, 6\\}$, let $t = P_u^{'-1}(v, -w).$
- If $c \in \\{5, 7\\}$, let $t = P_u^{'-1}(-u-v, -w).$
- If $a=0$ and $t=0$, return $\bot$ (even curves only).
- If $a \neq 0$ and $h(u)t^2 = -1$, return $\bot.$
- Return $t.$
Given any $u$, using this algorithm over all $x$ and $c$ values, every $t$ value will be reached exactly once,
for an $x$ for which $F_u(t) = x$ holds, except for these cases that will not be reached:
* All cases where $P_u(t)$ is not defined:
* For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
* For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
* When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
- All cases where $P_u(t)$ is not defined:
- For $a=0$ curves, when $u=0$, $t=0$, or $g(u) = -t^2.$
- For $a \neq 0$ curves, when $h(u)t^2 = -1$, $X_0(u) = 0$, or $Y_0(u) (1 - h(u) t^2) = 2X_0(u)t.$
- When $g(u)=0$, the potentially many $t$ values that decode to an $x$ satisfying $g(x)=0$ using the $x_2$ formula. These were excluded by the $g(u)=0$ condition in the $c \in \\{0, 1, 4, 5\\}$ branch.
These cases form a negligible subset of all $(u, t)$ for cryptographically sized curves.
@@ -370,40 +387,42 @@ These cases form a negligible subset of all $(u, t)$ for cryptographically sized
Specialized for odd-ordered $a=0$ curves:
**Define** $G_{c,u}(x)$ as:
* If $u=0$, return $\bot.$
* If $c \in \\{0, 1, 4, 5\\}:$
* If $(-u-x)^3 + b$ is square, return $\bot.$
* Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
* Let $v = x.$
* Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
* Let $s = x-u.$
* Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
* If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
* If $s = 0$, return $\bot.$
* Let $v = (r/s - u)/2.$
* Let $w = \sqrt{s}$; return $\bot$ if not square.
* Depending on $c:$
* If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
* If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
* If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
* If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
- If $u=0$, return $\bot.$
- If $c \in \\{0, 1, 4, 5\\}:$
- If $(-u-x)^3 + b$ is square, return $\bot.$
- Let $s = -(u^3 + b)/(u^2 + ux + x^2)$ (cannot cause division by 0).
- Let $v = x.$
- Otherwise, when $c \in \\{2, 3, 6, 7\\}:$
- Let $s = x-u.$
- Let $r = \sqrt{-s(4(u^3 + b) + 3su^2)}$; return $\bot$ if not square.
- If $c \in \\{3, 7\\}$ and $r=0$, return $\bot.$
- If $s = 0$, return $\bot.$
- Let $v = (r/s - u)/2.$
- Let $w = \sqrt{s}$; return $\bot$ if not square.
- Depending on $c:$
- If $c \in \\{0, 2\\}:$ return $w(\frac{\sqrt{-3}-1}{2}u - v).$
- If $c \in \\{1, 3\\}:$ return $w(\frac{\sqrt{-3}+1}{2}u + v).$
- If $c \in \\{4, 6\\}:$ return $w(\frac{-\sqrt{-3}+1}{2}u + v).$
- If $c \in \\{5, 7\\}:$ return $w(\frac{-\sqrt{-3}-1}{2}u - v).$
This is implemented in `secp256k1_ellswift_xswiftec_inv_var`.
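As an illustration (not the library code), this specialized $G_{c,u}(x)$ can be transcribed to Python fairly directly, reusing `Q`, `B`, `inv`, `sqrt_fe` and `SQRT_M3` from the decoder sketch earlier and returning `None` for the $\bot$ cases:

```python
def g_c_u(c, u, x):
    u, x = u % Q, x % Q
    if u == 0:
        return None
    if c in (0, 1, 4, 5):
        if sqrt_fe(pow((-u - x) % Q, 3, Q) + B) is not None:
            return None               # x_3 would be valid and take precedence
        # denominator cannot be 0 here, as argued above
        s = (Q - (pow(u, 3, Q) + B) % Q) * inv(u * u + u * x + x * x) % Q
        v = x
    else:
        s = (x - u) % Q
        r = sqrt_fe((Q - s) * (4 * (pow(u, 3, Q) + B) + 3 * s * u * u) % Q)
        if r is None or (c in (3, 7) and r == 0) or s == 0:
            return None
        v = (r * inv(s) - u) * inv(2) % Q
    w = sqrt_fe(s)
    if w is None:
        return None
    if c in (0, 2):
        return w * ((SQRT_M3 - 1) * inv(2) * u - v) % Q
    if c in (1, 3):
        return w * ((SQRT_M3 + 1) * inv(2) * u + v) % Q
    if c in (4, 6):
        return w * ((Q - SQRT_M3 + 1) * inv(2) * u + v) % Q
    return w * ((Q - SQRT_M3 - 1) * inv(2) * u - v) % Q
```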
And the x-only ElligatorSwift encoding algorithm is still:
**Define** *ElligatorSwift(x)* as:
* Loop:
* Pick a uniformly random field element $u.$
* Pick a uniformly random integer $c$ in $[0,8).$
* Let $t = G_{c,u}(x).$
* If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
**Define** _ElligatorSwift(x)_ as:
- Loop:
- Pick a uniformly random field element $u.$
- Pick a uniformly random integer $c$ in $[0,8).$
- Let $t = G_{c,u}(x).$
- If $t \neq \bot$, return $(u, t)$; restart loop otherwise.
Note that this logic does not take the remapped $u=0$, $t=0$, and $g(u) = -t^2$ cases into account; it just avoids them.
While it is not impossible to make the encoder target them, this would increase the maximum number of $t$ values for a given $(u, x)$
combination beyond 8, and thereby slow down the ElligatorSwift loop proportionally, for a negligible gain in uniformity.
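Using the `g_c_u` sketch above as the table of inverses, the encoding loop is then a direct transcription (again purely illustrative):

```python
import secrets

def elligator_swift(x):
    # Rejection-sample (u, c) until G_{c,u}(x) yields a t value.
    while True:
        u = secrets.randbelow(Q)       # uniformly random field element
        c = secrets.randbelow(8)
        t = g_c_u(c, u, x)
        if t is not None:
            return u, t
```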
## 4. Encoding and decoding full *(x, y)* coordinates
## 4. Encoding and decoding full _(x, y)_ coordinates
So far we have only addressed encoding and decoding x-coordinates, but in some cases an encoding
for full points with $(x, y)$ coordinates is desirable. It is possible to encode this information
@@ -422,30 +441,32 @@ four distinct $P_u^{'-1}$ calls in the definition of $G_{u,c}.$
To encode the sign of $y$ in the sign of $Y:$
**Define** *Decode(u, t)* for full $(x, y)$ as:
* Let $(X, Y) = P_u(t).$
* Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
* Let $y = \sqrt{g(x)}.$
* If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
**Define** _Decode(u, t)_ for full $(x, y)$ as:
- Let $(X, Y) = P_u(t).$
- Let $x$ be the first value in $(u + 4Y^2, \frac{-X}{2Y} - \frac{u}{2}, \frac{X}{2Y} - \frac{u}{2})$ for which $g(x)$ is square.
- Let $y = \sqrt{g(x)}.$
- If $sign(y) = sign(Y)$, return $(x, y)$; otherwise return $(x, -y).$
And encoding would be done using a $G_{c,u}(x, y)$ function defined as:
**Define** $G_{c,u}(x, y)$ as:
* If $c \in \\{0, 1\\}:$
* If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
* If $g(-u-x)$ is square, return $\bot.$
* Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
* Let $v = x.$
* Otherwise, when $c \in \\{2, 3\\}:$
* Let $s = x-u.$
* Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
* If $c = 3$ and $r = 0$, return $\bot.$
* Let $v = (r/s - u)/2.$
* Let $w = \sqrt{s}$; return $\bot$ if not square.
* Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
* Depending on $c:$
* If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
* If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
- If $c \in \\{0, 1\\}:$
- If $g(u) = 0$ or $g(x) = 0$, return $\bot$ (even curves only).
- If $g(-u-x)$ is square, return $\bot.$
- Let $s = -g(u)/(u^2 + ux + x^2 + a)$ (cannot cause division by zero).
- Let $v = x.$
- Otherwise, when $c \in \\{2, 3\\}:$
- Let $s = x-u.$
- Let $r = \sqrt{-s(4g(u) + sh(u))}$; return $\bot$ if not square.
- If $c = 3$ and $r = 0$, return $\bot.$
- Let $v = (r/s - u)/2.$
- Let $w = \sqrt{s}$; return $\bot$ if not square.
- Let $w' = w$ if $sign(w/2) = sign(y)$; $-w$ otherwise.
- Depending on $c:$
- If $c \in \\{0, 2\\}:$ return $P_u^{'-1}(v, w').$
- If $c \in \\{1, 3\\}:$ return $P_u^{'-1}(-u-v, w').$
Note that $c$ now only ranges $[0,4)$, as the sign of $w'$ is decided based on that of $y$, rather than on $c.$
This change makes some valid encodings unreachable: when $y = 0$ and $sign(Y) \neq sign(0)$.
@@ -454,22 +475,23 @@ In the above logic, $sign$ can be implemented in several ways, such as parity of
of the input field element (for prime-sized fields) or the quadratic residuosity (for fields where
$-1$ is not square). The choice does not matter, as long as it only takes on two possible values, and for $x \neq 0$ it holds that $sign(x) \neq sign(-x)$.
### 4.1 Full *(x, y)* coordinates for `secp256k1`
### 4.1 Full _(x, y)_ coordinates for `secp256k1`
For $a=0$ curves, there is another option. Note that for those,
the $P_u(t)$ function translates negations of $t$ to negations of (both) $X$ and $Y.$ Thus, we can use $sign(t)$ to
encode the y-coordinate directly. Combined with the earlier remapping to guarantee all inputs land on the curve, we get
as decoder:
**Define** *Decode(u, t)* as:
* Let $u'=u$ if $u \neq 0$; $1$ otherwise.
* Let $t'=t$ if $t \neq 0$; $1$ otherwise.
* Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
* Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
* Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
* Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
* Let $y = \sqrt{g(x)}.$
* Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
**Define** _Decode(u, t)_ as:
- Let $u'=u$ if $u \neq 0$; $1$ otherwise.
- Let $t'=t$ if $t \neq 0$; $1$ otherwise.
- Let $t''=t'$ if $u'^3 + b + t'^2 \neq 0$; $2t'$ otherwise.
- Let $X = \dfrac{u'^3 + b - t''^2}{2t''}.$
- Let $Y = \dfrac{X + t''}{u'\sqrt{-3}}.$
- Let $x$ be the first element of $(u' + 4Y^2, \frac{-X}{2Y} - \frac{u'}{2}, \frac{X}{2Y} - \frac{u'}{2})$ for which $g(x)$ is square.
- Let $y = \sqrt{g(x)}.$
- Return $(x, y)$ if $sign(y) = sign(t)$; $(x, -y)$ otherwise.
This is implemented in `secp256k1_ellswift_swiftec_var`. The $sign(x)$ function used is the parity of $x$ when represented as an integer in $[0,q).$
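A sketch of this full decoder, in the same illustrative style and reusing the helpers from the earlier sketches (`sign` here is the parity of the integer representation, as just stated):

```python
def decode(u, t):
    u0 = u % Q or 1                                  # remap u = 0
    t0 = t % Q or 1                                  # remap t = 0
    if (pow(u0, 3, Q) + B + t0 * t0) % Q == 0:
        t0 = 2 * t0 % Q                              # remap u'^3 + b + t'^2 = 0
    X = (pow(u0, 3, Q) + B - t0 * t0) * inv(2 * t0) % Q
    Y = (X + t0) * inv(u0 * SQRT_M3) % Q
    for x in ((u0 + 4 * Y * Y) % Q,
              (-X * inv(2 * Y) - u0 * inv(2)) % Q,
              (X * inv(2 * Y) - u0 * inv(2)) % Q):
        y = sqrt_fe(pow(x, 3, Q) + B)
        if y is not None:
            # pick the root of g(x) whose parity matches sign(t)
            return (x, y) if y % 2 == t % Q % 2 else (x, (Q - y) % Q)
```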

View File

@@ -1,5 +1,4 @@
Notes on the musig module API
===========================
# Notes on the musig module API
The following sections contain additional notes on the API of the musig module (`include/secp256k1_musig.h`).
A usage example can be found in `examples/musig.c`.

View File

@@ -2,7 +2,7 @@
This document outlines the process for releasing versions of the form `$MAJOR.$MINOR.$PATCH`.
We distinguish between two types of releases: *regular* and *maintenance* releases.
We distinguish between two types of releases: _regular_ and _maintenance_ releases.
Regular releases are releases of a new major or minor version as well as patches of the most recent release.
Maintenance releases, on the other hand, are required for patches of older releases.
@@ -15,6 +15,7 @@ This process also assumes that there will be no minor releases for old major rel
We aim to cut a regular release every 3-4 months, approximately twice as frequently as major Bitcoin Core releases. Every second release should be published one month before the feature freeze of the next major Bitcoin Core release, allowing sufficient time to update the library in Core.
## Sanity checks
Perform these checks when reviewing the release PR (see below):
1. Ensure `make distcheck` doesn't fail.
@@ -42,15 +43,15 @@ Perform these checks when reviewing the release PR (see below):
## Regular release
1. Open a PR to the master branch with a commit (using message `"release: prepare for $MAJOR.$MINOR.$PATCH"`, for example) that
* finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
* adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
* removing the `[Unreleased]` section header,
* ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and
* including an entry for `### ABI Compatibility` if it doesn't exist,
* sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and,
* if this is not a patch release,
* updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and
* updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
- finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
- adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
- removing the `[Unreleased]` section header,
- ensuring that the release notes are not missing entries (check the `needs-changelog` label on github), and
- including an entry for `### ABI Compatibility` if it doesn't exist,
- sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and,
- if this is not a patch release,
- updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac`, and
- updates `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_*` in `CMakeLists.txt`.
2. Perform the [sanity checks](#sanity-checks) on the PR branch.
3. After the PR is merged, tag the commit, and push the tag:
```
@@ -59,11 +60,12 @@ Perform these checks when reviewing the release PR (see below):
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
```
4. Open a PR to the master branch with a commit (using message `"release cleanup: bump version after $MAJOR.$MINOR.$PATCH"`, for example) that
* sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
* increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
* adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
- sets `_PKG_VERSION_IS_RELEASE` to `false` and increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`,
- increments the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`, and
- adds an `[Unreleased]` section header to the [CHANGELOG.md](../CHANGELOG.md).
If other maintainers are not present to approve the PR, it can be merged without ACKs.
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
6. Send an announcement email to the bitcoin-dev mailing list.
@@ -77,9 +79,9 @@ Note that bug fixes need to be backported only to releases for which no compatib
git push git@github.com:bitcoin-core/secp256k1.git $MAJOR.$MINOR
```
2. Open a pull request to the `$MAJOR.$MINOR` branch that
* includes the bug fixes,
* finalizes the release notes similar to a regular release,
* increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
- includes the bug fixes,
- finalizes the release notes similar to a regular release,
- increments `_PKG_VERSION_PATCH` and `_LIB_VERSION_REVISION` in `configure.ac`
and the `$PATCH` component of `project(libsecp256k1 VERSION ...)` and `${PROJECT_NAME}_LIB_VERSION_REVISION` in `CMakeLists.txt`
(with commit message `"release: bump versions for $MAJOR.$MINOR.$PATCH"`, for example).
3. Perform the [sanity checks](#sanity-checks) on the PR branch.
@@ -89,6 +91,6 @@ Note that bug fixes need to be backported only to releases for which no compatib
git tag -s v$MAJOR.$MINOR.$PATCH -m "libsecp256k1 $MAJOR.$MINOR.$PATCH"
git push git@github.com:bitcoin-core/secp256k1.git v$MAJOR.$MINOR.$PATCH
```
6. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
7. Send an announcement email to the bitcoin-dev mailing list.
8. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md).
5. Create a new GitHub release with a link to the corresponding entry in [CHANGELOG.md](../CHANGELOG.md).
6. Send an announcement email to the bitcoin-dev mailing list.
7. Open PR to the master branch that includes a commit (with commit message `"release notes: add $MAJOR.$MINOR.$PATCH"`, for example) that adds release notes to [CHANGELOG.md](../CHANGELOG.md).

View File

@@ -29,65 +29,67 @@ def gcd(f, g):
return abs(f)
```
It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop
keeps rewriting the variables *f* and *g* alongside a state variable *&delta;* that starts at *1*, until
*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a
It computes the greatest common divisor of an odd integer _f_ and any integer _g_. Its inner loop
keeps rewriting the variables _f_ and _g_ alongside a state variable _&delta;_ that starts at _1_, until
_g=0_ is reached. At that point, _|f|_ gives the GCD. Each of the transitions in the loop is called a
"division step" (referred to as divstep in what follows).
For example, *gcd(21, 14)* would be computed as:
- Start with *&delta;=1 f=21 g=14*
- Take the third branch: *&delta;=2 f=21 g=7*
- Take the first branch: *&delta;=-1 f=7 g=-7*
- Take the second branch: *&delta;=0 f=7 g=0*
- The answer *|f| = 7*.
For example, _gcd(21, 14)_ would be computed as:
- Start with _&delta;=1 f=21 g=14_
- Take the third branch: _&delta;=2 f=21 g=7_
- Take the first branch: _&delta;=-1 f=7 g=-7_
- Take the second branch: _&delta;=0 f=7 g=0_
- The answer _|f| = 7_.
Why it works:
- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper):
- (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or *(f,g+f)*, resulting in an even *g*.
- (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even).
- (a) If _g_ is odd, replace _(f,g)_ with _(g,g-f)_ or _(f,g+f)_, resulting in an even _g_.
- (b) Replace _(f,g)_ with _(f,g/2)_ (where _g_ is guaranteed to be even).
- Neither of those two operations changes the GCD:
- For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a&thinsp;c* and *g=b&thinsp;c* for some integers *a*
and *b*. As *(g,g-f)=(b&thinsp;c,(b-a)c)* and *(f,f+g)=(a&thinsp;c,(a+b)c)*, the result clearly still has
common factor *c*. Reasoning in the other direction shows that no common factor can be added by
- For (a), assume _gcd(f,g)=c_, then it must be the case that _f=a&thinsp;c_ and _g=b&thinsp;c_ for some integers _a_
and _b_. As _(g,g-f)=(b&thinsp;c,(b-a)c)_ and _(f,f+g)=(a&thinsp;c,(a+b)c)_, the result clearly still has
common factor _c_. Reasoning in the other direction shows that no common factor can be added by
doing so either.
- For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove
it from *g*.
- The algorithm will eventually converge to *g=0*. This is proven in the paper (see theorem G.3).
- It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the
gcd of *f'* and *0* is *|f'|* by definition, that is our answer.
- For (b), we know that _f_ is odd, so _gcd(f,g)_ clearly has no factor _2_, and we can remove
it from _g_.
- The algorithm will eventually converge to _g=0_. This is proven in the paper (see theorem G.3).
- It follows that eventually we find a final value _f'_ for which _gcd(f,g) = gcd(f',0)_. As the
gcd of _f'_ and _0_ is _|f'|_ by definition, that is our answer.
Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at
the low-order bits of the variables to decide the next steps, and being easy to make
constant-time (in more low-level languages than Python). The *&delta;* parameter is necessary to
constant-time (in more low-level languages than Python). The _&delta;_ parameter is necessary to
guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look
at high order bits.
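To make the example above easy to retrace, here is a small stand-alone helper (illustrative, not part of the library) that performs a single divstep exactly as in the loop body of `gcd`:

```python
def divstep(delta, f, g):
    if delta > 0 and g & 1:
        return 1 - delta, g, (g - f) // 2
    if g & 1:
        return 1 + delta, f, (g + f) // 2
    return 1 + delta, f, g // 2

# Tracing gcd(21, 14): (1,21,14) -> (2,21,7) -> (-1,7,-7) -> (0,7,0).
delta, f, g = 1, 21, 14
while g != 0:
    delta, f, g = divstep(delta, f, g)
assert abs(f) == 7
```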
Properties that will become important later:
- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*.
- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we
do not need to worry about rounding.
- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N*
bits of *f* and *g*, and on *&delta;*.
- Performing more divsteps than needed is not a problem, as _f_ does not change anymore after _g=0_.
- Only even numbers are divided by _2_. This means that when reasoning about it algebraically we
do not need to worry about rounding.
- At every point during the algorithm's execution the next _N_ steps only depend on the bottom _N_
bits of _f_ and _g_, and on _&delta;_.
## 2. From GCDs to modular inverses
We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number *a* such that *a&thinsp;x=1
mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is
prime and *0 < x < M*. In what follows, assume that the modular inverse exists.
We want an algorithm to compute the inverse _a_ of _x_ modulo _M_, i.e. the number _a_ such that _a&thinsp;x=1
mod M_. This inverse only exists if the GCD of _x_ and _M_ is _1_, but that is always the case if _M_ is
prime and _0 < x < M_. In what follows, assume that the modular inverse exists.
It turns out this inverse can be computed as a side effect of computing the GCD by keeping track
of how the internal variables can be written as linear combinations of the inputs at every step
(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)).
Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that *a&thinsp;x + b&thinsp;M = 1*.
Since the GCD is _1_, such an algorithm will compute numbers _a_ and _b_ such that _a&thinsp;x + b&thinsp;M = 1_.
Taking that expression *mod M* gives *a&thinsp;x mod M = 1*, and we see that *a* is the modular inverse of *x
mod M*.
mod M*.
A similar approach can be used to calculate modular inverses using the divsteps-based GCD
algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping
track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*.
*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M*
and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M
= 1*).
algorithm shown above, if the modulus _M_ is odd. To do so, compute _gcd(f=M,g=x)_, while keeping
track of extra variables _d_ and _e_, for which at every step _d = f/x (mod M)_ and _e = g/x (mod M)_.
_f/x_ here means the number which multiplied with _x_ gives _f mod M_. As _f_ and _g_ are initialized to _M_
and _x_ respectively, _d_ and _e_ just start off being _0_ (_M/x mod M = 0/x mod M = 0_) and _1_ (_x/x mod M
= 1_).
```python
def div2(M, x):
@@ -119,17 +121,16 @@ def modinv(M, x):
return (d * f) % M
```
Also note that this approach to track *d* and *e* throughout the computation to determine the inverse
Also note that this approach to track _d_ and _e_ throughout the computation to determine the inverse
is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the
entire computation is determined (see section 3 below) and the inverse is computed from that.
The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to
be faster at the level of optimization we're able to do in C.
## 3. Batching multiple divsteps
Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)*
to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
Every divstep can be expressed as a matrix multiplication, applying a transition matrix _(1/2 t)_
to both vectors _[f, g]_ and _[d, e]_ (see paragraph 8.1 in the paper):
```
t = [ u, v ]
@@ -142,15 +143,15 @@ to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper):
[ out_e ] [ in_e ]
```
where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is
taken. As above, the resulting *f* and *g* are always integers.
where _(u, v, q, r)_ is _(0, 2, -1, 1)_, _(2, 0, 1, 1)_, or _(2, 0, 0, 1)_, depending on which branch is
taken. As above, the resulting _f_ and _g_ are always integers.
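As a quick illustrative check of this correspondence: the first step of the earlier `gcd(21, 14)` trace took the third branch, whose matrix is `(u, v, q, r) = (2, 0, 0, 1)` scaled by `1/2`, and indeed it maps `(f, g) = (21, 14)` to `(21, 7)`:

```python
u, v, q, r = 2, 0, 0, 1
f, g = 21, 14
assert ((u * f + v * g) // 2, (q * f + r * g) // 2) == (21, 7)
```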
Performing multiple divsteps corresponds to a multiplication with the product of all the
individual divsteps' transition matrices. As each transition matrix consists of integers
divided by *2*, the product of these matrices will consist of integers divided by *2<sup>N</sup>* (see also
theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay
them: we compute the integer coefficients of the combined transition matrix scaled by *2<sup>N</sup>*, and
do one division by *2<sup>N</sup>* as a final step:
divided by _2_, the product of these matrices will consist of integers divided by _2<sup>N</sup>_ (see also
theorem 9.2 in the paper). These divisions are expensive when updating _d_ and _e_, so we delay
them: we compute the integer coefficients of the combined transition matrix scaled by _2<sup>N</sup>_, and
do one division by _2<sup>N</sup>_ as a final step:
```python
def divsteps_n_matrix(delta, f, g):
@@ -166,13 +167,13 @@ def divsteps_n_matrix(delta, f, g):
return delta, (u, v, q, r)
```
As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this
As the branches in the divsteps are completely determined by the bottom _N_ bits of _f_ and _g_, this
function to compute the transition matrix only needs to see those bottom bits. Furthermore all
intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*,
*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
integers could set *N=62* and compute the full transition matrix for 62 steps at once without any
intermediate results and outputs fit in _(N+1)_-bit numbers (unsigned for _f_ and _g_; signed for _u_, _v_,
_q_, and _r_) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit
integers could set _N=62_ and compute the full transition matrix for 62 steps at once without any
big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs
to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps.
to update the full-size _f_, _g_, _d_, and _e_ numbers once every _N_ steps.
We still need functions to compute:
@@ -184,8 +185,8 @@ We still need functions to compute:
[ out_e ] ( [ q, r ]) [ in_e ]
```
Because the divsteps transformation only ever divides even numbers by two, the result of *t&thinsp;[f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f*
and *g* will be multiples of *2<sup>N</sup>*, and division by *2<sup>N</sup>* is simply shifting them down:
Because the divsteps transformation only ever divides even numbers by two, the result of _t&thinsp;[f,g]_ is always even. When _t_ is a composition of _N_ divsteps, it follows that the resulting _f_
and _g_ will be multiples of _2<sup>N</sup>_, and division by _2<sup>N</sup>_ is simply shifting them down:
```python
def update_fg(f, g, t):
@@ -199,8 +200,8 @@ def update_fg(f, g, t):
return cf >> N, cg >> N
```
The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2<sup>N</sup> mod M*.
This is easy if we have precomputed *1/M mod 2<sup>N</sup>* (which always exists for odd *M*):
The same is not true for _d_ and _e_, and we need an equivalent of the `div2` function for division by _2<sup>N</sup> mod M_.
This is easy if we have precomputed _1/M mod 2<sup>N</sup>_ (which always exists for odd _M_):
```python
def div2n(M, Mi, x):
@@ -224,7 +225,7 @@ def update_de(d, e, t, M, Mi):
return div2n(M, Mi, cd), div2n(M, Mi, ce)
```
With all of those, we can write a version of `modinv` that performs *N* divsteps at once:
With all of those, we can write a version of `modinv` that performs _N_ divsteps at once:
```python3
def modinv(M, Mi, x):
@@ -242,20 +243,19 @@ def modinv(M, Mi, x):
return (d * f) % M
```
This means that in practice we'll always perform a multiple of _N_ divsteps. This is not a problem
because once _g=0_, further divsteps do not affect _f_, _g_, _d_, or _e_ anymore (only _&delta;_ keeps
increasing). For variable time code such excess iterations will be mostly optimized away in later
sections.
## 4. Avoiding modulus operations
So far, there are two places where we compute a remainder of big numbers modulo _M_: at the end of
`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating _d_ due to the
sign of _f_. These are relatively expensive operations when done generically.
To deal with the modulus operation in `div2n`, we simply stop requiring _d_ and _e_ to be in range
_[0,M)_ all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus
operation at the end:
```python
@@ -272,15 +272,15 @@ def update_de(d, e, t, M, Mi):
return cd >> N, ce >> N
```
Let's look at bounds on the ranges of these numbers. It can be shown that _|u|+|v|_ and _|q|+|r|_
never exceed _2<sup>N</sup>_ (see paragraph 8.3 in the paper), and thus a multiplication with _t_ will have
outputs whose absolute values are at most _2<sup>N</sup>_ times the maximum absolute input value. In case the
inputs _d_ and _e_ are in _(-M,M)_, which is certainly true for the initial values _d=0_ and _e=1_ assuming
_M > 1_, the multiplication results in numbers in range _(-2<sup>N</sup>M,2<sup>N</sup>M)_. Subtracting less than _2<sup>N</sup>_
times _M_ to cancel out _N_ bits brings that up to _(-2<sup>N+1</sup>M,2<sup>N</sup>M)_, and
dividing by _2<sup>N</sup>_ at the end takes it to _(-2M,M)_. Another application of `update_de` would take that
to _(-3M,2M)_, and so forth. This progressive expansion of the variables' ranges can be
counteracted by incrementing _d_ and _e_ by _M_ whenever they're negative:
```python
...
@@ -293,12 +293,12 @@ counteracted by incrementing *d* and *e* by *M* whenever they're negative:
...
```
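A sketch of `update_de` with these conditional increments, consistent with the range analysis above (variable names assumed):

```python
def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e] modulo M, for d, e in range (-2M,M)."""
    u, v, q, r = t
    # Shift the inputs from (-2M,M) into (-M,M) first:
    if d < 0:
        d += M
    if e < 0:
        e += M
    cd, ce = u * d + v * e, q * d + r * e
    # Add multiples of M to cancel out the bottom N bits of cd and ce:
    cd -= ((Mi * cd) % 2**N) * M
    ce -= ((Mi * ce) % 2**N) * M
    return cd >> N, ce >> N
```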
With inputs in _(-2M,M)_, they will first be shifted into range _(-M,M)_, which means that the
output will again be in _(-2M,M)_, and this remains the case regardless of how many `update_de`
invocations there are. In what follows, we will try to make this more efficient.
Note that increasing _d_ by _M_ is equal to incrementing _cd_ by _u&thinsp;M_ and _ce_ by _q&thinsp;M_. Similarly,
increasing _e_ by _M_ is equal to incrementing _cd_ by _v&thinsp;M_ and _ce_ by _r&thinsp;M_. So we could instead write:
```python
...
@@ -318,10 +318,10 @@ increasing *e* by *M* is equal to incrementing *cd* by *v&thinsp;M* and *ce* by
...
```
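In code, that substitution plausibly looks as follows (a sketch; only the compensation step changes relative to the previous version):

```python
def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e] modulo M, for d, e in range (-2M,M)."""
    u, v, q, r = t
    cd, ce = u * d + v * e, q * d + r * e
    # Compensate for negative d and e on cd and ce directly:
    if d < 0:
        cd += u * M
        ce += q * M
    if e < 0:
        cd += v * M
        ce += r * M
    # Add further multiples of M to cancel out the bottom N bits:
    cd -= ((Mi * cd) % 2**N) * M
    ce -= ((Mi * ce) % 2**N) * M
    return cd >> N, ce >> N
```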
Now note that we have two steps of corrections to _cd_ and _ce_ that add multiples of _M_: this
increment, and the decrement that cancels out bottom bits. The second one depends on the first
one, but they can still be efficiently combined by only computing the bottom bits of _cd_ and _ce_
at first, and using that to compute the final _md_, _me_ values:
```python
def update_de(d, e, t, M, Mi):
@@ -346,8 +346,8 @@ def update_de(d, e, t, M, Mi):
return cd >> N, ce >> N
```
One last optimization: we can avoid the _md&thinsp;M_ and _me&thinsp;M_ multiplications in the bottom bits of _cd_
and _ce_ by moving them to the _md_ and _me_ correction:
```python
...
@@ -362,10 +362,10 @@ and *ce* by moving them to the *md* and *me* correction:
...
```
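Putting the last two optimizations together, the final `update_de` plausibly reads as follows (a sketch; names assumed):

```python
def update_de(d, e, t, M, Mi):
    """Multiply matrix t/2^N with [d, e] modulo M, for d, e in range (-2M,M)."""
    u, v, q, r = t
    # Corrections (as multiples of M), starting with the sign compensation:
    md = (u if d < 0 else 0) + (v if e < 0 else 0)
    me = (q if d < 0 else 0) + (r if e < 0 else 0)
    # Only the bottom N bits of cd and ce are needed to finalize md and me:
    cd = (u * d + v * e) % 2**N
    ce = (q * d + r * e) % 2**N
    md -= (Mi * cd + md) % 2**N
    me -= (Mi * ce + me) % 2**N
    # Compute the full cd and ce, now including the corrections, and shift down:
    cd = u * d + v * e + M * md
    ce = q * d + r * e + M * me
    return cd >> N, ce >> N
```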
The resulting function takes _d_ and _e_ in range _(-2M,M)_ as inputs, and outputs values in the same
range. That also means that the _d_ value at the end of `modinv` will be in that range, while we want
a result in _[0,M)_. To do that, we need a normalization function. It's easy to integrate the
conditional negation of _d_ (based on the sign of _f_) into it as well:
```python
def normalize(sign, v, M):
@@ -391,22 +391,21 @@ And calling it in `modinv` is simply:
return normalize(f, d, M)
```
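The collapsed `normalize` plausibly performs two conditional additions around the conditional negation, e.g.:

```python
def normalize(sign, v, M):
    """Compute sign*v mod M, where v is in range (-2M,M); output in [0,M)."""
    assert sign == 1 or sign == -1
    if v < 0:       # v in (-2M,M)
        v += M
    if sign == -1:  # v in (-M,M)
        v = -v
    if v < 0:       # v in (-M,M)
        v += M
    return v        # v in [0,M)
```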
## 5. Constant-time operation
The primary selling point of the algorithm is fast constant-time operation. What code flow still
depends on the input data so far?
- the number of iterations of the while _g &ne; 0_ loop in `modinv`
- the branches inside `divsteps_n_matrix`
- the sign checks in `update_de`
- the sign checks in `normalize`
To make the while loop in `modinv` constant time it can be replaced with a constant number of
iterations. The paper proves (Theorem 11.2) that _741_ divsteps are sufficient for any _256_-bit
inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound _724_ is
in fact sufficient. Given that every loop iteration performs _N_ divsteps, it will run a total of
_&lceil;724/N&rceil;_ times.
To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise
operations (and hope the C compiler isn't smart enough to turn them back into branches; see
```python
@@ -425,10 +424,10 @@ divstep can be written instead as (compare to the inner loop of `gcd` in section
```
To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the
definition of negative numbers in two's complement, (_-v == ~v + 1_) holds for every number _v_. As
_-1_ in two's complement is all _1_ bits, bitflipping can be expressed as xor with _-1_. It follows
that _-v == (v ^ -1) - (-1)_. Thus, if we have a variable _c_ that takes on values _0_ or _-1_, then
_(v ^ c) - c_ is _v_ if _c=0_ and _-v_ if _c=-1_.
Using this we can write:
```python
@@ -444,13 +443,13 @@ in constant-time form as:
x = (f ^ c1) - c1
```
To use that trick, we need a helper mask variable _c1_ that resolves the condition _&delta;>0_ to _-1_
(if true) or _0_ (if false). We compute _c1_ using right shifting, which is equivalent to dividing by
the specified power of _2_ and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see
`assumptions.h` for tests that this is the case). Right shifting by _63_ thus maps all
numbers in range _[-2<sup>63</sup>,0)_ to _-1_, and numbers in range _[0,2<sup>63</sup>)_ to _0_.
Using the facts that _x&0=0_ and _x&(-1)=x_ (on two's complement systems again), we can write:
```python
if g & 1:
@@ -498,8 +497,8 @@ becomes:
```
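For example, the branch `if g & 1: g += f` can be rewritten branchlessly with such a mask (`c2` is a name assumed here):

```python
c2 = -(g & 1)  # -1 if g is odd, 0 if g is even
g += f & c2    # adds f only when g is odd, without branching
```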
It turns out that this can be implemented more efficiently by applying the substitution
_&eta;=-&delta;_. In this representation, negating _&delta;_ corresponds to negating _&eta;_, and incrementing
_&delta;_ corresponds to decrementing _&eta;_. This allows us to remove the negation in the _c1_
computation:
```python
@@ -519,12 +518,12 @@ computation:
g >>= 1
```
A variant of divsteps with better worst-case performance can be used instead: starting _&delta;_ at
_1/2_ instead of _1_. This reduces the worst case number of iterations to _590_ for _256_-bit inputs
(which can be shown using convex hull analysis). In this case, the substitution _&zeta;=-(&delta;+1/2)_
is used instead to keep the variable integral. Incrementing _&delta;_ by _1_ still translates to
decrementing _&zeta;_ by _1_, but negating _&delta;_ now corresponds to going from _&zeta;_ to _-(&zeta;+1)_, or
_~&zeta;_. Doing that conditionally based on _c3_ is simply:
```python
...
@@ -534,13 +533,12 @@ decrementing *&zeta;* by *1*, but negating *&delta;* now corresponds to going fr
```
By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to
also apply all _f_ operations to _u_, _v_ and all _g_ operations to _q_, _r_), a constant-time version of
`divsteps_n_matrix` is obtained. The full code will be in section 7.
These bit fiddling tricks can also be used to make the conditional negations and additions in
`update_de` and `normalize` constant-time.
## 6. Variable-time optimizations
In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time.
@@ -550,7 +548,7 @@ faster non-constant time `divsteps_n_matrix` function.
To do so, first consider yet another way of writing the inner loop of divstep operations in
`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use
the original version with initial _&delta;=1_ and _&eta;=-&delta;_ here.
```python
for _ in range(N):
@@ -562,7 +560,7 @@ for _ in range(N):
g >>= 1
```
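The middle of this loop is collapsed; based on the surrounding prose, it plausibly reads:

```python
for _ in range(N):
    if g & 1 and eta < 0:
        eta, f, g = -eta, g, -f
    if g & 1:
        g += f  # f is odd, so this cancels the bottom bit of g
    eta -= 1
    g >>= 1
```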
Whenever _g_ is even, the loop only shifts _g_ down and decreases _&eta;_. When _g_ ends in multiple zero
bits, these iterations can be consolidated into one step. This requires counting the bottom zero
bits efficiently, which is possible on most platforms; it is abstracted here as the function
`count_trailing_zeros`.
```python
@@ -595,20 +593,20 @@ while True:
# g is even now, and the eta decrement and g shift will happen in the next loop.
```
We can now remove multiple bottom _0_ bits from _g_ at once, but still need a full iteration whenever
there is a bottom _1_ bit. In what follows, we will get rid of multiple _1_ bits simultaneously as
well.
Observe that as long as _&eta; &geq; 0_, the loop does not modify _f_. Instead, it cancels out bottom
bits of _g_ and shifts them out, and decreases _&eta;_ and _i_ accordingly - interrupting only when _&eta;_
becomes negative, or when _i_ reaches _0_. Combined, this is equivalent to adding a multiple of _f_ to
_g_ to cancel out multiple bottom bits, and then shifting them out.
It is easy to find what that multiple is: we want a number _w_ such that _g+w&thinsp;f_ has a few bottom
zero bits. If that number of bits is _L_, we want _g+w&thinsp;f mod 2<sup>L</sup> = 0_, or _w = -g/f mod 2<sup>L</sup>_. Since _f_
is odd, such a _w_ exists for any _L_. _L_ cannot be more than _i_ steps (as we'd finish the loop before
doing more) or more than _&eta;+1_ steps (as we'd run `eta, f, g = -eta, g, -f` at that point), but
apart from that, we're only limited by the complexity of computing _w_.
This code demonstrates how to cancel up to 4 bits per step:
@@ -642,26 +640,25 @@ some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pag
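The demonstration code itself is collapsed in this hunk. A hypothetical reconstruction, where the table `NEGINV16` and the loop bookkeeping (`i` counts remaining divsteps) are assumptions based on the prose:

```python
# NEGINV16[(f & 15) // 2] is -1/f mod 16, for odd f:
NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1]
while True:
    zeros = min(i, count_trailing_zeros(g))
    eta, i, g = eta - zeros, i - zeros, g >> zeros
    if i == 0:
        break
    # g is odd now; swap first if eta is negative:
    if eta < 0:
        eta, f, g = -eta, g, -f
    # Cancel up to 4 bottom bits of g, limited by eta+1 and i:
    limit = min(min(eta + 1, i), 4)
    w = (g * NEGINV16[(f & 15) // 2]) % 2**limit  # w = -g/f mod 2^limit
    g += w * f  # g now has at least `limit` bottom zero bits
```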
Here we need the negated modular inverse, which is a simple transformation of those:
- Instead of a 3-bit table:
- _-f_ or _f ^ 6_
- Instead of a 4-bit table:
- _1 - f(f + 1)_
- _-(f + (((f + 1) & 4) << 1))_
- For larger tables the following technique can be used: if _w=-1/f mod 2<sup>L</sup>_, then _w(w&thinsp;f+2)_ is
_-1/f mod 2<sup>2L</sup>_. This allows extending the previous formulas (or tables). In particular we
have this 6-bit function (based on the 3-bit function above):
- _f(f<sup>2</sup> - 2)_
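These identities are easy to sanity-check exhaustively (an illustrative snippet, not part of the diff; `pow(a, -1, m)` computes a modular inverse and needs Python 3.8+):

```python
for f in range(1, 64, 2):
    neg_inv = lambda m: pow((-f) % m, -1, m)  # -1/f mod m
    assert (-f) % 8 == neg_inv(8)                             # 3-bit: -f
    assert (f ^ 6) % 8 == neg_inv(8)                          # 3-bit: f ^ 6
    assert (1 - f * (f + 1)) % 16 == neg_inv(16)              # 4-bit
    assert (-(f + (((f + 1) & 4) << 1))) % 16 == neg_inv(16)  # 4-bit
    assert (f * (f * f - 2)) % 64 == neg_inv(64)              # 6-bit
```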
This loop, again extended to also handle _u_, _v_, _q_, and _r_ alongside _f_ and _g_, placed in
`divsteps_n_matrix`, gives a significantly faster, but non-constant time version.
## 7. Final Python version
All together we need the following functions:
- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function
from section 2, but with its loop replaced by a variant of the constant-time divstep from
section 5, extended to handle _u_, _v_, _q_, _r_ (a full sketch of the result follows this list):
```python
def divsteps_n_matrix(zeta, f, g):
@@ -684,7 +681,7 @@ def divsteps_n_matrix(zeta, f, g):
return zeta, (u, v, q, r)
```
- The functions to update _f_ and _g_, and _d_ and _e_, from section 2 and section 4, with the constant-time
changes to `update_de` from section 5:
```python
@@ -723,7 +720,7 @@ def normalize(sign, v, M):
return v
```
- And finally the `modinv` function too, adapted to use _&zeta;_ instead of _&delta;_, and using the fixed
iteration count from section 5:
```python
@@ -772,20 +769,21 @@ def modinv_var(M, Mi, x):
```
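For reference, the constant-time `divsteps_n_matrix` from the first item above plausibly looks like this in full (a sketch combining the section 5 tricks; the `>> 63` mask emulates 64-bit signed arithmetic, which Python's exact integers tolerate):

```python
def divsteps_n_matrix(zeta, f, g):
    """Compute zeta and transition matrix t after N divsteps (multiplied by 2^N)."""
    u, v, q, r = 1, 0, 0, 1  # start with the identity matrix
    for _ in range(N):
        c1 = zeta >> 63  # -1 if zeta < 0 (i.e. delta > 0), else 0
        # x, y, z are conditionally negated versions of f, u, v:
        x, y, z = (f ^ c1) - c1, (u ^ c1) - c1, (v ^ c1) - c1
        c2 = -(g & 1)    # -1 if g is odd, else 0
        # Conditionally add x, y, z to g, q, r:
        g, q, r = g + (x & c2), q + (y & c2), r + (z & c2)
        c1 &= c2                # now -1 only in the "swap" case
        zeta = (zeta ^ c1) - 1  # -zeta-2 in the swap case, zeta-1 otherwise
        # Conditionally add g, q, r to f, u, v, completing the swap:
        f, u, v = f + (g & c1), u + (q & c1), v + (r & c1)
        # Shift g down; shift u and v up to keep the matrix scaled by 2^N:
        g, u, v = g >> 1, u << 1, v << 1
    return zeta, (u, v, q, r)
```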
## 8. From GCDs to Jacobi symbol
We can also use a similar approach to calculate Jacobi symbol _(x | M)_ by keeping track of an
extra variable _j_, for which at every step _(x | M) = j (g | f)_. As we update _f_ and _g_, we
make corresponding updates to _j_ using
[properties of the Jacobi symbol](https://en.wikipedia.org/wiki/Jacobi_symbol#Properties):
- _((g/2) | f)_ is either _(g | f)_ or _-(g | f)_, depending on the value of _f mod 8_ (negating if it's _3_ or _5_).
- _(f | g)_ is either _(g | f)_ or _-(g | f)_, depending on _f mod 4_ and _g mod 4_ (negating if both are _3_).
These updates depend only on the values of _f_ and _g_ modulo _4_ or _8_, and can thus be applied
very quickly, as long as we keep track of a few additional bits of _f_ and _g_. Overall, this
calculation is slightly simpler than the one for the modular inverse because we no longer need to
keep track of _d_ and _e_.
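As a sketch, the two updates could be tracked with helpers like these (names hypothetical):

```python
def jacobi_halve_g(j, f):
    # ((g/2) | f) = (g | f), negated iff f mod 8 is 3 or 5:
    return -j if (f & 7) in (3, 5) else j

def jacobi_swap(j, f, g):
    # (f | g) = (g | f), negated iff f mod 4 and g mod 4 are both 3:
    return -j if (f & 3) == 3 and (g & 3) == 3 else j
```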
However, one difficulty of this approach is that the Jacobi symbol _(a | n)_ is only defined for
positive odd integers _n_, whereas in the original safegcd algorithm, _f, g_ can take negative
values. We resolve this by using the following modified steps:
```python
@@ -799,15 +797,16 @@ values. We resolve this by using the following modified steps:
```
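The modified steps are collapsed in this hunk. Following section 8.4/E.5 of the paper, a posdivstep plausibly differs from divstep only in the "swap" case, adding _f_ rather than subtracting it so that _f_ and _g_ stay positive (a hedged sketch):

```python
def posdivstep(delta, f, g):
    if delta > 0 and g & 1:
        return 1 - delta, g, (g + f) // 2  # divstep computes (g - f) // 2 here
    elif g & 1:
        return 1 + delta, f, (g + f) // 2
    else:
        return 1 + delta, f, g // 2
```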
The algorithm is still correct, since the changed divstep, called a "posdivstep" (see section 8.4
and E.5 in the paper) preserves _gcd(f, g)_. However, there's no proof that the modified algorithm
will converge. The justification for posdivsteps is completely empirical: in practice, it appears
that the vast majority of nonzero inputs converge to _f=g=gcd(f<sub>0</sub>, g<sub>0</sub>)_ in a
number of steps proportional to their logarithm.
Note that:
- We require inputs to satisfy _gcd(x, M) = 1_, as otherwise _f=1_ is not reached.
- We require inputs _x &ne; 0_, because applying posdivstep with _g=0_ has no effect.
- We need to update the termination condition from _g=0_ to _f=1_.
We account for the possibility of nonconvergence by only performing a bounded number of
posdivsteps, and then falling back to square-root based Jacobi calculation if a solution has not
@@ -815,5 +814,5 @@ yet been found.
The optimizations in sections 3-7 above are described in the context of the original divsteps, but
in the C implementation we also adapt most of them (not including "avoiding modulus operations",
since it's not necessary to track _d, e_, and "constant-time operation", since we never calculate
Jacobi symbols for secret data) to the posdivsteps version.

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_ARCHIVE_H_INCLUDED
#define XRPL_BASICS_ARCHIVE_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2018 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_ARCHIVE_H_INCLUDED
#define RIPPLE_BASICS_ARCHIVE_H_INCLUDED
#include <boost/filesystem.hpp>

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_BASICCONFIG_H_INCLUDED
#define XRPL_BASICS_BASICCONFIG_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_BASICCONFIG_H_INCLUDED
#define RIPPLE_BASICS_BASICCONFIG_H_INCLUDED
#include <xrpl/basics/contract.h>

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_BLOB_H_INCLUDED
#define XRPL_BASICS_BLOB_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_BLOB_H_INCLUDED
#define RIPPLE_BASICS_BLOB_H_INCLUDED
#include <vector>

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_BUFFER_H_INCLUDED
#define XRPL_BASICS_BUFFER_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_BUFFER_H_INCLUDED
#define RIPPLE_BASICS_BUFFER_H_INCLUDED
#include <xrpl/basics/Slice.h>
#include <xrpl/beast/utility/instrumentation.h>

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_BYTEUTILITIES_H_INCLUDED
#define XRPL_BASICS_BYTEUTILITIES_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2018 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_BYTEUTILITIES_H_INCLUDED
#define RIPPLE_BASICS_BYTEUTILITIES_H_INCLUDED
namespace ripple {

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_COMPRESSIONALGORITHMS_H_INCLUDED
#define XRPL_COMPRESSIONALGORITHMS_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLED_COMPRESSIONALGORITHMS_H_INCLUDED
#define RIPPLED_COMPRESSIONALGORITHMS_H_INCLUDED
#include <xrpl/basics/contract.h>
@@ -146,4 +165,4 @@ lz4Decompress(
} // namespace ripple
#endif // XRPL_COMPRESSIONALGORITHMS_H_INCLUDED
#endif // RIPPLED_COMPRESSIONALGORITHMS_H_INCLUDED

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_COUNTEDOBJECT_H_INCLUDED
#define XRPL_BASICS_COUNTEDOBJECT_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_COUNTEDOBJECT_H_INCLUDED
#define RIPPLE_BASICS_COUNTEDOBJECT_H_INCLUDED
#include <xrpl/beast/type_name.h>

View File

@@ -1,5 +1,24 @@
#ifndef XRPL_BASICS_DECAYINGSAMPLE_H_INCLUDED
#define XRPL_BASICS_DECAYINGSAMPLE_H_INCLUDED
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_DECAYINGSAMPLE_H_INCLUDED
#define RIPPLE_BASICS_DECAYINGSAMPLE_H_INCLUDED
#include <chrono>
#include <cmath>

Some files were not shown because too many files have changed in this diff.