Mirror of https://github.com/XRPLF/rippled.git (synced 2025-11-04 19:25:51 +00:00)

Compare commits: 63 commits, comparing release-3.… with legleux/li…
Commits (SHA1): f51e2d3f30, 2dd1d682ac, 4cb1084c02, 8d1b3b3994, b39d7a6519, b0910e359e, 44e027e516, a10f42a3aa, efd4c1b95d, f8b4f692f1, 80a3ae6386, 48d38c1e2c, 553fb5be3b, efa917d9f3, bd3bc917f8, ed5d6f3e22, a8e4da0b11, 1dd60242de, 76611c3f46, 5efaf0c328, 0aa23933ea, 21f3c12d85, 7d5ed0cd8d, d9960d5ba0, 91fa6b2295, 76f774e22d, f4f7618173, 66f16469f9, 1845b1c656, e192ffe964, 2bf77cc8f6, 5e33ca56fd, 7c39c810eb, a7792ebcae, 83ee3788e1, ae719b86d3, dd722f8b3f, 30190a5feb, afb6e0e41b, 5523557226, b64707f53b, 0b113f371f, b4c894c1ba, 92281a4ede, e80642fc12, 640ce4988f, a422855ea7, 108f90586c, 519d1dbc34, 3d44758e5a, 97bc94a7f6, 34619f2504, 3509de9c5f, 459d0da010, 8637d606a4, 8456b8275e, 3c88786bb0, 46ba8a28fe, 5ecde3cf39, 620fb26823, 6b6b213cf5, f61086b43c, 176fd2b6e4
.github/actions/build-deps/action.yml (20 changed lines)

@@ -10,10 +10,17 @@ inputs:
   build_type:
     description: 'The build type to use ("Debug", "Release").'
     required: true
+  build_nproc:
+    description: "The number of processors to use for building."
+    required: true
   force_build:
     description: 'Force building of all dependencies ("true", "false").'
     required: false
     default: "false"
   log_verbosity:
     description: "The logging verbosity."
     required: false
     default: "verbose"

 runs:
   using: composite
@@ -22,16 +29,21 @@ runs:
       shell: bash
       env:
         BUILD_DIR: ${{ inputs.build_dir }}
+        BUILD_NPROC: ${{ inputs.build_nproc }}
         BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
         BUILD_TYPE: ${{ inputs.build_type }}
         LOG_VERBOSITY: ${{ inputs.log_verbosity }}
       run: |
         echo 'Installing dependencies.'
-        mkdir -p '${{ env.BUILD_DIR }}'
-        cd '${{ env.BUILD_DIR }}'
+        mkdir -p "${BUILD_DIR}"
+        cd "${BUILD_DIR}"
         conan install \
           --output-folder . \
-          --build=${{ env.BUILD_OPTION }} \
+          --build="${BUILD_OPTION}" \
           --options:host='&:tests=True' \
           --options:host='&:xrpld=True' \
-          --settings:all build_type='${{ env.BUILD_TYPE }}' \
+          --settings:all build_type="${BUILD_TYPE}" \
+          --conf:all tools.build:jobs=${BUILD_NPROC} \
           --conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
           --conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
           ..
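The action now expects the caller to compute `build_nproc` and pass it in. A minimal caller sketch, assuming the `get-nproc` helper action and the step id `nproc` that the reusable build workflow further below uses (the build type and subtract value here are illustrative):

```yaml
- name: Get number of processors
  uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
  id: nproc
  with:
    subtract: 2

- name: Build dependencies
  uses: ./.github/actions/build-deps
  with:
    build_dir: .build
    build_nproc: ${{ steps.nproc.outputs.nproc }}
    build_type: Release
    log_verbosity: verbose
```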
.github/actions/setup-conan/action.yml (4 changed lines)

@@ -39,8 +39,8 @@ runs:
         CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
         CONAN_REMOTE_URL: ${{ inputs.conan_remote_url }}
       run: |
-        echo "Adding Conan remote '${{ env.CONAN_REMOTE_NAME }}' at '${{ env.CONAN_REMOTE_URL }}'."
-        conan remote add --index 0 --force '${{ env.CONAN_REMOTE_NAME }}' '${{ env.CONAN_REMOTE_URL }}'
+        echo "Adding Conan remote '${CONAN_REMOTE_NAME}' at '${CONAN_REMOTE_URL}'."
+        conan remote add --index 0 --force "${CONAN_REMOTE_NAME}" "${CONAN_REMOTE_URL}"

         echo 'Listing Conan remotes.'
         conan remote list
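For context, a caller passes the remote coordinates as inputs; a minimal sketch, using the remote name and URL that `upload-conan-deps.yml` below defines as defaults:

```yaml
- name: Setup Conan
  uses: ./.github/actions/setup-conan
  with:
    conan_remote_name: xrplf
    conan_remote_url: https://conan.ripplex.io
```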
@@ -138,6 +138,7 @@ test.toplevel > test.csf
 test.toplevel > xrpl.json
 test.unit_test > xrpl.basics
 tests.libxrpl > xrpl.basics
 tests.libxrpl > xrpl.json
+tests.libxrpl > xrpl.net
 xrpl.json > xrpl.basics
 xrpl.ledger > xrpl.basics
.github/scripts/strategy-matrix/generate.py (8 changed lines)

@@ -74,14 +74,14 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
                 continue

             # RHEL:
-            # - 9.4 using GCC 12: Debug and Unity on linux/amd64.
-            # - 9.6 using Clang: Release and no Unity on linux/amd64.
+            # - 9 using GCC 12: Debug and Unity on linux/amd64.
+            # - 10 using Clang: Release and no Unity on linux/amd64.
             if os['distro_name'] == 'rhel':
                 skip = True
-                if os['distro_version'] == '9.4':
+                if os['distro_version'] == '9':
                     if f'{os['compiler_name']}-{os['compiler_version']}' == 'gcc-12' and build_type == 'Debug' and '-Dunity=ON' in cmake_args and architecture['platform'] == 'linux/amd64':
                         skip = False
-                elif os['distro_version'] == '9.6':
+                elif os['distro_version'] == '10':
                     if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-any' and build_type == 'Release' and '-Dunity=OFF' in cmake_args and architecture['platform'] == 'linux/amd64':
                         skip = False
                 if skip:
.github/scripts/strategy-matrix/linux.json (122 changed lines)

@@ -14,139 +14,169 @@
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "gcc",
-    "compiler_version": "12"
+    "compiler_version": "12",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "gcc",
-    "compiler_version": "13"
+    "compiler_version": "13",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "gcc",
-    "compiler_version": "14"
+    "compiler_version": "14",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "gcc",
-    "compiler_version": "15"
+    "compiler_version": "15",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "clang",
-    "compiler_version": "16"
+    "compiler_version": "16",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "clang",
-    "compiler_version": "17"
+    "compiler_version": "17",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "clang",
-    "compiler_version": "18"
+    "compiler_version": "18",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "clang",
-    "compiler_version": "19"
+    "compiler_version": "19",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "debian",
     "distro_version": "bookworm",
     "compiler_name": "clang",
-    "compiler_version": "20"
+    "compiler_version": "20",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "rhel",
-    "distro_version": "9.4",
+    "distro_version": "8",
     "compiler_name": "gcc",
-    "compiler_version": "12"
+    "compiler_version": "14",
+    "image_sha": "10e69b4"
   },
-  {
-    "distro_name": "rhel",
-    "distro_version": "9.4",
-    "compiler_name": "gcc",
-    "compiler_version": "13"
-  },
-  {
-    "distro_name": "rhel",
-    "distro_version": "9.4",
-    "compiler_name": "gcc",
-    "compiler_version": "14"
-  },
-  {
-    "distro_name": "rhel",
-    "distro_version": "9.6",
-    "compiler_name": "gcc",
-    "compiler_version": "13"
-  },
-  {
-    "distro_name": "rhel",
-    "distro_version": "9.6",
-    "compiler_name": "gcc",
-    "compiler_version": "14"
-  },
   {
     "distro_name": "rhel",
-    "distro_version": "9.4",
+    "distro_version": "8",
     "compiler_name": "clang",
-    "compiler_version": "any"
+    "compiler_version": "any",
+    "image_sha": "10e69b4"
   },
   {
     "distro_name": "rhel",
-    "distro_version": "9.6",
+    "distro_version": "9",
     "compiler_name": "gcc",
+    "compiler_version": "12",
+    "image_sha": "10e69b4"
   },
+  {
+    "distro_name": "rhel",
+    "distro_version": "9",
+    "compiler_name": "gcc",
+    "compiler_version": "13",
+    "image_sha": "10e69b4"
+  },
+  {
+    "distro_name": "rhel",
+    "distro_version": "9",
+    "compiler_name": "gcc",
+    "compiler_version": "14",
+    "image_sha": "10e69b4"
+  },
   {
     "distro_name": "rhel",
     "distro_version": "9",
     "compiler_name": "clang",
-    "compiler_version": "any"
+    "compiler_version": "any",
+    "image_sha": "10e69b4"
   },
+  {
+    "distro_name": "rhel",
+    "distro_version": "10",
+    "compiler_name": "gcc",
+    "compiler_version": "14",
+    "image_sha": "10e69b4"
+  },
+  {
+    "distro_name": "rhel",
+    "distro_version": "10",
+    "compiler_name": "clang",
+    "compiler_version": "any",
+    "image_sha": "10e69b4"
+  },
   {
     "distro_name": "ubuntu",
     "distro_version": "jammy",
     "compiler_name": "gcc",
-    "compiler_version": "12"
+    "compiler_version": "12",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "ubuntu",
     "distro_version": "noble",
     "compiler_name": "gcc",
-    "compiler_version": "13"
+    "compiler_version": "13",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "ubuntu",
     "distro_version": "noble",
     "compiler_name": "gcc",
-    "compiler_version": "14"
+    "compiler_version": "14",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "ubuntu",
     "distro_version": "noble",
     "compiler_name": "clang",
-    "compiler_version": "16"
+    "compiler_version": "16",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "ubuntu",
     "distro_version": "noble",
     "compiler_name": "clang",
-    "compiler_version": "17"
+    "compiler_version": "17",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "ubuntu",
     "distro_version": "noble",
     "compiler_name": "clang",
-    "compiler_version": "18"
+    "compiler_version": "18",
+    "image_sha": "6948666"
   },
   {
     "distro_name": "ubuntu",
     "distro_version": "noble",
     "compiler_name": "clang",
-    "compiler_version": "19"
+    "compiler_version": "19",
+    "image_sha": "6948666"
   }
 ],
 "build_type": ["Debug", "Release"],
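The new `image_sha` field pins the CI container image per matrix entry. It is consumed when the container reference is composed (see the `reusable-build-test.yml` and `upload-conan-deps.yml` hunks further below); a sketch of how an entry ends up as a container tag, with the resulting tag shown only as an illustrative example:

```yaml
# How a matrix entry's image_sha becomes the image tag suffix:
image: ${{ format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) }}
# e.g. the debian/bookworm/gcc/12 entry above would resolve to
#   ghcr.io/xrplf/ci/debian-bookworm:gcc-12-sha-6948666
```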
.github/scripts/strategy-matrix/macos.json (3 changed lines)

@@ -10,7 +10,8 @@
     "distro_name": "macos",
     "distro_version": "",
     "compiler_name": "",
-    "compiler_version": ""
+    "compiler_version": "",
+    "image_sha": ""
   }
 ],
 "build_type": ["Debug", "Release"],
.github/scripts/strategy-matrix/windows.json (3 changed lines)

@@ -10,7 +10,8 @@
     "distro_name": "windows",
     "distro_version": "",
     "compiler_name": "",
-    "compiler_version": ""
+    "compiler_version": "",
+    "image_sha": ""
   }
 ],
 "build_type": ["Debug", "Release"],
.github/workflows/on-pr.yml (71 changed lines)

@@ -1,10 +1,13 @@
-# This workflow runs all workflows to check, build and test the project on
-# various Linux flavors, as well as on MacOS and Windows, on every push to a
+# This workflow runs all workflows to check, build, package and test the project on
+# various Linux flavors, as well as on macOS and Windows, on every push to a
 # user branch. However, it will not run if the pull request is a draft unless it
 # has the 'DraftRunCI' label.
 name: PR

 on:
   push:
     branches:
+      - legleux/linux_packages
   merge_group:
     types:
       - checks_requested
@@ -62,6 +65,7 @@ jobs:
             .github/workflows/reusable-build.yml
             .github/workflows/reusable-build-test-config.yml
             .github/workflows/reusable-build-test.yml
+            .github/workflows/reusable-build-pkg.yml
             .github/workflows/reusable-strategy-matrix.yml
             .github/workflows/reusable-test.yml
             .codecov.yml
@@ -69,6 +73,7 @@ jobs:
             conan/**
             external/**
             include/**
+            pkgs/**
             src/**
             tests/**
             CMakeLists.txt
@@ -93,39 +98,57 @@ jobs:
     outputs:
       go: ${{ steps.go.outputs.go == 'true' }}

-  check-levelization:
-    needs: should-run
-    if: ${{ needs.should-run.outputs.go == 'true' }}
-    uses: ./.github/workflows/reusable-check-levelization.yml
+  # check-levelization:
+  #   needs: should-run
+  #   if: ${{ needs.should-run.outputs.go == 'true' }}
+  #   uses: ./.github/workflows/reusable-check-levelization.yml

-  build-test:
+  # build-test:
+  #   needs: should-run
+  #   if: ${{ needs.should-run.outputs.go == 'true' }}
+  #   uses: ./.github/workflows/reusable-build-test.yml
+  #   strategy:
+  #     matrix:
+  #       os: [linux, macos, windows]
+  #   with:
+  #     os: ${{ matrix.os }}
+  #   secrets:
+  #     CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

+  build-package:
+    name: Build ${{ matrix.pkg_type }}-${{ matrix.arch }} packages
     needs: should-run
     if: ${{ needs.should-run.outputs.go == 'true' }}
-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/reusable-build-pkg.yml
+    secrets: inherit
     strategy:
       fail-fast: false
       matrix:
-        os: [linux, macos, windows]
+        # pkg_type: [rpm]
+        pkg_type: [deb, rpm]
+        arch: [amd64]
+        # arch: [amd64, arm64]
     with:
-      os: ${{ matrix.os }}
-    secrets:
-      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+      pkg_type: ${{ matrix.pkg_type }}
+      arch: ${{ matrix.arch }}

-  notify-clio:
-    needs:
-      - should-run
-      - build-test
-    if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
-    uses: ./.github/workflows/reusable-notify-clio.yml
-    secrets:
-      clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
-      conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
-      conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
+  # notify-clio:
+  #   needs:
+  #     - should-run
+  #     - build-test
+  #   if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
+  #   uses: ./.github/workflows/reusable-notify-clio.yml
+  #   secrets:
+  #     clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
+  #     conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
+  #     conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}

   passed:
     if: failure() || cancelled()
     needs:
-      - build-test
-      - check-levelization
+      # - build-test
+      # - check-levelization
+      - build-package
     runs-on: ubuntu-latest
     steps:
       - name: Fail
.github/workflows/on-trigger.yml (51 changed lines)

@@ -9,9 +9,10 @@ name: Trigger
 on:
   push:
     branches:
-      - develop
-      - release
-      - master
+      - "develop"
+      - "release*"
+      - "master"
+      - "legleux/linux_packages"
     paths:
       # These paths are unique to `on-trigger.yml`.
       - ".github/workflows/reusable-check-missing-commits.yml"
@@ -26,6 +27,7 @@ on:
       - ".github/workflows/reusable-build.yml"
       - ".github/workflows/reusable-build-test-config.yml"
       - ".github/workflows/reusable-build-test.yml"
+      - ".github/workflows/reusable-build-pkg.yml"
       - ".github/workflows/reusable-strategy-matrix.yml"
       - ".github/workflows/reusable-test.yml"
       - ".codecov.yml"
@@ -33,6 +35,7 @@ on:
       - "conan/**"
       - "external/**"
      - "include/**"
+      - "pkgs/**"
       - "src/**"
       - "tests/**"
       - "CMakeLists.txt"
@@ -50,7 +53,12 @@ on:
   workflow_dispatch:

 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  # When a PR is merged into the develop branch it will be assigned a unique
+  # group identifier, so execution will continue even if another PR is merged
+  # while it is still running. In all other cases the group identifier is shared
+  # per branch, so that any in-progress runs are cancelled when a new commit is
+  # pushed.
+  group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
   cancel-in-progress: true

 defaults:
@@ -58,17 +66,32 @@ defaults:
     shell: bash

 jobs:
-  check-missing-commits:
-    if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
-    uses: ./.github/workflows/reusable-check-missing-commits.yml
+  # check-missing-commits:
+  #   if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
+  #   uses: ./.github/workflows/reusable-check-missing-commits.yml

-  build-test:
-    uses: ./.github/workflows/reusable-build-test.yml
+  # build-test:
+  #   uses: ./.github/workflows/reusable-build-test.yml
+  #   strategy:
+  #     matrix:
+  #       os: [linux, macos, windows]
+  #   with:
+  #     os: ${{ matrix.os }}
+  #     strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
+  #   secrets:
+  #     CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

+  build-package:
+    name: Build ${{ matrix.pkg_type }}-${{ matrix.arch }} packages
+    uses: ./.github/workflows/reusable-build-pkg.yml
+    secrets: inherit
     strategy:
+      fail-fast: ${{ github.event_name == 'merge_group' }}
       matrix:
-        os: [linux, macos, windows]
+        # pkg_type: [rpm]
+        pkg_type: [deb, rpm]
+        arch: [amd64]
+        # arch: [amd64, arm64]
     with:
-      os: ${{ matrix.os }}
-      strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
-    secrets:
-      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+      pkg_type: ${{ matrix.pkg_type }}
+      arch: ${{ matrix.arch }}
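To make the new concurrency comment concrete, a sketch of how the group expression evaluates in the two cases it distinguishes (branch and SHA values are illustrative):

```yaml
# push to develop           -> group "Trigger-<commit sha>": unique per merge, runs are not cancelled
# push to any other branch  -> group "Trigger-refs/heads/<branch>": a new push cancels the in-progress run
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
  cancel-in-progress: true
```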
.github/workflows/pre-commit.yml (4 changed lines)

@@ -9,7 +9,7 @@ on:
 jobs:
   # Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
   run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@af1b0f0d764cda2e5435f5ac97b240d4bd4d95d3
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
     with:
       runs_on: ubuntu-latest
-      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-d1496b8" }'
+      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-a8c7be1" }'
.github/workflows/publish-docs.yml (20 changed lines)

@@ -23,16 +23,24 @@ defaults:

 env:
   BUILD_DIR: .build
+  NPROC_SUBTRACT: 2

 jobs:
   publish:
     runs-on: ubuntu-latest
-    container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-d1496b8
+    container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
     permissions:
       contents: write
     steps:
       - name: Checkout repository
         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

+      - name: Get number of processors
+        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+        id: nproc
+        with:
+          subtract: ${{ env.NPROC_SUBTRACT }}
+
       - name: Check configuration
         run: |
           echo 'Checking path.'
@@ -46,12 +54,16 @@ jobs:

           echo 'Checking Doxygen version.'
           doxygen --version

       - name: Build documentation
+        env:
+          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
         run: |
-          mkdir -p ${{ env.BUILD_DIR }}
-          cd ${{ env.BUILD_DIR }}
+          mkdir -p "${BUILD_DIR}"
+          cd "${BUILD_DIR}"
           cmake -Donly_docs=ON ..
-          cmake --build . --target docs --parallel $(nproc)
+          cmake --build . --target docs --parallel ${BUILD_NPROC}

       - name: Publish documentation
         if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
         uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
.github/workflows/reusable-build-pkg.yml (32 added lines, new file)

@@ -0,0 +1,32 @@
+on:
+  workflow_call:
+    inputs:
+      pkg_type:
+        required: false
+        type: string
+      arch:
+        required: false
+        type: string
+
+defaults:
+  run:
+    shell: bash
+jobs:
+  build:
+    name: Build ${{ inputs.pkg_type }}-${{ inputs.arch }} package
+    runs-on: heavy${{ inputs.arch == 'arm64' && '-arm64' || '' }}
+    container: ghcr.io/xrplf/ci/${{ inputs.pkg_type == 'rpm' && 'rhel-9' || 'debian-bookworm' }}:gcc-12
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+
+      - name: Build packages
+        run: |
+          ./pkgs/build.sh
+          cat build_vars >> $GITHUB_STEP_SUMMARY
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: ${{ inputs.pkg_type }}-${{ inputs.arch }}
+          path: "*${{ inputs.arch }}.${{ inputs.pkg_type }}"
+          if-no-files-found: error
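A caller invokes this reusable workflow once per package type and architecture; a minimal sketch mirroring the `build-package` job added to `on-pr.yml` above:

```yaml
build-package:
  name: Build ${{ matrix.pkg_type }}-${{ matrix.arch }} packages
  uses: ./.github/workflows/reusable-build-pkg.yml
  secrets: inherit
  strategy:
    fail-fast: false
    matrix:
      pkg_type: [deb, rpm]
      arch: [amd64]
  with:
    pkg_type: ${{ matrix.pkg_type }}
    arch: ${{ matrix.arch }}
```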
@@ -39,6 +39,12 @@ on:
         required: true
         type: string

+      nproc_subtract:
+        description: "The number of processors to subtract when calculating parallelism."
+        required: false
+        type: number
+        default: 2
+
     secrets:
       CODECOV_TOKEN:
         description: "The Codecov token to use for uploading coverage reports."
@@ -55,6 +61,7 @@ jobs:
       runs_on: ${{ inputs.runs_on }}
       image: ${{ inputs.image }}
       config_name: ${{ inputs.config_name }}
+      nproc_subtract: ${{ inputs.nproc_subtract }}
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

@@ -67,3 +74,4 @@ jobs:
       runs_on: ${{ inputs.runs_on }}
       image: ${{ inputs.image }}
       config_name: ${{ inputs.config_name }}
+      nproc_subtract: ${{ inputs.nproc_subtract }}
.github/workflows/reusable-build-test.yml (4 changed lines)

@@ -42,7 +42,7 @@ jobs:
       - generate-matrix
     uses: ./.github/workflows/reusable-build-test-config.yml
     strategy:
-      fail-fast: false
+      fail-fast: ${{ github.event_name == 'merge_group' }}
       matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
       max-parallel: 10
     with:
@@ -52,7 +52,7 @@ jobs:
       cmake_args: ${{ matrix.cmake_args }}
       cmake_target: ${{ matrix.cmake_target }}
       runs_on: ${{ toJSON(matrix.architecture.runner) }}
-      image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || '' }}
+      image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
       config_name: ${{ matrix.config_name }}
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
.github/workflows/reusable-build.yml (49 changed lines)

@@ -34,6 +34,11 @@ on:
         required: true
         type: string

+      nproc_subtract:
+        description: "The number of processors to subtract when calculating parallelism."
+        required: true
+        type: number
+
     secrets:
       CODECOV_TOKEN:
         description: "The Codecov token to use for uploading coverage reports."
@@ -48,6 +53,7 @@ jobs:
     name: Build ${{ inputs.config_name }}
     runs-on: ${{ fromJSON(inputs.runs_on) }}
     container: ${{ inputs.image != '' && inputs.image || null }}
+    timeout-minutes: 60
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
@@ -57,13 +63,19 @@ jobs:
         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

       - name: Prepare runner
-        uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
+        uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
         with:
           disable_ccache: false

       - name: Print build environment
         uses: ./.github/actions/print-env

+      - name: Get number of processors
+        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+        id: nproc
+        with:
+          subtract: ${{ inputs.nproc_subtract }}
+
       - name: Setup Conan
         uses: ./.github/actions/setup-conan

@@ -71,7 +83,11 @@ jobs:
         uses: ./.github/actions/build-deps
         with:
           build_dir: ${{ inputs.build_dir }}
+          build_nproc: ${{ steps.nproc.outputs.nproc }}
           build_type: ${{ inputs.build_type }}
+          # Set the verbosity to "quiet" for Windows to avoid an excessive
+          # amount of logs. For other OSes, the "verbose" logs are more useful.
+          log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}

       - name: Configure CMake
         shell: bash
@@ -83,33 +99,50 @@
           cmake \
             -G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
             -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-            -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \
-            ${{ env.CMAKE_ARGS }} \
+            -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
+            ${CMAKE_ARGS} \
             ..

       - name: Build the binary
         shell: bash
         working-directory: ${{ inputs.build_dir }}
         env:
+          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
           BUILD_TYPE: ${{ inputs.build_type }}
           CMAKE_TARGET: ${{ inputs.cmake_target }}
         run: |
           cmake \
             --build . \
-            --config ${{ env.BUILD_TYPE }} \
-            --parallel $(nproc) \
-            --target ${{ env.CMAKE_TARGET }}
+            --config "${BUILD_TYPE}" \
+            --parallel ${BUILD_NPROC} \
+            --target "${CMAKE_TARGET}"

+      - name: Put built binaries in one location
+        shell: bash
+        working-directory: ${{ inputs.build_dir }}
+        env:
+          BUILD_TYPE_DIR: ${{ runner.os == 'Windows' && inputs.build_type || '' }}
+          CMAKE_TARGET: ${{ inputs.cmake_target }}
+        run: |
+          mkdir -p ./binaries/doctest/
+
+          cp ./${BUILD_TYPE_DIR}/rippled* ./binaries/
+          if [ "${CMAKE_TARGET}" != 'coverage' ]; then
+            cp ./src/tests/libxrpl/${BUILD_TYPE_DIR}/xrpl.test.* ./binaries/doctest/
+          fi

       - name: Upload rippled artifact
         uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        env:
+          BUILD_DIR: ${{ inputs.build_dir }}
         with:
           name: rippled-${{ inputs.config_name }}
-          path: ${{ inputs.build_dir }}/${{ runner.os == 'Windows' && inputs.build_type || '' }}/rippled${{ runner.os == 'Windows' && '.exe' || '' }}
+          path: ${{ env.BUILD_DIR }}/binaries/
           retention-days: 3
           if-no-files-found: error

       - name: Upload coverage report
-        if: ${{ inputs.cmake_target == 'coverage' }}
+        if: ${{ github.repository_owner == 'XRPLF' && inputs.cmake_target == 'coverage' }}
         uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
         with:
           disable_search: true
.github/workflows/reusable-notify-clio.yml (10 changed lines)

@@ -51,7 +51,7 @@ jobs:
         run: |
           echo 'Generating user and channel.'
           echo "user=clio" >> "${GITHUB_OUTPUT}"
-          echo "channel=pr_${{ env.PR_NUMBER }}" >> "${GITHUB_OUTPUT}"
+          echo "channel=pr_${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
           echo 'Extracting version.'
           echo "version=$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')" >> "${GITHUB_OUTPUT}"
       - name: Calculate conan reference
@@ -64,13 +64,15 @@ jobs:
           conan_remote_name: ${{ inputs.conan_remote_name }}
           conan_remote_url: ${{ inputs.conan_remote_url }}
       - name: Log into Conan remote
-        run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
+        env:
+          CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
+        run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
       - name: Upload package
         env:
           CONAN_REMOTE_NAME: ${{ inputs.conan_remote_name }}
         run: |
           conan export --user=${{ steps.generate.outputs.user }} --channel=${{ steps.generate.outputs.channel }} .
-          conan upload --confirm --check --remote=${{ env.CONAN_REMOTE_NAME }} xrpl/${{ steps.conan_ref.outputs.conan_ref }}
+          conan upload --confirm --check --remote="${CONAN_REMOTE_NAME}" xrpl/${{ steps.conan_ref.outputs.conan_ref }}
     outputs:
       conan_ref: ${{ steps.conan_ref.outputs.conan_ref }}

@@ -86,4 +88,4 @@ jobs:
           gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
             /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
             -F "client_payload[conan_ref]=${{ needs.upload.outputs.conan_ref }}" \
-            -F "client_payload[pr_url]=${{ env.PR_URL }}"
+            -F "client_payload[pr_url]=${PR_URL}"
@@ -38,4 +38,4 @@ jobs:
         env:
           GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
           GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
-        run: ./generate.py ${{ env.GENERATE_OPTION }} ${{ env.GENERATE_CONFIG }} >> "${GITHUB_OUTPUT}"
+        run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
.github/workflows/reusable-test.yml (48 changed lines)

@@ -26,12 +26,28 @@ on:
         required: true
         type: string

+      nproc_subtract:
+        description: "The number of processors to subtract when calculating parallelism."
+        required: true
+        type: number
+
 jobs:
   test:
     name: Test ${{ inputs.config_name }}
     runs-on: ${{ fromJSON(inputs.runs_on) }}
     container: ${{ inputs.image != '' && inputs.image || null }}
+    timeout-minutes: 30
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e

+      - name: Get number of processors
+        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+        id: nproc
+        with:
+          subtract: ${{ inputs.nproc_subtract }}
+
       - name: Download rippled artifact
         uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
         with:
@@ -61,9 +77,35 @@ jobs:
         run: |
           ./rippled --version | grep libvoidstar

-      - name: Test the binary
+      - name: Run the embedded tests
         if: ${{ inputs.run_tests }}
         shell: bash
+        env:
+          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
         run: |
-          ./rippled --unittest --unittest-jobs $(nproc)
-          ctest -j $(nproc) --output-on-failure
+          ./rippled --unittest --unittest-jobs ${BUILD_NPROC}

+      - name: Run the separate tests
+        if: ${{ inputs.run_tests }}
+        env:
+          EXT: ${{ runner.os == 'Windows' && '.exe' || '' }}
+        shell: bash
+        run: |
+          for test_file in ./doctest/*${EXT}; do
+            echo "Executing $test_file"
+            chmod +x "$test_file"
+            if [[ "${{ runner.os }}" == "Windows" && "$test_file" == "./doctest/xrpl.test.net.exe" ]]; then
+              echo "Skipping $test_file on Windows"
+            else
+              "$test_file"
+            fi
+          done
+
+      - name: Debug failure (Linux)
+        if: ${{ failure() && runner.os == 'Linux' && inputs.run_tests }}
+        shell: bash
+        run: |
+          echo "IPv4 local port range:"
+          cat /proc/sys/net/ipv4/ip_local_port_range
+          echo "Netstat:"
+          netstat -an
.github/workflows/upload-conan-deps.yml (33 changed lines)

@@ -34,17 +34,20 @@ on:
 env:
   CONAN_REMOTE_NAME: xrplf
   CONAN_REMOTE_URL: https://conan.ripplex.io
+  NPROC_SUBTRACT: 2

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

 jobs:
   # Generate the strategy matrix to be used by the following job.
   generate-matrix:
     uses: ./.github/workflows/reusable-strategy-matrix.yml
     with:
       strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}

   # Build and upload the dependencies for each configuration.
   run-upload-conan-deps:
     needs:
       - generate-matrix
@@ -53,19 +56,29 @@ jobs:
       matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
       max-parallel: 10
     runs-on: ${{ matrix.architecture.runner }}
-    container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
+    container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e

-      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+      - name: Checkout repository
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

       - name: Prepare runner
-        uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
+        uses: XRPLF/actions/.github/actions/prepare-runner@99685816bb60a95a66852f212f382580e180df3a
         with:
           disable_ccache: false

       - name: Print build environment
         uses: ./.github/actions/print-env

+      - name: Get number of processors
+        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+        id: nproc
+        with:
+          subtract: ${{ env.NPROC_SUBTRACT }}
+
       - name: Setup Conan
         uses: ./.github/actions/setup-conan
         with:
@@ -76,15 +89,19 @@ jobs:
         uses: ./.github/actions/build-deps
         with:
           build_dir: .build
+          build_nproc: ${{ steps.nproc.outputs.nproc }}
           build_type: ${{ matrix.build_type }}
           force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
+          # Set the verbosity to "quiet" for Windows to avoid an excessive
+          # amount of logs. For other OSes, the "verbose" logs are more useful.
+          log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}

       - name: Log into Conan remote
-        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
-        run: conan remote login ${{ env.CONAN_REMOTE_NAME }} "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"
+        if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
+        run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"

       - name: Upload Conan packages
-        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
+        if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
         env:
           FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
-        run: conan upload "*" --remote='${{ env.CONAN_REMOTE_NAME }}' --confirm ${{ env.FORCE_OPTION }}
+        run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}
BUILD.md (22 changed lines)

@@ -39,17 +39,12 @@ found here](./docs/build/environment.md).

 - [Python 3.11](https://www.python.org/downloads/), or higher
 - [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
-- [CMake 3.22](https://cmake.org/download/)[^2], or higher
+- [CMake 3.22](https://cmake.org/download/), or higher

 [^1]:
   It is possible to build with Conan 1.60+, but the instructions are
   significantly different, which is why we are not recommending it.

-[^2]:
-  CMake 4 is not yet supported by all dependencies required by this project.
-  If you are affected by this issue, follow [conan workaround for cmake
-  4](#workaround-for-cmake-4)
-
 `rippled` is written in the C++20 dialect and includes the `<concepts>` header.
 The [minimum compiler versions][2] required are:

@@ -282,21 +277,6 @@ sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
 sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
 ```

-#### Workaround for CMake 4
-
-If your system CMake is version 4 rather than 3, you may have to configure Conan
-profile to use CMake version 3 for dependencies, by adding the following two
-lines to your profile:
-
-```text
-[tool_requires]
-!cmake/*: cmake/[>=3 <4]
-```
-
-This will force Conan to download and use a locally cached CMake 3 version, and
-is needed because some of the dependencies used by this project do not support
-CMake 4.
-
 #### Clang workaround for grpc

 If your compiler is clang, version 19 or later, or apple-clang, version 17 or
@@ -975,6 +975,47 @@
 #       number of ledger records online. Must be greater
 #       than or equal to ledger_history.
 #
+# Optional keys for NuDB only:
+#
+#   nudb_block_size      EXPERIMENTAL: Block size in bytes for NuDB storage.
+#                        Must be a power of 2 between 4096 and 32768. Default is 4096.
+#
+#                        This parameter controls the fundamental storage unit
+#                        size for NuDB's internal data structures. The choice
+#                        of block size can significantly impact performance
+#                        depending on your storage hardware and filesystem:
+#
+#                        - 4096 bytes: Optimal for most standard SSDs and
+#                          traditional filesystems (ext4, NTFS, HFS+).
+#                          Provides good balance of performance and storage
+#                          efficiency. Recommended for most deployments.
+#                          Minimizes memory footprint and provides consistent
+#                          low-latency access patterns across diverse hardware.
+#
+#                        - 8192-16384 bytes: May improve performance on
+#                          high-end NVMe SSDs and copy-on-write filesystems
+#                          like ZFS or Btrfs that benefit from larger block
+#                          alignment. Can reduce metadata overhead for large
+#                          databases. Offers better sequential throughput and
+#                          reduced I/O operations at the cost of higher memory
+#                          usage per operation.
+#
+#                        - 32768 bytes (32K): Maximum supported block size
+#                          for high-performance scenarios with very fast
+#                          storage. May increase memory usage and reduce
+#                          efficiency for smaller databases. Best suited for
+#                          enterprise environments with abundant RAM.
+#
+#                        Performance testing is recommended before deploying
+#                        any non-default block size in production environments.
+#
+#                        Note: This setting cannot be changed after database
+#                        creation without rebuilding the entire database.
+#                        Choose carefully based on your hardware and expected
+#                        database size.
+#
+#                        Example: nudb_block_size=4096
+#
 # These keys modify the behavior of online_delete, and thus are only
 # relevant if online_delete is defined and non-zero:
 #
@@ -1471,6 +1512,7 @@ secure_gateway = 127.0.0.1
 [node_db]
 type=NuDB
 path=/var/lib/rippled/db/nudb
+nudb_block_size=4096
 online_delete=512
 advisory_delete=0
@@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF)
 target_compile_definitions (common
   INTERFACE
     $<$<CONFIG:Debug>:DEBUG _DEBUG>
-    $<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
-    # ^^^^ NOTE: CMAKE release builds already have NDEBUG
-    # defined, so no need to add it explicitly except for
-    # this special case of (profile ON) and (assert OFF)
-    # -- presumably this is because we don't want profile
-    # builds asserting unless asserts were specifically
-    # requested
+    #[===[
+    NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
+    explicitly except for the special case of (profile ON) and (assert OFF).
+    Presumably this is because we don't want profile builds asserting unless
+    asserts were specifically requested.
+    ]===]
+    $<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
+    # TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
+    OPENSSL_SUPPRESS_DEPRECATED
+    )

 if (MSVC)
   # remove existing exception flag since we set it to -EHa
@@ -38,7 +38,7 @@ install(CODE "
   set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
   include(create_symbolic_link)
   create_symbolic_link(xrpl \
-    \${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple)
+    \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/ripple)
 ")

 install (EXPORT RippleExports
@@ -72,7 +72,7 @@ if (is_root_project AND TARGET rippled)
     set(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\")
     include(create_symbolic_link)
     create_symbolic_link(rippled${suffix} \
-      \${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix})
+      \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}/xrpld${suffix})
   ")
 endif ()
@@ -118,7 +118,7 @@ option(beast_no_unit_test_inline
   "Prevents unit test definitions from being inserted into global table"
   OFF)
 option(single_io_service_thread
-  "Restricts the number of threads calling io_service::run to one. \
+  "Restricts the number of threads calling io_context::run to one. \
   This can be useful when debugging."
   OFF)
 option(boost_show_deprecated
@@ -1,4 +1,4 @@
-option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF)
+option (validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF)

 if (validator_keys)
   git_branch (current_branch)
@@ -6,17 +6,15 @@ if (validator_keys)
   if (NOT (current_branch STREQUAL "release"))
     set (current_branch "master")
   endif ()
-  message (STATUS "tracking ValidatorKeys branch: ${current_branch}")
+  message (STATUS "Tracking ValidatorKeys branch: ${current_branch}")

   FetchContent_Declare (
-    validator_keys_src
+    validator_keys
     GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
     GIT_TAG "${current_branch}"
   )
-  FetchContent_GetProperties (validator_keys_src)
-  if (NOT validator_keys_src_POPULATED)
-    message (STATUS "Pausing to download ValidatorKeys...")
-    FetchContent_Populate (validator_keys_src)
-  endif ()
-  add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys)
+  FetchContent_MakeAvailable(validator_keys)
+  set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
+  install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})

 endif ()
@@ -24,6 +24,7 @@ target_link_libraries(ripple_boost
     Boost::date_time
     Boost::filesystem
     Boost::json
+    Boost::process
     Boost::program_options
     Boost::regex
     Boost::system
@@ -7,7 +7,7 @@ function(xrpl_add_test name)
     "${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/${name}.cpp"
   )
-  add_executable(${target} EXCLUDE_FROM_ALL ${ARGN} ${sources})
+  add_executable(${target} ${ARGN} ${sources})

   isolate_headers(
     ${target}
@@ -9,7 +9,7 @@
   "rocksdb/10.0.1#85537f46e538974d67da0c3977de48ac%1756234304.347",
   "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
   "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
-  "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
+  "openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1759746684.671",
   "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
   "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
   "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
@@ -21,7 +21,7 @@
   "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
   "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
   "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
-  "boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
+  "boost/1.88.0#8852c0b72ce8271fb8ff7c53456d4983%1756223752.326",
   "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
 ],
 "build_requires": [
@@ -46,7 +46,7 @@
   "lz4/1.10.0"
 ],
 "boost/1.83.0": [
-  "boost/1.83.0"
+  "boost/1.88.0"
 ],
 "sqlite3/3.44.2": [
   "sqlite3/3.49.1"
@@ -1,9 +1,5 @@
 # Global configuration for Conan. This is used to set the number of parallel
-# downloads, uploads, and build jobs. The verbosity is set to verbose to
-# provide more information during the build process.
+# downloads and uploads.
 core:non_interactive=True
 core.download:parallel={{ os.cpu_count() }}
 core.upload:parallel={{ os.cpu_count() }}
-tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
-tools.build:verbosity=verbose
-tools.compilation:verbosity=verbose
@@ -21,17 +21,11 @@ compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}

 [conf]
 {% if compiler == "clang" and compiler_version >= 19 %}
-tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
+grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
 {% endif %}
 {% if compiler == "apple-clang" and compiler_version >= 17 %}
-tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
+grpc/1.50.1:tools.build:cxxflags+=['-Wno-missing-template-arg-list-after-template-kw']
 {% endif %}
-{% if compiler == "clang" and compiler_version == 16 %}
-tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
-{% endif %}
 {% if compiler == "gcc" and compiler_version < 13 %}
-tools.build:cxxflags=['-Wno-restrict']
+tools.build:cxxflags+=['-Wno-restrict']
 {% endif %}

+[tool_requires]
+!cmake/*: cmake/[>=3 <4]
@@ -27,7 +27,7 @@ class Xrpl(ConanFile):
         'grpc/1.50.1',
         'libarchive/3.8.1',
         'nudb/2.0.9',
-        'openssl/1.1.1w',
+        'openssl/3.5.4',
         'soci/4.0.3',
         'zlib/1.3.1',
     ]
@@ -100,11 +100,13 @@ class Xrpl(ConanFile):
     def configure(self):
         if self.settings.compiler == 'apple-clang':
             self.options['boost'].visibility = 'global'
+        if self.settings.compiler in ['clang', 'gcc']:
+            self.options['boost'].without_cobalt = True

     def requirements(self):
         # Conan 2 requires transitive headers to be specified
         transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
-        self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
+        self.requires('boost/1.88.0', force=True, **transitive_headers_opt)
         self.requires('date/3.0.4', **transitive_headers_opt)
         self.requires('lz4/1.10.0', force=True)
         self.requires('protobuf/3.21.12', force=True)
@@ -175,6 +177,7 @@ class Xrpl(ConanFile):
             'boost::filesystem',
             'boost::json',
             'boost::program_options',
+            'boost::process',
             'boost::regex',
             'boost::system',
             'boost::thread',
@@ -654,12 +654,14 @@ SharedWeakUnion<T>::convertToWeak()
             break;
         case destroy:
             // We just added a weak ref. How could we destroy?
+            // LCOV_EXCL_START
             UNREACHABLE(
                 "ripple::SharedWeakUnion::convertToWeak : destroying freshly "
                 "added ref");
             delete p;
             unsafeSetRawPtr(nullptr);
             return true;  // Should never happen
+            // LCOV_EXCL_STOP
         case partialDestroy:
             // This is a weird case. We just converted the last strong
             // pointer to a weak pointer.
@@ -23,7 +23,7 @@
 #include <xrpl/basics/Resolver.h>
 #include <xrpl/beast/utility/Journal.h>

-#include <boost/asio/io_service.hpp>
+#include <boost/asio/io_context.hpp>

 namespace ripple {

@@ -33,7 +33,7 @@ public:
     explicit ResolverAsio() = default;

     static std::unique_ptr<ResolverAsio>
-    New(boost::asio::io_service&, beast::Journal);
+    New(boost::asio::io_context&, beast::Journal);
 };

 } // namespace ripple
@@ -23,7 +23,8 @@
 #include <xrpl/beast/utility/instrumentation.h>

 #include <boost/asio/basic_waitable_timer.hpp>
-#include <boost/asio/io_service.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/post.hpp>

 #include <chrono>
 #include <condition_variable>
@@ -32,7 +33,7 @@

 namespace beast {

-/** Measures handler latency on an io_service queue. */
+/** Measures handler latency on an io_context queue. */
 template <class Clock>
 class io_latency_probe
 {
@@ -44,12 +45,12 @@ private:
     std::condition_variable_any m_cond;
     std::size_t m_count;
     duration const m_period;
-    boost::asio::io_service& m_ios;
+    boost::asio::io_context& m_ios;
    boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
     bool m_cancel;

 public:
-    io_latency_probe(duration const& period, boost::asio::io_service& ios)
+    io_latency_probe(duration const& period, boost::asio::io_context& ios)
         : m_count(1)
         , m_period(period)
         , m_ios(ios)
@@ -64,16 +65,16 @@ public:
         cancel(lock, true);
     }

-    /** Return the io_service associated with the latency probe. */
+    /** Return the io_context associated with the latency probe. */
     /** @{ */
-    boost::asio::io_service&
-    get_io_service()
+    boost::asio::io_context&
+    get_io_context()
     {
         return m_ios;
     }

-    boost::asio::io_service const&
-    get_io_service() const
+    boost::asio::io_context const&
+    get_io_context() const
     {
         return m_ios;
     }
@@ -109,8 +110,10 @@ public:
         std::lock_guard lock(m_mutex);
         if (m_cancel)
             throw std::logic_error("io_latency_probe is canceled");
-        m_ios.post(sample_op<Handler>(
-            std::forward<Handler>(handler), Clock::now(), false, this));
+        boost::asio::post(
+            m_ios,
+            sample_op<Handler>(
+                std::forward<Handler>(handler), Clock::now(), false, this));
     }

     /** Initiate continuous i/o latency sampling.
@@ -124,8 +127,10 @@ public:
         std::lock_guard lock(m_mutex);
         if (m_cancel)
             throw std::logic_error("io_latency_probe is canceled");
-        m_ios.post(sample_op<Handler>(
-            std::forward<Handler>(handler), Clock::now(), true, this));
+        boost::asio::post(
+            m_ios,
+            sample_op<Handler>(
+                std::forward<Handler>(handler), Clock::now(), true, this));
     }

 private:
@@ -236,12 +241,13 @@ private:
             // The latency is too high to maintain the desired
             // period so don't bother with a timer.
            //
-            m_probe->m_ios.post(
+            boost::asio::post(
+                m_probe->m_ios,
                 sample_op<Handler>(m_handler, now, m_repeat, m_probe));
         }
         else
         {
-            m_probe->m_timer.expires_from_now(when - now);
+            m_probe->m_timer.expires_after(when - now);
             m_probe->m_timer.async_wait(
                 sample_op<Handler>(m_handler, now, m_repeat, m_probe));
         }
@@ -254,7 +260,8 @@ private:
         if (!m_probe)
             return;
         typename Clock::time_point const now(Clock::now());
-        m_probe->m_ios.post(
+        boost::asio::post(
+            m_probe->m_ios,
             sample_op<Handler>(m_handler, now, m_repeat, m_probe));
     }
 };
@@ -94,7 +94,11 @@ hash_append(Hasher& h, beast::IP::Address const& addr) noexcept
     else if (addr.is_v6())
         hash_append(h, addr.to_v6().to_bytes());
     else
+    {
+        // LCOV_EXCL_START
         UNREACHABLE("beast::hash_append : invalid address type");
+        // LCOV_EXCL_STOP
+    }
 }
 } // namespace beast
@@ -8,9 +8,11 @@
|
||||
#ifndef BEAST_TEST_YIELD_TO_HPP
|
||||
#define BEAST_TEST_YIELD_TO_HPP
|
||||
|
||||
#include <boost/asio/io_service.hpp>
|
||||
#include <boost/asio/executor_work_guard.hpp>
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/optional.hpp>
|
||||
#include <boost/thread/csbl/memory/allocator_arg.hpp>
|
||||
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
@@ -29,10 +31,12 @@ namespace test {
|
||||
class enable_yield_to
|
||||
{
|
||||
protected:
|
||||
boost::asio::io_service ios_;
|
||||
boost::asio::io_context ios_;
|
||||
|
||||
private:
|
||||
boost::optional<boost::asio::io_service::work> work_;
|
||||
boost::optional<boost::asio::executor_work_guard<
|
||||
boost::asio::io_context::executor_type>>
|
||||
work_;
|
||||
std::vector<std::thread> threads_;
|
||||
std::mutex m_;
|
||||
std::condition_variable cv_;
|
||||
@@ -42,7 +46,8 @@ public:
|
||||
/// The type of yield context passed to functions.
|
||||
using yield_context = boost::asio::yield_context;
|
||||
|
||||
explicit enable_yield_to(std::size_t concurrency = 1) : work_(ios_)
|
||||
explicit enable_yield_to(std::size_t concurrency = 1)
|
||||
: work_(boost::asio::make_work_guard(ios_))
|
||||
{
|
||||
threads_.reserve(concurrency);
|
||||
while (concurrency--)
|
||||
@@ -56,9 +61,9 @@ public:
|
||||
t.join();
|
||||
}
|
||||
|
||||
/// Return the `io_service` associated with the object
|
||||
boost::asio::io_service&
|
||||
get_io_service()
|
||||
/// Return the `io_context` associated with the object
|
||||
boost::asio::io_context&
|
||||
get_io_context()
|
||||
{
|
||||
return ios_;
|
||||
}
|
||||
@@ -111,13 +116,18 @@ enable_yield_to::spawn(F0&& f, FN&&... fn)
|
||||
{
|
||||
boost::asio::spawn(
|
||||
ios_,
|
||||
boost::allocator_arg,
|
||||
boost::context::fixedsize_stack(2 * 1024 * 1024),
|
||||
[&](yield_context yield) {
|
||||
f(yield);
|
||||
std::lock_guard lock{m_};
|
||||
if (--running_ == 0)
|
||||
cv_.notify_all();
|
||||
},
|
||||
boost::coroutines::attributes(2 * 1024 * 1024));
|
||||
[](std::exception_ptr e) {
|
||||
if (e)
|
||||
std::rethrow_exception(e);
|
||||
});
|
||||
spawn(fn...);
|
||||
}
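
Editor's note: a minimal sketch, not taken from the diff, of the work-guard migration shown above. io_context has no nested work class, so boost::asio::make_work_guard keeps run() from returning until the guard is reset.

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>

#include <thread>

int main()
{
    boost::asio::io_context ioc;
    // Replaces the removed boost::asio::io_service::work member; in real
    // code the guard is typically kept alive until shutdown.
    auto work = boost::asio::make_work_guard(ioc);
    std::thread t([&ioc] { ioc.run(); });
    work.reset();  // let run() return once outstanding work drains
    t.join();
}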

@@ -217,7 +217,7 @@ Reader::parse(Value& root, BufferSequence const& bs)
std::string s;
s.reserve(buffer_size(bs));
for (auto const& b : bs)
s.append(buffer_cast<char const*>(b), buffer_size(b));
s.append(static_cast<char const*>(b.data()), buffer_size(b));
return parse(s, root);
}

@@ -46,7 +46,7 @@ public:
* without formatting (not human friendly).
*
* The JSON document is written in a single line. It is not intended for 'human'
* consumption, but may be useful to support feature such as RPC where bandwith
* consumption, but may be useful to support feature such as RPC where bandwidth
* is limited. \sa Reader, Value
*/

@@ -284,12 +284,14 @@ public:
{
if (key.type != ltOFFER)
{
// LCOV_EXCL_START
UNREACHABLE(
"ripple::ApplyView::dirAppend : only Offers are appended to "
"book directories");
// Only Offers are appended to book directories. Call dirInsert()
// instead
return std::nullopt;
// LCOV_EXCL_STOP
}
return dirAdd(true, directory, key.key, describe);
}

@@ -47,7 +47,7 @@ public:

public:
AutoSocket(
boost::asio::io_service& s,
boost::asio::io_context& s,
boost::asio::ssl::context& c,
bool secureOnly,
bool plainOnly)
@@ -58,7 +58,7 @@ public:
mSocket = std::make_unique<ssl_socket>(s, c);
}

AutoSocket(boost::asio::io_service& s, boost::asio::ssl::context& c)
AutoSocket(boost::asio::io_context& s, boost::asio::ssl::context& c)
: AutoSocket(s, c, false, false)
{
}

@@ -23,7 +23,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/Journal.h>

#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/streambuf.hpp>

#include <chrono>
@@ -51,7 +51,7 @@ public:

static void
get(bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::deque<std::string> deqSites,
unsigned short const port,
std::string const& strPath,
@@ -65,7 +65,7 @@ public:

static void
get(bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::string const& strPath,
@@ -80,7 +80,7 @@ public:
static void
request(
bool bSSL,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
std::string strSite,
unsigned short const port,
std::function<

@@ -153,7 +153,7 @@ public:
{
strm.set_verify_callback(
std::bind(
&rfc2818_verify,
&rfc6125_verify,
host,
std::placeholders::_1,
std::placeholders::_2,
@@ -167,7 +167,7 @@ public:

/**
* @brief callback invoked for name verification - just passes through
* to the asio rfc2818 implementation.
* to the asio `host_name_verification` (rfc6125) implementation.
*
* @param domain hostname expected
* @param preverified passed by implementation
@@ -175,13 +175,13 @@ public:
* @param j journal for logging
*/
static bool
rfc2818_verify(
rfc6125_verify(
std::string const& domain,
bool preverified,
boost::asio::ssl::verify_context& ctx,
beast::Journal j)
{
if (boost::asio::ssl::rfc2818_verification(domain)(preverified, ctx))
if (boost::asio::ssl::host_name_verification(domain)(preverified, ctx))
return true;

JLOG(j.warn()) << "Outbound SSL connection to " << domain
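
Editor's note: a hedged sketch of the verifier swap shown above. boost::asio::ssl::host_name_verification (RFC 6125) replaces the deprecated rfc2818_verification functor; the stream type and host value below are illustrative only.

#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl.hpp>

#include <string>

using ssl_stream = boost::asio::ssl::stream<boost::asio::ip::tcp::socket>;

void configureVerification(ssl_stream& stream, std::string const& host)
{
    stream.set_verify_mode(boost::asio::ssl::verify_peer);
    // host_name_verification checks the peer certificate against `host`.
    stream.set_verify_callback(
        boost::asio::ssl::host_name_verification(host));
}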

@@ -55,7 +55,10 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
/** The maximum number of entries per directory page */
std::size_t constexpr dirNodeMaxEntries = 32;

/** The maximum number of pages allowed in a directory */
/** The maximum number of pages allowed in a directory

Made obsolete by fixDirectoryLimit amendment.
*/
std::uint64_t constexpr dirNodeMaxPages = 262144;

/** The maximum number of items in an NFT page */

@@ -21,6 +21,7 @@
#define RIPPLE_PROTOCOL_PUBLICKEY_H_INCLUDED

#include <xrpl/basics/Slice.h>
#include <xrpl/beast/net/IPEndpoint.h>
#include <xrpl/protocol/KeyType.h>
#include <xrpl/protocol/STExchange.h>
#include <xrpl/protocol/UintTypes.h>
@@ -264,6 +265,24 @@ calcNodeID(PublicKey const&);
AccountID
calcAccountID(PublicKey const& pk);

inline std::string
getFingerprint(
beast::IP::Endpoint const& address,
std::optional<PublicKey> const& publicKey = std::nullopt,
std::optional<std::string> const& id = std::nullopt)
{
std::stringstream ss;
ss << "IP Address: " << address;
if (publicKey.has_value())
{
ss << ", Public Key: " << toBase58(TokenType::NodePublic, *publicKey);
}
if (id.has_value())
{
ss << ", Id: " << id.value();
}
return ss.str();
}
} // namespace ripple
//------------------------------------------------------------------------------
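
Editor's note: an illustrative call to the new getFingerprint helper declared above. The endpoint value and the assumption that beast::IP::Endpoint::from_string is available are the editor's, not part of the diff.

// Produces something like "IP Address: 10.0.0.1:51235, Id: peer-42" when no
// public key is supplied (format per the implementation above).
beast::IP::Endpoint const remote =
    beast::IP::Endpoint::from_string("10.0.0.1:51235");
std::string const fingerprint =
    ripple::getFingerprint(remote, std::nullopt, std::string{"peer-42"});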

@@ -709,37 +709,6 @@ canAdd(STAmount const& amt1, STAmount const& amt2);
bool
canSubtract(STAmount const& amt1, STAmount const& amt2);

// Since `canonicalize` does not have access to a ledger, this is needed to put
// the low-level routine stAmountCanonicalize on an amendment switch. Only
// transactions need to use this switchover. Outside of a transaction it's safe
// to unconditionally use the new behavior.

bool
getSTAmountCanonicalizeSwitchover();

void
setSTAmountCanonicalizeSwitchover(bool v);

/** RAII class to set and restore the STAmount canonicalize switchover.
*/

class STAmountSO
{
public:
explicit STAmountSO(bool v) : saved_(getSTAmountCanonicalizeSwitchover())
{
setSTAmountCanonicalizeSwitchover(v);
}

~STAmountSO()
{
setSTAmountCanonicalizeSwitchover(saved_);
}

private:
bool saved_;
};

} // namespace ripple

//------------------------------------------------------------------------------

@@ -244,6 +244,9 @@ public:
getFieldPathSet(SField const& field) const;
STVector256 const&
getFieldV256(SField const& field) const;
// If not found, returns an object constructed with the given field
STObject
getFieldObject(SField const& field) const;
STArray const&
getFieldArray(SField const& field) const;
STCurrency const&
@@ -390,6 +393,8 @@ public:
setFieldV256(SField const& field, STVector256 const& v);
void
setFieldArray(SField const& field, STArray const& v);
void
setFieldObject(SField const& field, STObject const& v);

template <class Tag>
void

@@ -87,8 +87,14 @@ public:
getFullText() const override;

// Outer transaction functions / signature functions.
static Blob
getSignature(STObject const& sigObject);

Blob
getSignature() const;
getSignature() const
{
return getSignature(*this);
}

uint256
getSigningHash() const;
@@ -119,13 +125,20 @@ public:
getJson(JsonOptions options, bool binary) const;

void
sign(PublicKey const& publicKey, SecretKey const& secretKey);
sign(
PublicKey const& publicKey,
SecretKey const& secretKey,
std::optional<std::reference_wrapper<SField const>> signatureTarget =
{});

/** Check the signature.
@return `true` if valid signature. If invalid, the error message string.
*/
enum class RequireFullyCanonicalSig : bool { no, yes };

/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@return `true` if valid signature. If invalid, the error message string.
*/
Expected<void, std::string>
checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules)
const;
@@ -150,17 +163,34 @@ public:
char status,
std::string const& escapedMetaData) const;

std::vector<uint256>
std::vector<uint256> const&
getBatchTransactionIDs() const;

private:
/** Check the signature.
@param requireCanonicalSig If `true`, check that the signature is fully
canonical. If `false`, only check that the signature is valid.
@param rules The current ledger rules.
@param sigObject Reference to object that contains the signature fields.
Will be *this more often than not.
@return `true` if valid signature. If invalid, the error message string.
*/
Expected<void, std::string>
checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const;
checkSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules,
STObject const& sigObject) const;

Expected<void, std::string>
checkSingleSign(
RequireFullyCanonicalSig requireCanonicalSig,
STObject const& sigObject) const;

Expected<void, std::string>
checkMultiSign(
RequireFullyCanonicalSig requireCanonicalSig,
Rules const& rules) const;
Rules const& rules,
STObject const& sigObject) const;

Expected<void, std::string>
checkBatchSingleSign(
@@ -179,7 +209,7 @@ private:
move(std::size_t n, void* buf) override;

friend class detail::STVar;
mutable std::vector<uint256> batch_txn_ids_;
mutable std::vector<uint256> batchTxnIds_;
};

bool

@@ -129,10 +129,12 @@ inplace_bigint_div_rem(std::span<uint64_t> numerator, std::uint64_t divisor)
{
// should never happen, but if it does then it seems natural to define
// the a null set of numbers to be zero, so the remainder is also zero.
// LCOV_EXCL_START
UNREACHABLE(
"ripple::b58_fast::detail::inplace_bigint_div_rem : empty "
"numerator");
return 0;
// LCOV_EXCL_STOP
}

auto to_u128 = [](std::uint64_t high,
@@ -29,15 +29,14 @@

// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.

XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DynamicMPT, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (TokenEscrowV1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (DelegateV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
@@ -91,33 +90,19 @@ XRPL_FIX (TrustLinesToSelf, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(NonFungibleTokensV1_1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(ExpandedSignerList, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(CheckCashMakesTrustLine, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RmSmallIncreasedQOffers, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (STAmountCanonicalize, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(TicketBatch, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (AmendmentMajorityCalc, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(HardenedValidations, Supported::yes, VoteBehavior::DefaultYes)
// fix1781: XRPEndpointSteps should be included in the circular payment check
XRPL_FIX (1781, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (QualityUpperBound, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (PayChanRecipientOwnerDir, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (CheckThreading, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (MasterKeyAsRegularKey, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (TakerDryOfferRemoval, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(MultiSignReserve, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1578, Supported::yes, VoteBehavior::DefaultYes)
// fix1515: Use liquidity from strands that consume max offers, but mark as dry
XRPL_FIX (1515, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositPreauth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1623, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1543, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1571, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (1513, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes)

// The following amendments are obsolete, but must remain supported
@@ -138,21 +123,35 @@ XRPL_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete)

// The following amendments have been active for at least two years. Their
// pre-amendment code has been removed and the identifiers are deprecated.
// All known amendments and amendments that may appear in a validated
// ledger must be registered either here or above with the "active" amendments
XRPL_RETIRE(MultiSign)
XRPL_RETIRE(TrustSetAuth)
XRPL_RETIRE(FeeEscalation)
XRPL_RETIRE(PayChan)
XRPL_RETIRE(CryptoConditions)
XRPL_RETIRE(TickSize)
XRPL_RETIRE(fix1368)
XRPL_RETIRE(Escrow)
XRPL_RETIRE(fix1373)
XRPL_RETIRE(EnforceInvariants)
XRPL_RETIRE(SortedDirectories)
// All known amendments and amendments that may appear in a validated ledger
// must be registered either here or above with the "active" amendments
//
// Please keep this list sorted alphabetically for convenience.
XRPL_RETIRE(fix1201)
XRPL_RETIRE(fix1368)
XRPL_RETIRE(fix1373)
XRPL_RETIRE(fix1512)
XRPL_RETIRE(fix1513)
XRPL_RETIRE(fix1515)
XRPL_RETIRE(fix1523)
XRPL_RETIRE(fix1528)
XRPL_RETIRE(fix1543)
XRPL_RETIRE(fix1571)
XRPL_RETIRE(fix1578)
XRPL_RETIRE(fix1623)
XRPL_RETIRE(fix1781)
XRPL_RETIRE(fixCheckThreading)
XRPL_RETIRE(fixQualityUpperBound)
XRPL_RETIRE(fixRmSmallIncreasedQOffers)
XRPL_RETIRE(fixSTAmountCanonicalize)
XRPL_RETIRE(fixTakerDryOfferRemoval)
XRPL_RETIRE(CryptoConditions)
XRPL_RETIRE(Escrow)
XRPL_RETIRE(EnforceInvariants)
XRPL_RETIRE(FeeEscalation)
XRPL_RETIRE(FlowCross)
XRPL_RETIRE(MultiSign)
XRPL_RETIRE(PayChan)
XRPL_RETIRE(SortedDirectories)
XRPL_RETIRE(TickSize)
XRPL_RETIRE(TrustSetAuth)
@@ -457,7 +457,7 @@ LEDGER_ENTRY(ltCREDENTIAL, 0x0081, Credential, credential, ({
{sfExpiration, soeOPTIONAL},
{sfURI, soeOPTIONAL},
{sfIssuerNode, soeREQUIRED},
{sfSubjectNode, soeREQUIRED},
{sfSubjectNode, soeOPTIONAL},
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
}))

@@ -851,7 +851,7 @@ TRANSACTION(ttDELEGATE_SET, 64, DelegateSet,
TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
Delegation::delegatable,
featureSingleAssetVault,
createPseudoAcct | createMPTIssuance,
createPseudoAcct | createMPTIssuance | mustModifyVault,
({
{sfAsset, soeREQUIRED, soeMPTSupported},
{sfAssetsMaximum, soeOPTIONAL},
@@ -869,7 +869,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate,
TRANSACTION(ttVAULT_SET, 66, VaultSet,
Delegation::delegatable,
featureSingleAssetVault,
noPriv,
mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAssetsMaximum, soeOPTIONAL},
@@ -884,7 +884,7 @@ TRANSACTION(ttVAULT_SET, 66, VaultSet,
TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
Delegation::delegatable,
featureSingleAssetVault,
mustDeleteAcct | destroyMPTIssuance,
mustDeleteAcct | destroyMPTIssuance | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
}))
@@ -896,7 +896,7 @@ TRANSACTION(ttVAULT_DELETE, 67, VaultDelete,
TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
Delegation::delegatable,
featureSingleAssetVault,
mayAuthorizeMPT,
mayAuthorizeMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
@@ -909,7 +909,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT,
mayDeleteMPT | mayAuthorizeMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
@@ -924,7 +924,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
TRANSACTION(ttVAULT_CLAWBACK, 70, VaultClawback,
Delegation::delegatable,
featureSingleAssetVault,
mayDeleteMPT,
mayDeleteMPT | mustModifyVault,
({
{sfVaultID, soeREQUIRED},
{sfHolder, soeREQUIRED},

@@ -569,6 +569,7 @@ JSS(settle_delay); // out: AccountChannels
JSS(severity); // in: LogLevel
JSS(shares); // out: VaultInfo
JSS(signature); // out: NetworkOPs, ChannelAuthorize
JSS(signature_target); // in: TransactionSign
JSS(signature_verified); // out: ChannelVerify
JSS(signing_key); // out: NetworkOPs
JSS(signing_keys); // out: ValidatorList

@@ -21,6 +21,7 @@
#define RIPPLE_RESOURCE_CONSUMER_H_INCLUDED

#include <xrpl/basics/Log.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/resource/Charge.h>
#include <xrpl/resource/Disposition.h>

@@ -87,6 +88,9 @@ public:
Entry&
entry();

void
setPublicKey(PublicKey const& publicKey);

private:
Logic* m_logic;
Entry* m_entry;

@@ -53,7 +53,7 @@ struct Entry : public beast::List<Entry>::Node
std::string
to_string() const
{
return key->address.to_string();
return getFingerprint(key->address, publicKey);
}

/**
@@ -82,6 +82,9 @@ struct Entry : public beast::List<Entry>::Node
return local_balance.add(charge, now) + remote_balance;
}

// The public key of the peer
std::optional<PublicKey> publicKey;

// Back pointer to the map key (bit of a hack here)
Key const* key;

@@ -436,10 +436,12 @@ public:
admin_.erase(admin_.iterator_to(entry));
break;
default:
// LCOV_EXCL_START
UNREACHABLE(
"ripple::Resource::Logic::release : invalid entry "
"kind");
break;
// LCOV_EXCL_STOP
}
inactive_.push_back(entry);
entry.whenExpires = m_clock.now() + secondsUntilExpiration;

@@ -25,7 +25,7 @@
#include <xrpl/server/Port.h>
#include <xrpl/server/detail/ServerImpl.h>

#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>

namespace ripple {

@@ -34,10 +34,10 @@ template <class Handler>
std::unique_ptr<Server>
make_Server(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
{
return std::make_unique<ServerImpl<Handler>>(handler, io_service, journal);
return std::make_unique<ServerImpl<Handler>>(handler, io_context, journal);
}

} // namespace ripple

@@ -88,9 +88,7 @@ public:
++iter)
{
typename BufferSequence::value_type const& buffer(*iter);
write(
boost::asio::buffer_cast<void const*>(buffer),
boost::asio::buffer_size(buffer));
write(buffer.data(), boost::asio::buffer_size(buffer));
}
}

@@ -104,7 +102,7 @@ public:

/** Detach the session.
This holds the session open so that the response can be sent
asynchronously. Calls to io_service::run made by the server
asynchronously. Calls to io_context::run made by the server
will not return until all detached sessions are closed.
*/
virtual std::shared_ptr<Session>

@@ -24,11 +24,13 @@
#include <xrpl/beast/net/IPAddressConversion.h>
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/server/Session.h>
#include <xrpl/server/detail/Spawn.h>
#include <xrpl/server/detail/io_list.h>

#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/streambuf.hpp>
#include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/http/dynamic_body.hpp>
@@ -215,8 +217,8 @@ BaseHTTPPeer<Handler, Impl>::BaseHTTPPeer(
ConstBufferSequence const& buffers)
: port_(port)
, handler_(handler)
, work_(executor)
, strand_(executor)
, work_(boost::asio::make_work_guard(executor))
, strand_(boost::asio::make_strand(executor))
, remote_address_(remote_address)
, journal_(journal)
{
@@ -356,7 +358,7 @@ BaseHTTPPeer<Handler, Impl>::on_write(
return;
if (graceful_)
return do_close();
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
@@ -375,7 +377,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
{
auto const p = impl().shared_from_this();
resume = std::function<void(void)>([this, p, writer, keep_alive]() {
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer,
@@ -406,7 +408,7 @@ BaseHTTPPeer<Handler, Impl>::do_writer(
if (!keep_alive)
return do_close();

boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
@@ -448,14 +450,14 @@ BaseHTTPPeer<Handler, Impl>::write(
std::shared_ptr<Writer> const& writer,
bool keep_alive)
{
boost::asio::spawn(bind_executor(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_writer,
impl().shared_from_this(),
writer,
keep_alive,
std::placeholders::_1)));
std::placeholders::_1));
}

// DEPRECATED
@@ -490,12 +492,12 @@ BaseHTTPPeer<Handler, Impl>::complete()
}

// keep-alive
boost::asio::spawn(bind_executor(
util::spawn(
strand_,
std::bind(
&BaseHTTPPeer<Handler, Impl>::do_read,
impl().shared_from_this(),
std::placeholders::_1)));
std::placeholders::_1));
}

// DEPRECATED

@@ -91,8 +91,8 @@ BasePeer<Handler, Impl>::BasePeer(
return "##" + std::to_string(++id) + " ";
}())
, j_(sink_)
, work_(executor)
, strand_(executor)
, work_(boost::asio::make_work_guard(executor))
, strand_(boost::asio::make_strand(executor))
{
}

@@ -29,6 +29,7 @@
#include <xrpl/server/detail/BasePeer.h>
#include <xrpl/server/detail/LowestLayer.h>

#include <boost/asio/error.hpp>
#include <boost/beast/core/multi_buffer.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/websocket.hpp>
@@ -420,11 +421,17 @@ BaseWSPeer<Handler, Impl>::start_timer()
// Max seconds without completing a message
static constexpr std::chrono::seconds timeout{30};
static constexpr std::chrono::seconds timeoutLocal{3};
error_code ec;
timer_.expires_from_now(
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout, ec);
if (ec)
return fail(ec, "start_timer");

try
{
timer_.expires_after(
remote_endpoint().address().is_loopback() ? timeoutLocal : timeout);
}
catch (boost::system::system_error const& e)
{
return fail(e.code(), "start_timer");
}

timer_.async_wait(bind_executor(
strand_,
std::bind(
@@ -438,8 +445,14 @@ template <class Handler, class Impl>
void
BaseWSPeer<Handler, Impl>::cancel_timer()
{
error_code ec;
timer_.cancel(ec);
try
{
timer_.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}
}

template <class Handler, class Impl>

@@ -30,15 +30,29 @@
#include <boost/asio/buffer.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/beast/core/detect_ssl.hpp>
#include <boost/beast/core/multi_buffer.hpp>
#include <boost/beast/core/tcp_stream.hpp>
#include <boost/container/flat_map.hpp>
#include <boost/predef.h>

#if !BOOST_OS_WINDOWS
#include <sys/resource.h>

#include <dirent.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <sstream>

namespace ripple {

@@ -69,7 +83,7 @@ private:
stream_type stream_;
socket_type& socket_;
endpoint_type remote_address_;
boost::asio::io_context::strand strand_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
beast::Journal const j_;

public:
@@ -95,13 +109,30 @@ private:
Handler& handler_;
boost::asio::io_context& ioc_;
acceptor_type acceptor_;
boost::asio::io_context::strand strand_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
bool ssl_;
bool plain_;
static constexpr std::chrono::milliseconds INITIAL_ACCEPT_DELAY{50};
static constexpr std::chrono::milliseconds MAX_ACCEPT_DELAY{2000};
std::chrono::milliseconds accept_delay_{INITIAL_ACCEPT_DELAY};
boost::asio::steady_timer backoff_timer_;
static constexpr double FREE_FD_THRESHOLD = 0.70;

struct FDStats
{
std::uint64_t used{0};
std::uint64_t limit{0};
};

void
reOpen();

std::optional<FDStats>
query_fd_stats() const;

bool
should_throttle_for_fds();

public:
Door(
Handler& handler,
@@ -155,7 +186,7 @@ Door<Handler>::Detector::Detector(
, stream_(std::move(stream))
, socket_(stream_.socket())
, remote_address_(remote_address)
, strand_(ioc_)
, strand_(boost::asio::make_strand(ioc_))
, j_(j)
{
}
@@ -164,7 +195,7 @@ template <class Handler>
void
Door<Handler>::Detector::run()
{
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&Detector::do_detect,
@@ -269,7 +300,7 @@ Door<Handler>::reOpen()
Throw<std::exception>();
}

acceptor_.listen(boost::asio::socket_base::max_connections, ec);
acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
if (ec)
{
JLOG(j_.error()) << "Listen on port '" << port_.name
@@ -291,7 +322,7 @@ Door<Handler>::Door(
, handler_(handler)
, ioc_(io_context)
, acceptor_(io_context)
, strand_(io_context)
, strand_(boost::asio::make_strand(io_context))
, ssl_(
port_.protocol.count("https") > 0 ||
port_.protocol.count("wss") > 0 || port_.protocol.count("wss2") > 0 ||
@@ -299,6 +330,7 @@ Door<Handler>::Door(
, plain_(
port_.protocol.count("http") > 0 || port_.protocol.count("ws") > 0 ||
port_.protocol.count("ws2"))
, backoff_timer_(io_context)
{
reOpen();
}
@@ -307,7 +339,7 @@ template <class Handler>
void
Door<Handler>::run()
{
boost::asio::spawn(
util::spawn(
strand_,
std::bind(
&Door<Handler>::do_accept,
@@ -320,8 +352,10 @@ void
Door<Handler>::close()
{
if (!strand_.running_in_this_thread())
return strand_.post(
return boost::asio::post(
strand_,
std::bind(&Door<Handler>::close, this->shared_from_this()));
backoff_timer_.cancel();
error_code ec;
acceptor_.close(ec);
}
@@ -367,6 +401,17 @@ Door<Handler>::do_accept(boost::asio::yield_context do_yield)
{
while (acceptor_.is_open())
{
if (should_throttle_for_fds())
{
backoff_timer_.expires_after(accept_delay_);
boost::system::error_code tec;
backoff_timer_.async_wait(do_yield[tec]);
accept_delay_ = std::min(accept_delay_ * 2, MAX_ACCEPT_DELAY);
JLOG(j_.warn()) << "Throttling do_accept for "
<< accept_delay_.count() << "ms.";
continue;
}

error_code ec;
endpoint_type remote_address;
stream_type stream(ioc_);
@@ -376,15 +421,28 @@ Door<Handler>::do_accept(boost::asio::yield_context do_yield)
{
if (ec == boost::asio::error::operation_aborted)
break;
JLOG(j_.error()) << "accept: " << ec.message();
if (ec == boost::asio::error::no_descriptors)

if (ec == boost::asio::error::no_descriptors ||
ec == boost::asio::error::no_buffer_space)
{
JLOG(j_.info()) << "re-opening acceptor";
reOpen();
JLOG(j_.warn()) << "accept: Too many open files. Pausing for "
<< accept_delay_.count() << "ms.";

backoff_timer_.expires_after(accept_delay_);
boost::system::error_code tec;
backoff_timer_.async_wait(do_yield[tec]);

accept_delay_ = std::min(accept_delay_ * 2, MAX_ACCEPT_DELAY);
}
else
{
JLOG(j_.error()) << "accept error: " << ec.message();
}
continue;
}

accept_delay_ = INITIAL_ACCEPT_DELAY;

if (ssl_ && plain_)
{
if (auto sp = ios().template emplace<Detector>(
@@ -407,6 +465,60 @@ Door<Handler>::do_accept(boost::asio::yield_context do_yield)
}
}

template <class Handler>
std::optional<typename Door<Handler>::FDStats>
Door<Handler>::query_fd_stats() const
{
#if BOOST_OS_WINDOWS
return std::nullopt;
#else
FDStats s;
struct rlimit rl;
if (getrlimit(RLIMIT_NOFILE, &rl) != 0 || rl.rlim_cur == RLIM_INFINITY)
return std::nullopt;
s.limit = static_cast<std::uint64_t>(rl.rlim_cur);
#if BOOST_OS_LINUX
constexpr char const* kFdDir = "/proc/self/fd";
#else
constexpr char const* kFdDir = "/dev/fd";
#endif
if (DIR* d = ::opendir(kFdDir))
{
std::uint64_t cnt = 0;
while (::readdir(d) != nullptr)
++cnt;
::closedir(d);
// readdir counts '.', '..', and the DIR* itself shows in the list
s.used = (cnt >= 3) ? (cnt - 3) : 0;
return s;
}
return std::nullopt;
#endif
}

template <class Handler>
bool
Door<Handler>::should_throttle_for_fds()
{
#if BOOST_OS_WINDOWS
return false;
#else
auto const stats = query_fd_stats();
if (!stats || stats->limit == 0)
return false;

auto const& s = *stats;
auto const free = (s.limit > s.used) ? (s.limit - s.used) : 0ull;
double const free_ratio =
static_cast<double>(free) / static_cast<double>(s.limit);
if (free_ratio < FREE_FD_THRESHOLD)
{
return true;
}
return false;
#endif
}

} // namespace ripple

#endif
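
Editor's note: illustrative numbers (not from the diff) for the throttle check above. With a soft limit of 1024 descriptors and 800 already in use, the free ratio is 224/1024, roughly 0.22, which is below FREE_FD_THRESHOLD (0.70), so do_accept waits on backoff_timer_ before accepting again.

// Fragment only; requires <cstdint>.
constexpr std::uint64_t limit = 1024;
constexpr std::uint64_t used = 800;
constexpr double free_ratio =
    static_cast<double>(limit - used) / static_cast<double>(limit);
static_assert(free_ratio < 0.70);  // throttle engages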

@@ -105,7 +105,7 @@ PlainHTTPPeer<Handler>::run()
{
if (!this->handler_.onAccept(this->session(), this->remote_address_))
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(&PlainHTTPPeer::do_close, this->shared_from_this()));
return;
@@ -114,7 +114,7 @@ PlainHTTPPeer<Handler>::run()
if (!socket_.is_open())
return;

boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&PlainHTTPPeer::do_read,

@@ -115,14 +115,14 @@ SSLHTTPPeer<Handler>::run()
{
if (!this->handler_.onAccept(this->session(), this->remote_address_))
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(&SSLHTTPPeer::do_close, this->shared_from_this()));
return;
}
if (!socket_.is_open())
return;
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&SSLHTTPPeer::do_handshake,
@@ -164,7 +164,7 @@ SSLHTTPPeer<Handler>::do_handshake(yield_context do_yield)
this->port().protocol.count("https") > 0;
if (http)
{
boost::asio::spawn(
util::spawn(
this->strand_,
std::bind(
&SSLHTTPPeer::do_read,

@@ -26,6 +26,8 @@
#include <xrpl/server/detail/io_list.h>

#include <boost/asio.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>

#include <array>
#include <chrono>
@@ -85,9 +87,11 @@ private:

Handler& handler_;
beast::Journal const j_;
boost::asio::io_service& io_service_;
boost::asio::io_service::strand strand_;
std::optional<boost::asio::io_service::work> work_;
boost::asio::io_context& io_context_;
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
std::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
work_;

std::mutex m_;
std::vector<Port> ports_;
@@ -100,7 +104,7 @@ private:
public:
ServerImpl(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal);

~ServerImpl();
@@ -123,10 +127,10 @@ public:
return ios_;
}

boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return io_service_;
return io_context_;
}

bool
@@ -140,13 +144,13 @@ private:
template <class Handler>
ServerImpl<Handler>::ServerImpl(
Handler& handler,
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
: handler_(handler)
, j_(journal)
, io_service_(io_service)
, strand_(io_service_)
, work_(io_service_)
, io_context_(io_context)
, strand_(boost::asio::make_strand(io_context_))
, work_(std::in_place, boost::asio::make_work_guard(io_context_))
{
}

@@ -173,7 +177,7 @@ ServerImpl<Handler>::ports(std::vector<Port> const& ports)
ports_.push_back(port);
auto& internalPort = ports_.back();
if (auto sp = ios_.emplace<Door<Handler>>(
handler_, io_service_, internalPort, j_))
handler_, io_context_, internalPort, j_))
{
list_.push_back(sp);

108
include/xrpl/server/detail/Spawn.h
Normal file
@@ -0,0 +1,108 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright(c) 2025 Ripple Labs Inc.

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_SERVER_SPAWN_H_INCLUDED
#define RIPPLE_SERVER_SPAWN_H_INCLUDED

#include <xrpl/basics/Log.h>

#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>

#include <concepts>
#include <type_traits>

namespace ripple::util {
namespace impl {

template <typename T>
concept IsStrand = std::same_as<
std::decay_t<T>,
boost::asio::strand<typename std::decay_t<T>::inner_executor_type>>;

/**
 * @brief A completion handler that restores `boost::asio::spawn`'s behaviour
 * from Boost 1.83
 *
 * This is intended to be passed as the third argument to `boost::asio::spawn`
 * so that exceptions are not ignored but propagated to `io_context.run()` call
 * site.
 *
 * @param ePtr The exception that was caught on the coroutine
 */
inline constexpr auto kPROPAGATE_EXCEPTIONS = [](std::exception_ptr ePtr) {
if (ePtr)
{
try
{
std::rethrow_exception(ePtr);
}
catch (std::exception const& e)
{
JLOG(debugLog().warn()) << "Spawn exception: " << e.what();
throw;
}
catch (...)
{
JLOG(debugLog().warn()) << "Spawn exception: Unknown";
throw;
}
}
};

} // namespace impl

/**
 * @brief Spawns a coroutine using `boost::asio::spawn`
 *
 * @note This uses kPROPAGATE_EXCEPTIONS to force asio to propagate exceptions
 * through `io_context`
 * @note Since implicit strand was removed from boost::asio::spawn this helper
 * function adds the strand back
 *
 * @tparam Ctx The type of the context/strand
 * @tparam F The type of the function to execute
 * @param ctx The execution context
 * @param func The function to execute. Must return `void`
 */
template <typename Ctx, typename F>
requires std::is_invocable_r_v<void, F, boost::asio::yield_context>
void
spawn(Ctx&& ctx, F&& func)
{
if constexpr (impl::IsStrand<Ctx>)
{
boost::asio::spawn(
std::forward<Ctx>(ctx),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
else
{
boost::asio::spawn(
boost::asio::make_strand(
boost::asio::get_associated_executor(std::forward<Ctx>(ctx))),
std::forward<F>(func),
impl::kPROPAGATE_EXCEPTIONS);
}
}

} // namespace ripple::util

#endif
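
Editor's note: a minimal usage sketch, assuming the header above is available as <xrpl/server/detail/Spawn.h>. The coroutine runs on a strand, and any exception it throws is re-thrown from io_context::run() via kPROPAGATE_EXCEPTIONS.

#include <xrpl/server/detail/Spawn.h>

#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/strand.hpp>

#include <chrono>

int main()
{
    boost::asio::io_context ioc;
    ripple::util::spawn(
        boost::asio::make_strand(ioc),
        [&ioc](boost::asio::yield_context yield) {
            boost::asio::steady_timer timer(ioc, std::chrono::milliseconds(10));
            timer.async_wait(yield);  // suspends the coroutine, not the thread
        });
    ioc.run();  // re-throws anything the coroutine threw
}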

@@ -166,7 +166,7 @@ public:
May be called concurrently.

Preconditions:
No call to io_service::run on any io_service
No call to io_context::run on any io_context
used by work objects associated with this io_list
exists in the caller's call stack.
*/

167
pkgs/build.sh
Executable file
@@ -0,0 +1,167 @@
#!/usr/bin/env bash

set -o errexit
set -o xtrace

. /etc/os-release

case "$ID $ID_LIKE" in
*rhel*|*fedora*)
pkg="rpm"
;;
*debian*|*ubuntu*)
pkg="deb"
;;
esac

repo_dir=$PWD
set -a
repo_name="rippled"
pkgs_dir="${repo_dir}/pkgs"
shared_files="${pkgs_dir}/shared"
pkg_files="${pkgs_dir}/${pkg}"
build_info_src="${repo_dir}/src/libxrpl/protocol/BuildInfo.cpp"
xrpl_version=$(grep -E -i -o "\b(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-[0-9a-z\-]+(\.[0-9a-z\-]+)*)?(\+[0-9a-z\-]+(\.[0-9a-z\-]+)*)?\b" "${build_info_src}")

git config --global --add safe.directory '*'
branch=$(git rev-parse --abbrev-ref HEAD)
commit=$(git rev-parse HEAD)
short_commit=$(git rev-parse --short=7 HEAD)
date=$(git show -s --format=%cd --date=format-local:"%Y%m%d%H%M%S")

BUILD_TYPE=${BUILD_TYPE:-Release}
set +a
if [ "${branch}" = 'develop' ]; then
# TODO: Can remove when CMake sets version
dev_version="${date}~${short_commit}"
xrpl_version="${xrpl_version}+${dev_version}"
fi

if [ "${pkg}" = 'rpm' ]; then
IFS='-' read -r RIPPLED_RPM_VERSION RELEASE <<< "${xrpl_version}"
export RIPPLED_RPM_VERSION
RPM_RELEASE=${RPM_RELEASE-1}
# post-release version
if [ "hf" = "$(echo "$RELEASE" | cut -c -2)" ]; then
RPM_RELEASE="${RPM_RELEASE}.${RELEASE}"
# pre-release version (-b or -rc)
elif [[ $RELEASE ]]; then
RPM_RELEASE="0.${RPM_RELEASE}.${RELEASE}"
fi
export RPM_RELEASE

if [[ $RPM_PATCH ]]; then
RPM_PATCH=".${RPM_PATCH}"
export RPM_PATCH
fi

build_dir="build/${pkg}/packages"
rm -rf "${build_dir}"
mkdir -p "${build_dir}/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}"
# cp "${pkgs_dir}/rippled.patch" "${build_dir}/rpmbuild/SOURCES/"
git archive \
--remote "${repo_dir}" HEAD \
--prefix ${repo_name}/ \
--format tar.gz \
--output "${build_dir}/rpmbuild/SOURCES/rippled.tar.gz"
ln --symbolic "${repo_dir}" "${build_dir}/rippled"
cp -r "${pkg_files}/rippled.spec" "${build_dir}"
pushd "${build_dir}" || exit

rpmbuild \
--define "_topdir ${PWD}/rpmbuild" \
--define "_smp_build_ncpus %(nproc --ignore=2 2>/dev/null || echo 1)" \
-ba rippled.spec

RPM_VERSION_RELEASE=$(rpm -qp --qf='%{NAME}-%{VERSION}-%{RELEASE}' ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm)
tar_file=$RPM_VERSION_RELEASE.tar.gz

printf '%s\n' \
"rpm_md5sum=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm 2>/dev/null)" \
"rpm_sha256=$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm | awk '{ print $1 }')" \
"rippled_version=${xrpl_version}" \
"rippled_git_hash=${commit}" \
"rpm_version=${RIPPLED_RPM_VERSION}" \
"rpm_file_name=${tar_file}" \
"rpm_version_release=${RPM_VERSION_RELEASE}" \
> build_vars

# Rename the files arch to match the debs
mv rpmbuild/RPMS/x86_64/* .
for f in *x86_64.rpm; do
new="${f/x86_64/amd64}"
mv "$f" "$new"
echo "Renamed $f -> $new"
done
rm -rf rpmbuild
rm -f rippled rippled.tar.gz rippled.spec
pushd -0 && dirs -c

mv "${build_dir}/build_vars" .

elif [ "${pkg}" = 'deb' ]; then
# dpkg_version=$(echo "${xrpl_version}" | sed 's:-:~:g')
dpkg_version=${xrpl_version//-/\~}
full_version="${dpkg_version}"
build_dir="build/${pkg}/packages"
rm -rf "${build_dir}"
mkdir -p "${build_dir}"

git archive \
--remote "${repo_dir}" HEAD \
--prefix ${repo_name}/ \
--format tar.gz \
--output "${build_dir}/${repo_name}_${dpkg_version}.orig.tar.gz"

pushd "${build_dir}" || exit
tar -zxf "${repo_name}_${dpkg_version}.orig.tar.gz"

pushd ${repo_name} || exit

# Prepare the package metadata directory, `debian/`, within `rippled/`.
cp -r "${pkg_files}/debian" .
cp "${shared_files}/rippled.service" debian/
cp "${shared_files}/update-rippled.sh" .
cp "${shared_files}/update-rippled-cron" .
cp "${shared_files}/rippled-logrotate" .

if [ "${branch}" = 'develop' ]; then
# TODO: Can remove when CMake sets version
sed --in-place "s/versionString = \"\([^\"]*\)\"/versionString = \"${xrpl_version}\"/" "${build_info_src}"
fi

cat << CHANGELOG > ./debian/changelog
rippled (${xrpl_version}) unstable; urgency=low

* see RELEASENOTES

-- Ripple Labs Inc. <support@ripple.com> $(TZ=UTC date -R)
CHANGELOG

cat ./debian/changelog
dpkg-buildpackage -b -d -us -uc

popd || exit
rm -rf ${repo_name}
# for f in *.ddeb; do mv -- "$f" "${f%.ddeb}.deb"; done
popd || exit
cp "${build_dir}/${repo_name}_${xrpl_version}_amd64.changes" .

awk '/Checksums-Sha256:/{hit=1;next}/Files:/{hit=0}hit' "${repo_name}_${xrpl_version}_amd64.changes" | sed -E 's!^[[:space:]]+!!' > shasums
sha() {
<shasums awk "/$1/ { print \$1 }"
}

printf '%s\n' \
"deb_sha256=$(sha "rippled_${full_version}_amd64.deb")" \
"dbg_sha256=$(sha "rippled-dbgsym_${full_version}_amd64.deb")" \
"rippled_version=${xrpl_version}" \
"rippled_git_hash=${commit}" \
"dpkg_version=${dpkg_version}" \
"dpkg_full_version=${full_version}" \
> build_vars

pushd -0 && dirs -c
fi

cp "${build_dir}/"*.$pkg .
3
pkgs/deb/debian/README.Debian
Normal file
3
pkgs/deb/debian/README.Debian
Normal file
@@ -0,0 +1,3 @@
|
||||
rippled daemon
|
||||
|
||||
-- Mike Ellery <mellery451@gmail.com> Tue, 04 Dec 2018 18:19:03 +0000
|
||||
20
pkgs/deb/debian/control
Normal file
20
pkgs/deb/debian/control
Normal file
@@ -0,0 +1,20 @@
|
||||
Source: rippled
|
||||
Section: net
|
||||
Priority: optional
|
||||
Maintainer: Ripple Labs Inc. <support@ripple.com>
|
||||
Build-Depends: cmake,
|
||||
debhelper (>= 13),
|
||||
debhelper-compat (= 13)
|
||||
# debhelper (>= 14),
|
||||
# debhelper-compat (= 14),
|
||||
# TODO: Let's go for 14!
|
||||
Standards-Version: 4.6.0
|
||||
Homepage: https://github.com/XRPLF/rippled.git
|
||||
Rules-Requires-Root: no
|
||||
|
||||
Package: rippled
|
||||
Architecture: any
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}
|
||||
Description: XRP Ledger server (rippled)
|
||||
rippled is the core server of the XRP Ledger, providing a peer-to-peer
|
||||
network node for validating and processing transactions.
|
||||
86
pkgs/deb/debian/copyright
Normal file
86
pkgs/deb/debian/copyright
Normal file
@@ -0,0 +1,86 @@
|
||||
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: rippled
|
||||
Source: https://github.com/ripple/rippled
|
||||
|
||||
Files: *
|
||||
Copyright: 2012-2019 Ripple Labs Inc.
|
||||
|
||||
License: __UNKNOWN__
|
||||
|
||||
The accompanying files under various copyrights.
|
||||
|
||||
Copyright (c) 2012, 2013, 2014 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
The accompanying files incorporate work covered by the following copyright
|
||||
and previous license notice:
|
||||
|
||||
Copyright (c) 2011 Arthur Britto, David Schwartz, Jed McCaleb,
|
||||
Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant
|
||||
|
||||
Some code from Raw Material Software, Ltd., provided under the terms of the
|
||||
ISC License. See the corresponding source files for more details.
|
||||
Copyright (c) 2013 - Raw Material Software Ltd.
|
||||
Please visit http://www.juce.com
|
||||
|
||||
Some code from ASIO examples:
|
||||
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0. (See accompanying
|
||||
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
Some code from Bitcoin:
|
||||
// Copyright (c) 2009-2010 Satoshi Nakamoto
|
||||
// Copyright (c) 2011 The Bitcoin developers
|
||||
// Distributed under the MIT/X11 software license, see the accompanying
|
||||
// file license.txt or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
Some code from Tom Wu:
|
||||
This software is covered under the following copyright:
|
||||
|
||||
/*
|
||||
* Copyright (c) 2003-2005 Tom Wu
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
|
||||
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
|
||||
* INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER
|
||||
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF
|
||||
* THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT
|
||||
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*
|
||||
* In addition, the following condition applies:
|
||||
*
|
||||
* All redistributions must retain an intact copy of this copyright notice
|
||||
* and disclaimer.
|
||||
*/
|
||||
|
||||
Address all questions regarding this license to:
|
||||
|
||||
Tom Wu
|
||||
tjw@cs.Stanford.EDU
|
||||
3
pkgs/deb/debian/dirs
Normal file
3
pkgs/deb/debian/dirs
Normal file
@@ -0,0 +1,3 @@
|
||||
/var/log/rippled/
|
||||
/var/lib/rippled/
|
||||
/etc/systemd/system/rippled.service.d/
|
||||
2
pkgs/deb/debian/docs
Normal file
2
pkgs/deb/debian/docs
Normal file
@@ -0,0 +1,2 @@
|
||||
README.md
|
||||
LICENSE.md
|
||||
3
pkgs/deb/debian/rippled-dev.install
Normal file
3
pkgs/deb/debian/rippled-dev.install
Normal file
@@ -0,0 +1,3 @@
|
||||
opt/ripple/include
|
||||
opt/ripple/lib/*.a
|
||||
opt/ripple/lib/cmake/*
|
||||
2
pkgs/deb/debian/rippled.conffiles
Normal file
2
pkgs/deb/debian/rippled.conffiles
Normal file
@@ -0,0 +1,2 @@
|
||||
/opt/ripple/etc/rippled.cfg
|
||||
/opt/ripple/etc/validators.txt
|
||||
10
pkgs/deb/debian/rippled.install
Normal file
10
pkgs/deb/debian/rippled.install
Normal file
@@ -0,0 +1,10 @@
|
||||
etc/logrotate.d/rippled
|
||||
opt/ripple/bin/rippled
|
||||
opt/ripple/bin/update-rippled.sh
|
||||
opt/ripple/bin/validator-keys
|
||||
opt/ripple/bin/xrpld
|
||||
opt/ripple/etc/rippled.cfg
|
||||
opt/ripple/etc/update-rippled-cron
|
||||
opt/ripple/etc/validators.txt
|
||||
opt/ripple/include/*
|
||||
opt/ripple/lib/*
|
||||
4
pkgs/deb/debian/rippled.links
Normal file
4
pkgs/deb/debian/rippled.links
Normal file
@@ -0,0 +1,4 @@
|
||||
opt/ripple/etc/rippled.cfg etc/opt/ripple/rippled.cfg
|
||||
opt/ripple/etc/validators.txt etc/opt/ripple/validators.txt
|
||||
opt/ripple/bin/rippled usr/local/bin/rippled
|
||||
opt/ripple/bin/rippled opt/ripple/bin/xrpld
|
||||
39
pkgs/deb/debian/rippled.postinst
Normal file
39
pkgs/deb/debian/rippled.postinst
Normal file
@@ -0,0 +1,39 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
USER_NAME=rippled
|
||||
GROUP_NAME=rippled
|
||||
case "$1" in
|
||||
configure)
|
||||
id -u $USER_NAME >/dev/null 2>&1 || \
|
||||
useradd --system \
|
||||
--home-dir /nonexistent \
|
||||
--no-create-home \
|
||||
--shell /usr/sbin/nologin \
|
||||
--comment "system user for rippled" \
|
||||
--user-group \
|
||||
${USER_NAME}
|
||||
|
||||
chown -R $USER_NAME:$GROUP_NAME /var/log/rippled/
|
||||
chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled/
|
||||
chown -R $USER_NAME:$GROUP_NAME /opt/ripple
|
||||
chmod 755 /var/log/rippled/
|
||||
chmod 755 /var/lib/rippled/
|
||||
chmod 644 /opt/ripple/etc/update-rippled-cron
|
||||
chmod 644 /etc/logrotate.d/rippled
|
||||
chown -R root:$GROUP_NAME /opt/ripple/etc/update-rippled-cron
|
||||
;;
|
||||
|
||||
abort-upgrade|abort-remove|abort-deconfigure)
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "postinst called with unknown argument \`$1'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
#DEBHELPER#
|
||||
|
||||
exit 0
|
||||
17 pkgs/deb/debian/rippled.postrm Normal file
@@ -0,0 +1,17 @@
#!/bin/sh
set -e

case "$1" in
    purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
        ;;

    *)
        echo "postrm called with unknown argument \`$1'" >&2
        exit 1
        ;;
esac

#DEBHELPER#

exit 0
20 pkgs/deb/debian/rippled.preinst Normal file
@@ -0,0 +1,20 @@
#!/bin/sh
set -e

case "$1" in
    install|upgrade)
        ;;

    abort-upgrade)
        ;;

    *)
        echo "preinst called with unknown argument \`$1'" >&2
        exit 1
        ;;
esac

#DEBHELPER#

exit 0
20 pkgs/deb/debian/rippled.prerm Normal file
@@ -0,0 +1,20 @@
#!/bin/sh
set -e

case "$1" in
    remove|upgrade|deconfigure)
        ;;

    failed-upgrade)
        ;;

    *)
        echo "prerm called with unknown argument \`$1'" >&2
        exit 1
        ;;
esac

#DEBHELPER#

exit 0
77 pkgs/deb/debian/rules Executable file
@@ -0,0 +1,77 @@
#!/usr/bin/make -f
export DH_VERBOSE = 1
export DH_OPTIONS = -v
## TODO: Confirm these are still required.
# debuild sets some warnings that don't work well
# for our current build, so try to remove those flags here:
export CFLAGS:=$(subst -Wformat,,$(CFLAGS))
export CFLAGS:=$(subst -Werror=format-security,,$(CFLAGS))
export CXXFLAGS:=$(subst -Wformat,,$(CXXFLAGS))
export CXXFLAGS:=$(subst -Werror=format-security,,$(CXXFLAGS))

## TODO: Confirm these are still required.
export DEB_BUILD_MAINT_OPTIONS = hardening=+all
export DEB_BUILD_OPTIONS = nodwz

export RIPPLE_REMOTE = xrplf
export RIPPLE_REMOTE_URL = https://conan.ripplex.io

# ## CMake configure args
# export DEB_CMAKE_GENERATOR = Ninja
export DEB_CMAKE_BUILD_TYPE = Release

NPROC := $(shell nproc --ignore=2)
export DEB_BUILD_OPTIONS += parallel=$(NPROC)

CONAN_HOME := $(shell conan config home)
CONAN_PROFILE := $(shell conan profile path default)
CONAN_GCONF := $(CONAN_HOME)/global.conf
INSTALL_PREFIX := "/opt/ripple"
BUILD_DIR := obj-$(DEB_BUILD_GNU_TYPE)

.ONESHELL:
SHELL := /bin/bash

%:
	dh $@ --buildsystem=cmake

override_dh_installsystemd:
	dh_installsystemd --no-start

override_dh_auto_configure:
	conan remote add --index 0 $(RIPPLE_REMOTE) $(RIPPLE_REMOTE_URL) --force
	conan config install ./conan/profiles/default --target-folder $(CONAN_HOME)/profiles
	echo "tools.build:jobs={{ os.cpu_count() - 2 }}" >> $(CONAN_GCONF)
	echo "core.download:parallel={{ os.cpu_count() }}" >> $(CONAN_GCONF)

	conan install . \
	    --settings:all build_type=$(DEB_CMAKE_BUILD_TYPE) \
	    --output-folder=$(BUILD_DIR) \
	    --options:host "&:xrpld=True" \
	    --options:host "&:tests=True" \
	    --build=missing

	# Debian assumes an offline build process and sets CMake's FETCHCONTENT_FULLY_DISCONNECTED variable to ON.
	# To keep as many of the default settings as possible, we clone the repository where CMake's FetchContent expects it.
	mkdir -p "$(BUILD_DIR)/_deps"
	git clone https://github.com/ripple/validator-keys-tool.git "$(BUILD_DIR)/_deps/validator_keys-src"

	dh_auto_configure --builddirectory=$(BUILD_DIR) -- \
	    -DCMAKE_BUILD_TYPE:STRING=$(DEB_CMAKE_BUILD_TYPE) \
	    -Dxrpld=ON -Dtests=ON -Dvalidator_keys=ON \
	    -DCMAKE_INSTALL_PREFIX:PATH=$(INSTALL_PREFIX) \
	    -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
	    -DCMAKE_TOOLCHAIN_FILE:FILEPATH=$(BUILD_DIR)/build/generators/conan_toolchain.cmake

override_dh_auto_build:
	dh_auto_build \
	    --builddirectory=$(BUILD_DIR) -- rippled validator-keys

override_dh_auto_install:
	cmake --install $(BUILD_DIR) --prefix debian/tmp/opt/ripple
	install -D update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh
	install -D update-rippled-cron debian/tmp/opt/ripple/etc/update-rippled-cron
	install -D rippled-logrotate debian/tmp/etc/logrotate.d/rippled
	rm -rf debian/tmp/opt/ripple/lib64/cmake/date

override_dh_dwz:
	@echo "Skipping DWZ due to huge debug info"
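
The override_dh_auto_configure recipe above works around Debian's offline-build convention: with FETCHCONTENT_FULLY_DISCONNECTED=ON, CMake's FetchContent will not download anything and expects the source tree to already sit in its default location. A minimal stand-alone sketch of the same idea, assuming the project declares FetchContent_Declare(validator_keys ...) so the default source directory is <builddir>/_deps/validator_keys-src:

    # Illustrative only; BUILD_DIR mirrors $(BUILD_DIR) in the rules file.
    BUILD_DIR=obj-x86_64-linux-gnu
    mkdir -p "${BUILD_DIR}/_deps"
    git clone https://github.com/ripple/validator-keys-tool.git \
        "${BUILD_DIR}/_deps/validator_keys-src"
    # With the sources pre-populated, a fully disconnected configure succeeds:
    cmake -S . -B "${BUILD_DIR}" -DFETCHCONTENT_FULLY_DISCONNECTED=ON -Dvalidator_keys=ON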
1 pkgs/deb/debian/source/format Normal file
@@ -0,0 +1 @@
3.0 (native)

2 pkgs/deb/debian/source/local-options Normal file
@@ -0,0 +1,2 @@
#abort-on-upstream-changes
#unapply-patches

1 pkgs/rpm/50-rippled.preset Normal file
@@ -0,0 +1 @@
enable rippled.service
171 pkgs/rpm/rippled.spec Normal file
@@ -0,0 +1,171 @@
%global pkg_name %{getenv:repo_name}
%global branch %{getenv:branch}
%global commit %{getenv:commit}
%global shortcommit %{getenv:shortcommit}
%global date %{getenv:commit_date}
%global conan_remote_name %{getenv:conan_remote_name}
%global conan_remote_url %{getenv:conan_remote_url}
%global shared_files %{getenv:shared_files}
%global pkg_files %{getenv:pkg_files}
%global build_type %{getenv:BUILD_TYPE}

%global _prefix /opt/ripple
%global srcdir %{_builddir}/rippled
%global blddir %{srcdir}/bld.rippled

%global xrpl_version %{getenv:xrpl_version}
%global ver_base %(v=%{xrpl_version}; echo ${v%%-*})
%global _has_dash %(v=%{xrpl_version}; [ "${v#*-}" != "$v" ] && echo 1 || echo 0)
%if 0%{?_has_dash}
%global ver_suffix %(v=%{xrpl_version}; printf %s "${v#*-}")
%endif

Name:    %{pkg_name}
Version: %{ver_base}
Release: %{?ver_suffix:0.%{ver_suffix}}%{!?ver_suffix:1}%{?dist}
Summary: %{name} XRPL daemon

License: ISC
URL:     https://github.com/XRPLF/rippled
Source0: rippled.tar.gz
Patch0:  rippled.patch
%{warn:name=%{name}}
%{warn:version=%{version}}
%{warn:ver_base=%{ver_base}}
%{warn:ver_suffix=%{ver_suffix}}
%{warn:release=%{release}}
%{warn:FullReleaseVersion=%{name}-%{version}-%{release}.%{_arch}.rpm}

%description
%{name} with p2p server for the XRP Ledger.

%prep
%autosetup -p1 -n %{name}

# TODO: Remove when version set with CMake.
if [ %{branch} == 'develop' ]; then
    sed --in-place "s/versionString = \"\([^\"]*\)\"/versionString = \"\1+%{ver_input}\"/" src/libxrpl/protocol/BuildInfo.cpp
fi

%build
conan remote add --index 0 %{conan_remote_name} %{conan_remote_url} --force
conan config install conan/profiles/default --target-folder $(conan config home)/profiles/
echo "tools.build:jobs={{ os.cpu_count() - 2 }}" >> ${CONAN_HOME}/global.conf
echo "core.download:parallel={{ os.cpu_count() }}" >> ${CONAN_HOME}/global.conf

conan install %{srcdir} \
    --settings:all build_type=%{build_type} \
    --output-folder %{srcdir}/conan_deps \
    --options:host "&:xrpld=True" \
    --options:host "&:tests=True" \
    --build=missing

cmake \
    -S %{srcdir} \
    -B %{blddir} \
    -Dxrpld=ON \
    -Dvalidator_keys=ON \
    -Dtests=ON \
    -DCMAKE_BUILD_TYPE:STRING=%{build_type} \
    -DCMAKE_INSTALL_PREFIX:PATH=%{_prefix} \
    -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
    -DCMAKE_TOOLCHAIN_FILE:FILEPATH=%{srcdir}/conan_deps/build/generators/conan_toolchain.cmake

cmake \
    --build %{blddir} \
    --parallel %{_smp_build_ncpus} \
    --target rippled \
    --target validator-keys

%install
rm -rf %{buildroot}
DESTDIR=%{buildroot} cmake --install %{blddir}

install -Dm0755 %{shared_files}/update-rippled.sh %{buildroot}%{_bindir}/update-rippled.sh
ln -s rippled %{buildroot}%{_bindir}/xrpld
ln -s update-rippled.sh %{buildroot}%{_bindir}/update-xrpld.sh

# configs
install -Dm0644 %{srcdir}/cfg/rippled-example.cfg %{buildroot}%{_prefix}/etc/rippled.cfg
install -Dm0644 %{srcdir}/cfg/validators-example.txt %{buildroot}%{_prefix}/etc/validators.txt
mkdir -p %{buildroot}%{_sysconfdir}/opt/ripple

# /etc points to /opt
ln -s ../../../opt/ripple/rippled.cfg %{buildroot}%{_sysconfdir}/opt/ripple/xrpld.cfg
ln -s ../../../opt/ripple/etc/rippled.cfg %{buildroot}%{_sysconfdir}/opt/ripple/rippled.cfg
ln -s ../../../opt/ripple/etc/validators.txt %{buildroot}%{_sysconfdir}/opt/ripple/validators.txt

# systemd/sysusers/tmpfiles
install -Dm0644 %{shared_files}/rippled.service %{buildroot}%{_unitdir}/rippled.service
install -Dm0644 %{pkg_files}/rippled.sysusers %{buildroot}%{_sysusersdir}/rippled.conf
install -Dm0644 %{pkg_files}/rippled.tmpfiles %{buildroot}%{_tmpfilesdir}/rippled.conf

%files
%license LICENSE*
%doc README*

# Files/dirs the package owns
%dir %{_prefix}
%dir %{_prefix}/bin
%dir %{_prefix}/etc
%if 0
%dir %{_sysconfdir}/opt # Add this if rpmlint complains.
%endif
%dir %{_sysconfdir}/opt/ripple

# Binaries and symlinks under our (non-standard) _prefix (/opt/ripple)
%{_bindir}/rippled
%{_bindir}/xrpld
%{_bindir}/update-rippled.sh
%{_bindir}/update-xrpld.sh
%{_bindir}/validator-keys

# We won't ship these but we'll create them.
%ghost /usr/local/bin/rippled
%ghost /usr/local/bin/xrpld

%config(noreplace) %{_prefix}/etc/rippled.cfg
%config(noreplace) %{_prefix}/etc/validators.txt

%config(noreplace) %{_sysconfdir}/opt/ripple/rippled.cfg
%config(noreplace) %{_sysconfdir}/opt/ripple/xrpld.cfg
%config(noreplace) %{_sysconfdir}/opt/ripple/validators.txt

# systemd and service user creation
%{_unitdir}/rippled.service
%{_sysusersdir}/rippled.conf
%{_tmpfilesdir}/rippled.conf

# Let tmpfiles create the db and log dirs
%ghost %dir /var/opt/ripple
%ghost %dir /var/opt/ripple/lib
%ghost %dir /var/opt/ripple/log

# TODO: Fix the CMake install() calls so we don't need to exclude these.
%exclude %{_prefix}/include/*
%exclude %{_prefix}/lib/*
%exclude %{_prefix}/lib/pkgconfig/*
%exclude /usr/lib/debug/**

%post
# Add a link on $PATH: /usr/local/bin/rippled -> %{_bindir}/rippled (also non-standard)
mkdir -p /usr/local/bin
for i in rippled xrpld
do
    if [ ! -e /usr/local/bin/${i} ]; then
        ln -s %{_bindir}/${i} /usr/local/bin/${i}
    elif [ -L /usr/local/bin/${i} ] && \
         [ "$(readlink -f /usr/local/bin/${i})" != "%{_bindir}/${i}" ]; then
        ln -sfn %{_bindir}/${i} /usr/local/bin/${i}
    fi
done

%preun
# Remove the link only if it points to us (on erase, $1 == 0).
for i in rippled xrpld
do
    if [ "$1" -eq 0 ] && [ -L /usr/local/bin/${i} ] && \
       [ "$(readlink -f /usr/local/bin/${i})" = "%{_bindir}/${i}" ]; then
        rm -f /usr/local/bin/${i}
    fi
done
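
The %post/%preun scriptlets above rely on the standard RPM scriptlet argument: $1 is the number of package instances that will remain after the transaction, so %preun sees 0 on a full erase and 1 (or more) on an upgrade. A small sketch of that guard, with the path shown purely for illustration:

    # erase: $1 = 0 -> remove our symlink; upgrade: $1 >= 1 -> leave it alone
    if [ "$1" -eq 0 ] && [ -L /usr/local/bin/rippled ]; then
        rm -f /usr/local/bin/rippled   # the spec additionally checks the link target
    fi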
2 pkgs/rpm/rippled.sysusers Normal file
@@ -0,0 +1,2 @@
u rippled - "System user for rippled service"
g rippled - -

2 pkgs/rpm/rippled.tmpfiles Normal file
@@ -0,0 +1,2 @@
d /var/opt/ripple/lib 0750 rippled rippled -
d /var/opt/ripple/log 0750 rippled adm -
15 pkgs/shared/rippled-logrotate Normal file
@@ -0,0 +1,15 @@
/var/log/rippled/*.log {
    daily
    minsize 200M
    rotate 7
    nocreate
    missingok
    notifempty
    compress
    compresscmd /usr/bin/nice
    compressoptions -n19 ionice -c3 gzip
    compressext .gz
    postrotate
        /opt/ripple/bin/rippled --conf /opt/ripple/etc/rippled.cfg logrotate
    endscript
}

15 pkgs/shared/rippled.service Normal file
@@ -0,0 +1,15 @@
[Unit]
Description=Ripple Daemon
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/opt/ripple/bin/rippled --net --silent --conf /etc/opt/ripple/rippled.cfg
Restart=on-failure
User=rippled
Group=rippled
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
10 pkgs/shared/update-rippled-cron Normal file
@@ -0,0 +1,10 @@
# For automatic updates, symlink this file to /etc/cron.d/
# Do not remove the newline at the end of this cron script

# bash required for use of RANDOM below.
SHELL=/bin/bash
PATH=/sbin;/bin;/usr/sbin;/usr/bin

# invoke check/update script with random delay up to 59 mins
0 * * * * root sleep $((RANDOM*3540/32768)) && /opt/ripple/bin/update-rippled.sh

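The sleep in the cron entry spreads update checks across the hour: bash's RANDOM is an integer in 0..32767, so RANDOM*3540/32768 evaluates (with integer arithmetic) to a delay between 0 and 3539 seconds, i.e. just under 59 minutes. A quick check:

    echo $(( 32767 * 3540 / 32768 ))   # 3539, the maximum delay in seconds
    echo $(( RANDOM * 3540 / 32768 ))  # a per-run delay in 0..3539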
65 pkgs/shared/update-rippled.sh Executable file
@@ -0,0 +1,65 @@
#!/usr/bin/env bash

# auto-update script for the rippled daemon

# Check for sudo/root permissions
if [[ $(id -u) -ne 0 ]] ; then
    echo "This update script must be run as root or sudo"
    exit 1
fi

LOCKDIR=/tmp/rippleupdate.lock
UPDATELOG=/var/log/rippled/update.log

function cleanup {
    # If this directory isn't removed, future updates will fail.
    rmdir $LOCKDIR
}

# Use mkdir to check whether the process is already running; unlike file creation, mkdir is atomic.
if ! mkdir $LOCKDIR 2>/dev/null; then
    echo $(date -u) "lockdir exists - won't proceed." >> $UPDATELOG
    exit 1
fi
trap cleanup EXIT

source /etc/os-release
can_update=false

if [[ "$ID" == "ubuntu" || "$ID" == "debian" ]] ; then
    # Silent update
    apt-get update -qq

    # The next line is an "awk"ward way to check if the package needs to be updated.
    RIPPLE=$(apt-get install -s --only-upgrade rippled | awk '/^Inst/ { print $2 }')
    test "$RIPPLE" == "rippled" && can_update=true

    function apply_update {
        apt-get install rippled -qq
    }
elif [[ "$ID" == "fedora" || "$ID" == "centos" || "$ID" == "rhel" || "$ID" == "scientific" ]] ; then
    RIPPLE_REPO=${RIPPLE_REPO-stable}
    yum --disablerepo=* --enablerepo=ripple-$RIPPLE_REPO clean expire-cache

    yum check-update -q --enablerepo=ripple-$RIPPLE_REPO rippled || can_update=true

    function apply_update {
        yum update -y --enablerepo=ripple-$RIPPLE_REPO rippled
    }
else
    echo "unrecognized distro!"
    exit 1
fi

# Do the actual update and restart the service after reloading the systemd daemon.
if [ "$can_update" = true ] ; then
    exec 3>&1 1>>${UPDATELOG} 2>&1
    set -e
    apply_update
    systemctl daemon-reload
    systemctl restart rippled.service
    echo $(date -u) "rippled daemon updated."
else
    echo $(date -u) "no updates available" >> $UPDATELOG
fi
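The lock in update-rippled.sh uses mkdir as a mutex: unlike creating a file, mkdir either creates the directory or fails in a single atomic step, so two concurrent runs cannot both acquire the lock. A minimal sketch of the pattern in isolation (the lock path is illustrative):

    #!/usr/bin/env bash
    LOCKDIR=/tmp/example-update.lock
    if ! mkdir "$LOCKDIR" 2>/dev/null; then
        echo "another run holds the lock - exiting"
        exit 1
    fi
    trap 'rmdir "$LOCKDIR"' EXIT   # always release, even on error
    echo "doing the guarded work..."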
@@ -239,9 +239,11 @@ Logs::fromSeverity(beast::severities::Severity level)
case kError:
return lsERROR;

// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::fromSeverity : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case kFatal:
break;
}
@@ -265,9 +267,11 @@ Logs::toSeverity(LogSeverity level)
return kWarning;
case lsERROR:
return kError;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::toSeverity : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case lsFATAL:
break;
}
@@ -292,9 +296,11 @@ Logs::toString(LogSeverity s)
return "Error";
case lsFATAL:
return "Fatal";
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::toString : invalid severity");
return "Unknown";
// LCOV_EXCL_STOP
}
}

@@ -356,9 +362,11 @@ Logs::format(
case kError:
output += "ERR ";
break;
// LCOV_EXCL_START
default:
UNREACHABLE("ripple::Logs::format : invalid severity");
[[fallthrough]];
// LCOV_EXCL_STOP
case kFatal:
output += "FTL ";
break;
@@ -25,8 +25,9 @@
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/asio/bind_executor.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/system/detail/error_code.hpp>

@@ -124,8 +125,8 @@ public:

beast::Journal m_journal;

boost::asio::io_service& m_io_service;
boost::asio::io_service::strand m_strand;
boost::asio::io_context& m_io_context;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::ip::tcp::resolver m_resolver;

std::condition_variable m_cv;
@@ -155,12 +156,12 @@ public:
std::deque<Work> m_work;

ResolverAsioImpl(
boost::asio::io_service& io_service,
boost::asio::io_context& io_context,
beast::Journal journal)
: m_journal(journal)
, m_io_service(io_service)
, m_strand(io_service)
, m_resolver(io_service)
, m_io_context(io_context)
, m_strand(boost::asio::make_strand(io_context))
, m_resolver(io_context)
, m_asyncHandlersCompleted(true)
, m_stop_called(false)
, m_stopped(true)
@@ -216,8 +217,14 @@ public:
{
if (m_stop_called.exchange(true) == false)
{
m_io_service.dispatch(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_stop, this, CompletionCounter(this))));
boost::asio::dispatch(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_stop,
this,
CompletionCounter(this))));

JLOG(m_journal.debug()) << "Queued a stop request";
}
@@ -248,12 +255,16 @@ public:

// TODO NIKB use rvalue references to construct and move
// reducing cost.
m_io_service.dispatch(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_resolve,
this,
names,
handler,
CompletionCounter(this))));
boost::asio::dispatch(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_resolve,
this,
names,
handler,
CompletionCounter(this))));
}

//-------------------------------------------------------------------------
@@ -279,19 +290,20 @@ public:
std::string name,
boost::system::error_code const& ec,
HandlerType handler,
boost::asio::ip::tcp::resolver::iterator iter,
boost::asio::ip::tcp::resolver::results_type results,
CompletionCounter)
{
if (ec == boost::asio::error::operation_aborted)
return;

std::vector<beast::IP::Endpoint> addresses;
auto iter = results.begin();

// If we get an error message back, we don't return any
// results that we may have gotten.
if (!ec)
{
while (iter != boost::asio::ip::tcp::resolver::iterator())
while (iter != results.end())
{
addresses.push_back(
beast::IPAddressConversion::from_asio(*iter));
@@ -301,8 +313,14 @@ public:

handler(name, addresses);

m_io_service.post(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
boost::asio::post(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
}

HostAndPort
@@ -383,16 +401,21 @@ public:
{
JLOG(m_journal.error()) << "Unable to parse '" << name << "'";

m_io_service.post(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
boost::asio::post(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));

return;
}

boost::asio::ip::tcp::resolver::query query(host, port);

m_resolver.async_resolve(
query,
host,
port,
std::bind(
&ResolverAsioImpl::do_finish,
this,
@@ -423,10 +446,14 @@ public:

if (m_work.size() > 0)
{
m_io_service.post(m_strand.wrap(std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
boost::asio::post(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&ResolverAsioImpl::do_work,
this,
CompletionCounter(this))));
}
}
}
@@ -435,9 +462,9 @@ public:
//-----------------------------------------------------------------------------

std::unique_ptr<ResolverAsio>
ResolverAsio::New(boost::asio::io_service& io_service, beast::Journal journal)
ResolverAsio::New(boost::asio::io_context& io_context, beast::Journal journal)
{
return std::make_unique<ResolverAsioImpl>(io_service, journal);
return std::make_unique<ResolverAsioImpl>(io_context, journal);
}

//-----------------------------------------------------------------------------

@@ -36,6 +36,7 @@ LogThrow(std::string const& title)
[[noreturn]] void
LogicError(std::string const& s) noexcept
{
// LCOV_EXCL_START
JLOG(debugLog().fatal()) << s;
std::cerr << "Logic error: " << s << std::endl;
// Use a non-standard contract naming here (without namespace) because
@@ -45,6 +46,7 @@ LogicError(std::string const& s) noexcept
// For the above reasons, we want this contract to stand out.
UNREACHABLE("LogicError", {{"message", s}});
std::abort();
// LCOV_EXCL_STOP
}

} // namespace ripple

@@ -30,9 +30,11 @@
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/udp.hpp>
#include <boost/asio/strand.hpp>
#include <boost/system/detail/error_code.hpp>
@@ -238,9 +240,11 @@ private:
Journal m_journal;
IP::Endpoint m_address;
std::string m_prefix;
boost::asio::io_service m_io_service;
std::optional<boost::asio::io_service::work> m_work;
boost::asio::io_service::strand m_strand;
boost::asio::io_context m_io_context;
std::optional<boost::asio::executor_work_guard<
boost::asio::io_context::executor_type>>
m_work;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
boost::asio::ip::udp::socket m_socket;
std::deque<std::string> m_data;
@@ -264,18 +268,24 @@ public:
: m_journal(journal)
, m_address(address)
, m_prefix(prefix)
, m_work(std::ref(m_io_service))
, m_strand(m_io_service)
, m_timer(m_io_service)
, m_socket(m_io_service)
, m_work(boost::asio::make_work_guard(m_io_context))
, m_strand(boost::asio::make_strand(m_io_context))
, m_timer(m_io_context)
, m_socket(m_io_context)
, m_thread(&StatsDCollectorImp::run, this)
{
}

~StatsDCollectorImp() override
{
boost::system::error_code ec;
m_timer.cancel(ec);
try
{
m_timer.cancel();
}
catch (boost::system::system_error const&)
{
// ignored
}

m_work.reset();
m_thread.join();
@@ -334,10 +344,10 @@ public:

//--------------------------------------------------------------------------

boost::asio::io_service&
get_io_service()
boost::asio::io_context&
get_io_context()
{
return m_io_service;
return m_io_context;
}

std::string const&
@@ -355,8 +365,14 @@ public:
void
post_buffer(std::string&& buffer)
{
m_io_service.dispatch(m_strand.wrap(std::bind(
&StatsDCollectorImp::do_post_buffer, this, std::move(buffer))));
boost::asio::dispatch(
m_io_context,
boost::asio::bind_executor(
m_strand,
std::bind(
&StatsDCollectorImp::do_post_buffer,
this,
std::move(buffer))));
}

// The keepAlive parameter makes sure the buffers sent to
@@ -386,8 +402,7 @@ public:
for (auto const& buffer : buffers)
{
std::string const s(
boost::asio::buffer_cast<char const*>(buffer),
boost::asio::buffer_size(buffer));
buffer.data(), boost::asio::buffer_size(buffer));
std::cerr << s;
}
std::cerr << '\n';
@@ -456,7 +471,7 @@ public:
set_timer()
{
using namespace std::chrono_literals;
m_timer.expires_from_now(1s);
m_timer.expires_after(1s);
m_timer.async_wait(std::bind(
&StatsDCollectorImp::on_timer, this, std::placeholders::_1));
}
@@ -498,13 +513,13 @@ public:

set_timer();

m_io_service.run();
m_io_context.run();

m_socket.shutdown(boost::asio::ip::udp::socket::shutdown_send, ec);

m_socket.close();

m_io_service.poll();
m_io_context.poll();
}
};

@@ -547,10 +562,12 @@ StatsDCounterImpl::~StatsDCounterImpl()
void
StatsDCounterImpl::increment(CounterImpl::value_type amount)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDCounterImpl::do_increment,
std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
amount));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDCounterImpl::do_increment,
std::static_pointer_cast<StatsDCounterImpl>(shared_from_this()),
amount));
}

void
@@ -592,10 +609,12 @@ StatsDEventImpl::StatsDEventImpl(
void
StatsDEventImpl::notify(EventImpl::value_type const& value)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDEventImpl::do_notify,
std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
value));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDEventImpl::do_notify,
std::static_pointer_cast<StatsDEventImpl>(shared_from_this()),
value));
}

void
@@ -625,19 +644,23 @@ StatsDGaugeImpl::~StatsDGaugeImpl()
void
StatsDGaugeImpl::set(GaugeImpl::value_type value)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDGaugeImpl::do_set,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
value));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDGaugeImpl::do_set,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
value));
}

void
StatsDGaugeImpl::increment(GaugeImpl::difference_type amount)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDGaugeImpl::do_increment,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
amount));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDGaugeImpl::do_increment,
std::static_pointer_cast<StatsDGaugeImpl>(shared_from_this()),
amount));
}

void
@@ -713,10 +736,12 @@ StatsDMeterImpl::~StatsDMeterImpl()
void
StatsDMeterImpl::increment(MeterImpl::value_type amount)
{
m_impl->get_io_service().dispatch(std::bind(
&StatsDMeterImpl::do_increment,
std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
amount));
boost::asio::dispatch(
m_impl->get_io_context(),
std::bind(
&StatsDMeterImpl::do_increment,
std::static_pointer_cast<StatsDMeterImpl>(shared_from_this()),
amount));
}

void

@@ -25,11 +25,11 @@ namespace IP {
bool
is_private(AddressV4 const& addr)
{
return ((addr.to_ulong() & 0xff000000) ==
return ((addr.to_uint() & 0xff000000) ==
0x0a000000) || // Prefix /8, 10. #.#.#
((addr.to_ulong() & 0xfff00000) ==
((addr.to_uint() & 0xfff00000) ==
0xac100000) || // Prefix /12 172. 16.#.# - 172.31.#.#
((addr.to_ulong() & 0xffff0000) ==
((addr.to_uint() & 0xffff0000) ==
0xc0a80000) || // Prefix /16 192.168.#.#
addr.is_loopback();
}
@@ -44,7 +44,7 @@ char
get_class(AddressV4 const& addr)
{
static char const* table = "AAAABBCD";
return table[(addr.to_ulong() & 0xE0000000) >> 29];
return table[(addr.to_uint() & 0xE0000000) >> 29];
}

} // namespace IP

@@ -20,6 +20,8 @@
#include <xrpl/beast/net/IPAddressV4.h>
#include <xrpl/beast/net/IPAddressV6.h>

#include <boost/asio/ip/address_v4.hpp>

namespace beast {
namespace IP {

@@ -28,7 +30,9 @@ is_private(AddressV6 const& addr)
{
return (
(addr.to_bytes()[0] & 0xfd) || // TODO fc00::/8 too ?
(addr.is_v4_mapped() && is_private(addr.to_v4())));
(addr.is_v4_mapped() &&
is_private(boost::asio::ip::make_address_v4(
boost::asio::ip::v4_mapped, addr))));
}

bool

@@ -21,6 +21,8 @@
#include <xrpl/beast/net/IPEndpoint.h>

#include <boost/algorithm/string/trim.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/address_v4.hpp>
#include <boost/system/detail/error_code.hpp>

#include <cctype>
@@ -167,7 +169,7 @@ operator>>(std::istream& is, Endpoint& endpoint)
}

boost::system::error_code ec;
auto addr = Address::from_string(addrStr, ec);
auto addr = boost::asio::ip::make_address(addrStr, ec);
if (ec)
{
is.setstate(std::ios_base::failbit);

@@ -174,7 +174,7 @@ Array::append(Json::Value const& v)
return;
}
}
UNREACHABLE("Json::Array::append : invalid type");
UNREACHABLE("Json::Array::append : invalid type"); // LCOV_EXCL_LINE
}

void
@@ -209,7 +209,7 @@ Object::set(std::string const& k, Json::Value const& v)
return;
}
}
UNREACHABLE("Json::Object::set : invalid type");
UNREACHABLE("Json::Object::set : invalid type"); // LCOV_EXCL_LINE
}

//------------------------------------------------------------------------------

@@ -213,8 +213,10 @@ Value::Value(ValueType type) : type_(type), allocated_(0)
value_.bool_ = false;
break;

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::Value(ValueType) : invalid type");
// LCOV_EXCL_STOP
}
}

@@ -290,8 +292,10 @@ Value::Value(Value const& other) : type_(other.type_)
value_.map_ = new ObjectValues(*other.value_.map_);
break;

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::Value(Value const&) : invalid type");
// LCOV_EXCL_STOP
}
}

@@ -318,8 +322,10 @@ Value::~Value()
delete value_.map_;
break;

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::~Value : invalid type");
// LCOV_EXCL_STOP
}
}

@@ -419,8 +425,10 @@ operator<(Value const& x, Value const& y)
return *x.value_.map_ < *y.value_.map_;
}

// LCOV_EXCL_START
default:
UNREACHABLE("Json::operator<(Value, Value) : invalid type");
// LCOV_EXCL_STOP
}

return 0; // unreachable
@@ -465,8 +473,10 @@ operator==(Value const& x, Value const& y)
return x.value_.map_->size() == y.value_.map_->size() &&
*x.value_.map_ == *y.value_.map_;

// LCOV_EXCL_START
default:
UNREACHABLE("Json::operator==(Value, Value) : invalid type");
// LCOV_EXCL_STOP
}

return 0; // unreachable
@@ -506,8 +516,10 @@ Value::asString() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to string");

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asString : invalid type");
// LCOV_EXCL_STOP
}

return ""; // unreachable
@@ -548,8 +560,10 @@ Value::asInt() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to int");

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asInt : invalid type");
// LCOV_EXCL_STOP
}

return 0; // unreachable;
@@ -590,8 +604,10 @@ Value::asUInt() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to uint");

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asUInt : invalid type");
// LCOV_EXCL_STOP
}

return 0; // unreachable;
@@ -622,8 +638,10 @@ Value::asDouble() const
case objectValue:
JSON_ASSERT_MESSAGE(false, "Type is not convertible to double");

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asDouble : invalid type");
// LCOV_EXCL_STOP
}

return 0; // unreachable;
@@ -654,8 +672,10 @@ Value::asBool() const
case objectValue:
return value_.map_->size() != 0;

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::asBool : invalid type");
// LCOV_EXCL_STOP
}

return false; // unreachable;
@@ -710,8 +730,10 @@ Value::isConvertibleTo(ValueType other) const
return other == objectValue ||
(other == nullValue && value_.map_->size() == 0);

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::isConvertible : invalid type");
// LCOV_EXCL_STOP
}

return false; // unreachable;
@@ -744,8 +766,10 @@ Value::size() const
case objectValue:
return Int(value_.map_->size());

// LCOV_EXCL_START
default:
UNREACHABLE("Json::Value::size : invalid type");
// LCOV_EXCL_STOP
}

return 0; // unreachable;

Some files were not shown because too many files have changed in this diff.