mirror of
https://github.com/Xahau/xahaud.git
synced 2026-02-25 08:12:24 +00:00
Compare commits
8 Commits
merge-dev-
...
sync-2.4.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a61d96386d | ||
|
|
906b559f08 | ||
|
|
66dbb11738 | ||
|
|
d18c4d9b33 | ||
|
|
1288115fb1 | ||
|
|
a0b5a59e36 | ||
|
|
c3e8039c5a | ||
|
|
d9e7b50395 |
7
.github/workflows/xahau-ga-macos.yml
vendored
7
.github/workflows/xahau-ga-macos.yml
vendored
@@ -4,7 +4,8 @@ on:
|
||||
push:
|
||||
branches: ["dev", "candidate", "release"]
|
||||
pull_request:
|
||||
branches: ["dev", "candidate", "release"]
|
||||
branches: ["**"]
|
||||
types: [opened, synchronize, reopened, labeled, unlabeled]
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
|
||||
@@ -14,6 +15,10 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
if: >
|
||||
github.event_name != 'pull_request' ||
|
||||
contains(fromJson('["dev","candidate","release"]'), github.base_ref) ||
|
||||
contains(join(github.event.pull_request.labels.*.name, ','), 'ci-full-build')
|
||||
strategy:
|
||||
matrix:
|
||||
generator:
|
||||
|
||||
36
.github/workflows/xahau-ga-nix.yml
vendored
36
.github/workflows/xahau-ga-nix.yml
vendored
@@ -4,9 +4,16 @@ on:
|
||||
push:
|
||||
branches: ["dev", "candidate", "release"]
|
||||
pull_request:
|
||||
branches: ["dev", "candidate", "release"]
|
||||
branches: ["**"]
|
||||
types: [opened, synchronize, reopened, labeled, unlabeled]
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
full_matrix:
|
||||
description: "Force full matrix (6 configs)"
|
||||
required: false
|
||||
default: "false"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -14,6 +21,10 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
matrix-setup:
|
||||
if: >
|
||||
github.event_name != 'pull_request' ||
|
||||
contains(fromJson('["dev","candidate","release"]'), github.base_ref) ||
|
||||
contains(join(github.event.pull_request.labels.*.name, ','), 'ci-full-build')
|
||||
runs-on: [self-hosted, generic, 20.04]
|
||||
container: python:3-slim
|
||||
outputs:
|
||||
@@ -111,6 +122,7 @@ jobs:
|
||||
base_ref = "${{ github.base_ref }}" # For PRs, this is the target branch
|
||||
event_name = "${{ github.event_name }}"
|
||||
pr_title = """${{ steps.escape.outputs.title }}"""
|
||||
pr_labels = """${{ join(github.event.pull_request.labels.*.name, ',') }}"""
|
||||
pr_head_sha = "${{ github.event.pull_request.head.sha }}"
|
||||
|
||||
# Get commit message - for PRs, fetch via API since head_commit.message is empty
|
||||
@@ -136,11 +148,24 @@ jobs:
|
||||
print(f"Base ref: {base_ref}")
|
||||
print(f"PR head SHA: {pr_head_sha}")
|
||||
print(f"PR title: {pr_title}")
|
||||
print(f"PR labels: {pr_labels}")
|
||||
print(f"Commit message: {commit_message}")
|
||||
|
||||
# Check for override tags in commit message or PR title
|
||||
force_full = "[ci-nix-full-matrix]" in commit_message or "[ci-nix-full-matrix]" in pr_title
|
||||
# Manual trigger input to force full matrix.
|
||||
manual_full = "${{ github.event.inputs.full_matrix || 'false' }}" == "true"
|
||||
|
||||
# Label/manual overrides, while preserving existing title/commit behavior.
|
||||
force_full = (
|
||||
manual_full
|
||||
or "[ci-nix-full-matrix]" in commit_message
|
||||
or "[ci-nix-full-matrix]" in pr_title
|
||||
or ("ci-full-build" in pr_labels and "ci-nix-full-matrix" in pr_labels)
|
||||
)
|
||||
force_min = (
|
||||
"ci-full-build" in pr_labels
|
||||
)
|
||||
print(f"Force full matrix: {force_full}")
|
||||
print(f"Force min matrix: {force_min}")
|
||||
|
||||
# Check if this is targeting a main branch
|
||||
# For PRs: check base_ref (target branch)
|
||||
@@ -148,8 +173,11 @@ jobs:
|
||||
main_branches = ["refs/heads/dev", "refs/heads/release", "refs/heads/candidate"]
|
||||
|
||||
if force_full:
|
||||
# Override: always use full matrix if tag is present
|
||||
# Override: always use full matrix if forced by manual input or label.
|
||||
use_full = True
|
||||
elif force_min:
|
||||
# Override: always use minimal matrix if ci-full-build label is present.
|
||||
use_full = False
|
||||
elif event_name == "pull_request":
|
||||
# For PRs, base_ref is just the branch name (e.g., "dev", not "refs/heads/dev")
|
||||
# Check if the PR targets release or candidate (more critical branches)
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -76,6 +76,7 @@ docs/html_doc
|
||||
# Xcode
|
||||
.DS_Store
|
||||
*/build/*
|
||||
!/docs/build/
|
||||
*.pbxuser
|
||||
!default.pbxuser
|
||||
*.mode1v3
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
[tools]
|
||||
clang-format = "18"
|
||||
224
BUILD.md
224
BUILD.md
@@ -1,7 +1,7 @@
|
||||
| :warning: **WARNING** :warning:
|
||||
|---|
|
||||
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |
|
||||
|
||||
> These instructions assume you have a C++ development environment ready
|
||||
> with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up
|
||||
> on Linux, macOS, or Windows, see [our guide](./docs/build/environment.md).
|
||||
>
|
||||
> These instructions also assume a basic familiarity with Conan and CMake.
|
||||
> If you are unfamiliar with Conan,
|
||||
> you can read our [crash course](./docs/build/conan.md)
|
||||
@@ -10,7 +10,7 @@
|
||||
## Branches
|
||||
|
||||
For a stable release, choose the `master` branch or one of the [tagged
|
||||
releases](https://github.com/ripple/rippled/releases).
|
||||
releases](https://github.com/Xahau/xahaud/releases).
|
||||
|
||||
```
|
||||
git checkout master
|
||||
@@ -29,70 +29,46 @@ branch.
|
||||
git checkout develop
|
||||
```
|
||||
|
||||
|
||||
## Minimum Requirements
|
||||
|
||||
See [System Requirements](https://xrpl.org/system-requirements.html).
|
||||
|
||||
Building rippled generally requires git, Python, Conan, CMake, and a C++ compiler. Some guidance on setting up such a [C++ development environment can be found here](./docs/build/environment.md).
|
||||
|
||||
- [Python 3.7](https://www.python.org/downloads/)
|
||||
- [Conan 2.x](https://conan.io/downloads)
|
||||
- [CMake 3.16](https://cmake.org/download/)
|
||||
|
||||
[^1]: It is possible to build with Conan 2.x,
|
||||
but the instructions are significantly different,
|
||||
which is why we are not recommending it yet.
|
||||
Notably, the `conan profile update` command is removed in 2.x.
|
||||
Profiles must be edited by hand.
|
||||
|
||||
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
|
||||
`xahaud` is written in the C++20 dialect and includes the `<concepts>` header.
|
||||
The [minimum compiler versions][2] required are:
|
||||
|
||||
| Compiler | Version |
|
||||
|-------------|---------|
|
||||
| GCC | 11 |
|
||||
| GCC | 10 |
|
||||
| Clang | 13 |
|
||||
| Apple Clang | 13.1.6 |
|
||||
| MSVC | 19.23 |
|
||||
|
||||
### Linux
|
||||
We don't recommend Windows for `xahaud` production at this time. As of
|
||||
November 2025, Ubuntu has the highest level of quality assurance, testing,
|
||||
and support.
|
||||
|
||||
The Ubuntu operating system has received the highest level of
|
||||
quality assurance, testing, and support.
|
||||
Windows developers should use Visual Studio 2019. `xahaud` isn't
|
||||
compatible with [Boost](https://www.boost.org/) 1.78 or 1.79, and Conan
|
||||
can't build earlier Boost versions.
|
||||
|
||||
Here are [sample instructions for setting up a C++ development environment on Linux](./docs/build/environment.md#linux).
|
||||
**Note:** 32-bit Windows development isn't supported.
|
||||
|
||||
### Mac
|
||||
|
||||
Many rippled engineers use macOS for development.
|
||||
|
||||
Here are [sample instructions for setting up a C++ development environment on macOS](./docs/build/environment.md#macos).
|
||||
|
||||
### Windows
|
||||
|
||||
Windows is not recommended for production use at this time.
|
||||
|
||||
- Additionally, 32-bit Windows development is not supported.
|
||||
|
||||
[Boost]: https://www.boost.org/
|
||||
|
||||
## Steps
|
||||
|
||||
|
||||
### Set Up Conan
|
||||
|
||||
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
|
||||
|
||||
These instructions assume a basic familiarity with Conan and CMake.
|
||||
|
||||
If you are unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official [Getting Started][3] walkthrough.
|
||||
|
||||
You'll need at least one Conan profile:
|
||||
1. (Optional) If you've never used Conan, use autodetect to set up a default profile.
|
||||
|
||||
```
|
||||
conan profile detect --force
|
||||
```
|
||||
|
||||
Update the compiler settings:
|
||||
2. Update the compiler settings.
|
||||
|
||||
For Conan 2, you can edit the profile directly at `~/.conan2/profiles/default`,
|
||||
or use the Conan CLI. Ensure C++20 is set:
|
||||
@@ -109,16 +85,10 @@ Update the compiler settings:
|
||||
compiler.cppstd=20
|
||||
```
|
||||
|
||||
Configure Conan (1.x only) to use recipe revisions:
|
||||
|
||||
```
|
||||
conan config set general.revisions_enabled=1
|
||||
```
|
||||
|
||||
**Linux** developers will commonly have a default Conan [profile][] that compiles
|
||||
with GCC and links with libstdc++.
|
||||
If you are linking with libstdc++ (see profile setting `compiler.libcxx`),
|
||||
then you will need to choose the `libstdc++11` ABI:
|
||||
Linux developers will commonly have a default Conan [profile][] that compiles
|
||||
with GCC and links with libstdc++.
|
||||
If you are linking with libstdc++ (see profile setting `compiler.libcxx`),
|
||||
then you will need to choose the `libstdc++11` ABI.
|
||||
|
||||
```
|
||||
# In ~/.conan2/profiles/default, ensure:
|
||||
@@ -126,26 +96,12 @@ then you will need to choose the `libstdc++11` ABI:
|
||||
compiler.libcxx=libstdc++11
|
||||
```
|
||||
|
||||
On Windows, you should use the x64 native build tools.
|
||||
An easy way to do that is to run the shortcut "x64 Native Tools Command
|
||||
Prompt" for the version of Visual Studio that you have installed.
|
||||
|
||||
Ensure inter-operability between `boost::string_view` and `std::string_view` types:
|
||||
|
||||
```
|
||||
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_BEAST_USE_STD_STRING_VIEW"]' default
|
||||
conan profile update 'env.CXXFLAGS="-DBOOST_BEAST_USE_STD_STRING_VIEW"' default
|
||||
```
|
||||
|
||||
If you have other flags in the `conf.tools.build` or `env.CXXFLAGS` sections, make sure to retain the existing flags and append the new ones. You can check them with:
|
||||
```
|
||||
conan profile show default
|
||||
```
|
||||
|
||||
|
||||
**Windows** developers may need to use the x64 native build tools.
|
||||
An easy way to do that is to run the shortcut "x64 Native Tools Command
|
||||
Prompt" for the version of Visual Studio that you have installed.
|
||||
|
||||
Windows developers must also build `rippled` and its dependencies for the x64
|
||||
architecture:
|
||||
Windows developers must also build `xahaud` and its dependencies for the x64
|
||||
architecture.
|
||||
|
||||
```
|
||||
# In ~/.conan2/profiles/default, ensure:
|
||||
@@ -181,8 +137,8 @@ Prompt" for the version of Visual Studio that you have installed.
|
||||
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
|
||||
```
|
||||
|
||||
Export our [Conan recipe for RocksDB](./external/rocksdb).
|
||||
It does not override paths to dependencies when building with Visual Studio.
|
||||
5. Export our [Conan recipe for SOCI](./external/soci).
|
||||
It patches their CMake to correctly import its dependencies.
|
||||
|
||||
```
|
||||
conan export external/soci --version 4.0.3 --user xahaud --channel stable
|
||||
@@ -212,15 +168,13 @@ It does not override paths to dependencies when building with Visual Studio.
|
||||
the `install-folder` or `-if` option to every `conan install` command
|
||||
in the next step.
|
||||
|
||||
2. Use conan to generate CMake files for every configuration you want to build:
|
||||
2. Generate CMake files for every configuration you want to build.
|
||||
|
||||
```
|
||||
conan install .. --output-folder . --build missing --settings build_type=Release
|
||||
conan install .. --output-folder . --build missing --settings build_type=Debug
|
||||
```
|
||||
|
||||
To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug`
|
||||
|
||||
For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`,
|
||||
you only need to run this command once.
|
||||
For a multi-configuration generator, e.g. `Visual Studio`, you may want to
|
||||
@@ -231,13 +185,13 @@ It does not override paths to dependencies when building with Visual Studio.
|
||||
generated by the first. You can pass the build type on the command line with
|
||||
`--settings build_type=$BUILD_TYPE` or in the profile itself,
|
||||
under the section `[settings]` with the key `build_type`.
|
||||
|
||||
|
||||
If you are using a Microsoft Visual C++ compiler,
|
||||
then you will need to ensure consistency between the `build_type` setting
|
||||
and the `compiler.runtime` setting.
|
||||
|
||||
|
||||
When `build_type` is `Release`, `compiler.runtime` should be `MT`.
|
||||
|
||||
|
||||
When `build_type` is `Debug`, `compiler.runtime` should be `MTd`.
|
||||
|
||||
```
|
||||
@@ -249,31 +203,29 @@ It does not override paths to dependencies when building with Visual Studio.
|
||||
`$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`.
|
||||
|
||||
Single-config generators:
|
||||
|
||||
```
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
```
|
||||
|
||||
Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type]
|
||||
and make sure it matches the one of the `build_type` settings
|
||||
you chose in the previous step.
|
||||
and make sure it matches the `build_type` setting you chose in the previous
|
||||
step.
|
||||
|
||||
For example, to build Debug, in the next command, replace "Release" with "Debug"
|
||||
Multi-config generators:
|
||||
|
||||
```
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON ..
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
|
||||
```
|
||||
|
||||
**Note:** You can pass build options for `xahaud` in this step.
|
||||
|
||||
Multi-config generators:
|
||||
|
||||
```
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON ..
|
||||
```
|
||||
|
||||
**Note:** You can pass build options for `rippled` in this step.
|
||||
|
||||
5. Build `rippled`.
|
||||
4. Build `xahaud`.
|
||||
|
||||
For a single-configuration generator, it will build whatever configuration
|
||||
you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator,
|
||||
you must pass the option `--config` to select the build configuration.
|
||||
The output file is currently named 'rippled'.
|
||||
|
||||
Single-config generators:
|
||||
|
||||
@@ -282,13 +234,13 @@ It does not override paths to dependencies when building with Visual Studio.
|
||||
```
|
||||
|
||||
Multi-config generators:
|
||||
|
||||
|
||||
```
|
||||
cmake --build . --config Release
|
||||
cmake --build . --config Debug
|
||||
```
|
||||
|
||||
6. Test rippled.
|
||||
5. Test xahaud.
|
||||
|
||||
Single-config generators:
|
||||
|
||||
@@ -303,79 +255,19 @@ It does not override paths to dependencies when building with Visual Studio.
|
||||
./Debug/rippled --unittest
|
||||
```
|
||||
|
||||
The location of `rippled` in your build directory depends on your CMake
|
||||
The location of `xahaud` in your build directory depends on your CMake
|
||||
generator. Pass `--help` to see the rest of the command line options.
|
||||
|
||||
|
||||
## Coverage report
|
||||
|
||||
The coverage report is intended for developers using compilers GCC
|
||||
or Clang (including Apple Clang). It is generated by the build target `coverage`,
|
||||
which is only enabled when the `coverage` option is set, e.g. with
|
||||
`--options coverage=True` in `conan` or `-Dcoverage=ON` variable in `cmake`
|
||||
|
||||
Prerequisites for the coverage report:
|
||||
|
||||
- [gcovr tool][gcovr] (can be installed e.g. with [pip][python-pip])
|
||||
- `gcov` for GCC (installed with the compiler by default) or
|
||||
- `llvm-cov` for Clang (installed with the compiler by default)
|
||||
- `Debug` build type
|
||||
|
||||
A coverage report is created when the following steps are completed, in order:
|
||||
|
||||
1. `rippled` binary built with instrumentation data, enabled by the `coverage`
|
||||
option mentioned above
|
||||
2. completed run of unit tests, which populates coverage capture data
|
||||
3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
|
||||
to assemble both instrumentation data and the coverage capture data into a coverage report
|
||||
|
||||
The above steps are automated into a single target `coverage`. The instrumented
|
||||
`rippled` binary can also be used for regular development or testing work, at
|
||||
the cost of extra disk space utilization and a small performance hit
|
||||
(to store coverage capture). In case of a spurious failure of unit tests, it is
|
||||
possible to re-run the `coverage` target without rebuilding the `rippled` binary
|
||||
(since it is simply a dependency of the coverage report target). It is also possible
|
||||
to select only specific tests for the purpose of the coverage report, by setting
|
||||
the `coverage_test` variable in `cmake`
|
||||
|
||||
The default coverage report format is `html-details`, but the user
|
||||
can override it to any of the formats listed in `cmake/CodeCoverage.cmake`
|
||||
by setting the `coverage_format` variable in `cmake`. It is also possible
|
||||
to generate more than one format at a time by setting the `coverage_extra_args`
|
||||
variable in `cmake`. The specific command line used to run the `gcovr` tool will be
|
||||
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
|
||||
|
||||
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
|
||||
set to the number of available CPU cores. This may cause spurious test
|
||||
errors on Apple. Developers can override the number of unit test jobs with
|
||||
the `coverage_test_parallelism` variable in `cmake`.
|
||||
|
||||
Example use with some cmake variables set:
|
||||
|
||||
```
|
||||
cd .build
|
||||
conan install .. --output-folder . --build missing --settings build_type=Debug
|
||||
cmake -DCMAKE_BUILD_TYPE=Debug -Dcoverage=ON -Dxrpld=ON -Dtests=ON -Dcoverage_test_parallelism=2 -Dcoverage_format=html-details -Dcoverage_extra_args="--json coverage.json" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
|
||||
cmake --build . --target coverage
|
||||
```
|
||||
|
||||
After the `coverage` target is completed, the generated coverage report will be
|
||||
stored inside the build directory, as either of:
|
||||
|
||||
- file named `coverage.`_extension_ , with a suitable extension for the report format, or
|
||||
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
| Option | Default Value | Description |
|
||||
| --- | ---| ---|
|
||||
| `assert` | OFF | Enable assertions.
|
||||
| `coverage` | OFF | Prepare the coverage report. |
|
||||
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
|
||||
| `tests` | OFF | Build tests. |
|
||||
| `reporting` | OFF | Build the reporting mode feature. |
|
||||
| `tests` | ON | Build tests. |
|
||||
| `unity` | ON | Configure a unity build. |
|
||||
| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. |
|
||||
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
|
||||
|
||||
[Unity builds][5] may be faster for the first build
|
||||
(at the cost of much more memory) since they concatenate sources into fewer
|
||||
@@ -414,18 +306,6 @@ tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
|
||||
```
|
||||
|
||||
|
||||
### call to 'async_teardown' is ambiguous
|
||||
|
||||
If you are compiling with an early version of Clang 16, then you might hit
|
||||
a [regression][6] when compiling C++20 that manifests as an [error in a Boost
|
||||
header][7]. You can workaround it by adding this preprocessor definition:
|
||||
|
||||
```
|
||||
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default
|
||||
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
|
||||
```
|
||||
|
||||
|
||||
### recompile with -fPIC
|
||||
|
||||
If you get a linker error suggesting that you recompile Boost with
|
||||
@@ -577,10 +457,6 @@ but it is more convenient to put them in a [profile][profile].
|
||||
|
||||
[1]: https://github.com/conan-io/conan-center-index/issues/13168
|
||||
[5]: https://en.wikipedia.org/wiki/Unity_build
|
||||
[6]: https://github.com/boostorg/beast/issues/2648
|
||||
[7]: https://github.com/boostorg/beast/issues/2661
|
||||
[gcovr]: https://gcovr.com/en/stable/getting-started.html
|
||||
[python-pip]: https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/
|
||||
[build_type]: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
|
||||
[runtime]: https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html
|
||||
[toolchain]: https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
#
|
||||
# 4. HTTPS Client
|
||||
#
|
||||
# 5. <vacated>
|
||||
# 5. Reporting Mode
|
||||
#
|
||||
# 6. Database
|
||||
#
|
||||
@@ -282,14 +282,12 @@
|
||||
# ssl_cert
|
||||
#
|
||||
# Specifies the path to the SSL certificate file in PEM format.
|
||||
# This is not needed if the chain includes it. Use ssl_chain if
|
||||
# your certificate includes one or more intermediates.
|
||||
# This is not needed if the chain includes it.
|
||||
#
|
||||
# ssl_chain
|
||||
#
|
||||
# If you need a certificate chain, specify the path to the
|
||||
# certificate chain here. The chain may include the end certificate.
|
||||
# This must be used if the certificate includes intermediates.
|
||||
#
|
||||
# ssl_ciphers = <cipherlist>
|
||||
#
|
||||
@@ -387,32 +385,15 @@
|
||||
#
|
||||
#
|
||||
#
|
||||
# [compression]
|
||||
#
|
||||
# true or false
|
||||
#
|
||||
# true - enables compression
|
||||
# false - disables compression [default].
|
||||
#
|
||||
# The xahaud server can save bandwidth by compressing its peer-to-peer communications,
|
||||
# at a cost of greater CPU usage. If you enable link compression,
|
||||
# the server automatically compresses communications with peer servers
|
||||
# that also have link compression enabled.
|
||||
# https://xrpl.org/enable-link-compression.html
|
||||
#
|
||||
#
|
||||
#
|
||||
# [ips]
|
||||
#
|
||||
# List of hostnames or ips where the XRPL protocol is served. A default
|
||||
# starter list is included in the code and used if no other hostnames are
|
||||
# available.
|
||||
#
|
||||
# One address or domain name per line is allowed. A port may be specified
|
||||
# after adding a space to the address. If a port is not specified, the default
|
||||
# port of 2459 will be used. Many servers still use the legacy port of 51235.
|
||||
# To connect to such servers, you must specify the port number. The ordering
|
||||
# of entries does not generally matter.
|
||||
# One address or domain name per line is allowed. A port must be
|
||||
# specified after adding a space to the address. The ordering of entries
|
||||
# does not generally matter.
|
||||
#
|
||||
# The default list of entries is:
|
||||
# - hubs.xahau.as16089.net 21337
|
||||
@@ -477,6 +458,19 @@
|
||||
#
|
||||
#
|
||||
#
|
||||
# [sntp_servers]
|
||||
#
|
||||
# IP address or domain of NTP servers to use for time synchronization.
|
||||
#
|
||||
# These NTP servers are suitable for xahaud servers located in the United
|
||||
# States:
|
||||
# time.windows.com
|
||||
# time.apple.com
|
||||
# time.nist.gov
|
||||
# pool.ntp.org
|
||||
#
|
||||
#
|
||||
#
|
||||
# [max_transactions]
|
||||
#
|
||||
# Configure the maximum number of transactions to have in the job queue
|
||||
@@ -881,6 +875,119 @@
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
#
|
||||
# 5. Reporting Mode
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# xahaud has an optional operating mode called Reporting Mode. In Reporting
|
||||
# Mode, xahaud does not connect to the peer to peer network. Instead, xahaud
|
||||
# will continuously extract data from one or more xahaud servers that are
|
||||
# connected to the peer to peer network (referred to as an ETL source).
|
||||
# Reporting mode servers will forward RPC requests that require access to the
|
||||
# peer to peer network (submit, fee, etc) to an ETL source.
|
||||
#
|
||||
# [reporting] Settings for Reporting Mode. If and only if this section is
|
||||
# present, xahaud will start in reporting mode. This section
|
||||
# contains a list of ETL source names, and key-value pairs. The
|
||||
# ETL source names each correspond to a configuration file
|
||||
# section; the names must match exactly. The key-value pairs are
|
||||
# optional.
|
||||
#
|
||||
#
|
||||
# [<name>]
|
||||
#
|
||||
# A series of key/value pairs that specify an ETL source.
|
||||
#
|
||||
# source_ip = <IP-address>
|
||||
#
|
||||
# Required. IP address of the ETL source. Can also be a DNS record.
|
||||
#
|
||||
# source_ws_port = <number>
|
||||
#
|
||||
# Required. Port on which ETL source is accepting unencrypted websocket
|
||||
# connections.
|
||||
#
|
||||
# source_grpc_port = <number>
|
||||
#
|
||||
# Required for ETL. Port on which ETL source is accepting gRPC requests.
|
||||
# If this option is omitted, this ETL source cannot actually be used for
|
||||
# ETL; the Reporting Mode server can still forward RPCs to this ETL
|
||||
# source, but cannot extract data from this ETL source.
|
||||
#
|
||||
#
|
||||
# Key-value pairs (all optional):
|
||||
#
|
||||
# read_only Valid values: 0, 1. Default is 0. If set to 1, the server
|
||||
# will start in strict read-only mode, and will not perform
|
||||
# ETL. The server will still handle RPC requests, and will
|
||||
# still forward RPC requests that require access to the p2p
|
||||
# network.
|
||||
#
|
||||
# start_sequence
|
||||
# Sequence of first ledger to extract if the database is empty.
|
||||
# ETL extracts ledgers in order. If this setting is absent and
|
||||
# the database is empty, ETL will start with the next ledger
|
||||
# validated by the network. If this setting is present and the
|
||||
# database is not empty, an exception is thrown.
|
||||
#
|
||||
# num_markers Degree of parallelism used during the initial ledger
|
||||
# download. Only used if the database is empty. Valid values
|
||||
# are 1-256. A higher degree of parallelism results in a
|
||||
# faster download, but puts more load on the ETL source.
|
||||
# Default is 2.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# [reporting]
|
||||
# etl_source1
|
||||
# etl_source2
|
||||
# read_only=0
|
||||
# start_sequence=32570
|
||||
# num_markers=8
|
||||
#
|
||||
# [etl_source1]
|
||||
# source_ip=1.2.3.4
|
||||
# source_ws_port=6005
|
||||
# source_grpc_port=50051
|
||||
#
|
||||
# [etl_source2]
|
||||
# source_ip=5.6.7.8
|
||||
# source_ws_port=6005
|
||||
# source_grpc_port=50051
|
||||
#
|
||||
# Minimal Example:
|
||||
#
|
||||
# [reporting]
|
||||
# etl_source1
|
||||
#
|
||||
# [etl_source1]
|
||||
# source_ip=1.2.3.4
|
||||
# source_ws_port=6005
|
||||
# source_grpc_port=50051
|
||||
#
|
||||
#
|
||||
# Notes:
|
||||
#
|
||||
# Reporting Mode requires Postgres (instead of SQLite). The Postgres
|
||||
# connection info is specified under the [ledger_tx_tables] config section;
|
||||
# see the Database section for further documentation.
|
||||
#
|
||||
# Each ETL source specified must have gRPC enabled (by adding a [port_grpc]
|
||||
# section to the config). It is recommended to add a secure_gateway entry to
|
||||
# the gRPC section, in order to bypass the server's rate limiting.
|
||||
# This section needs to be added to the config of the ETL source, not
|
||||
# the config of the reporting node. In the example below, the
|
||||
# reporting server is running at 127.0.0.1. Multiple IPs can be
|
||||
# specified in secure_gateway via a comma separated list.
|
||||
#
|
||||
# [port_grpc]
|
||||
# ip = 0.0.0.0
|
||||
# port = 50051
|
||||
# secure_gateway = 127.0.0.1
|
||||
#
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
#
|
||||
# 6. Database
|
||||
#
|
||||
#------------
|
||||
@@ -888,7 +995,13 @@
|
||||
# xahaud creates 4 SQLite database to hold bookkeeping information
|
||||
# about transactions, local credentials, and various other things.
|
||||
# It also creates the NodeDB, which holds all the objects that
|
||||
# make up the current and historical ledgers.
|
||||
# make up the current and historical ledgers. In Reporting Mode, xahaud
|
||||
# uses a Postgres database instead of SQLite.
|
||||
#
|
||||
# The simplest way to work with Postgres is to install it locally.
|
||||
# When it is running, execute the initdb.sh script in the current
|
||||
# directory as: sudo -u postgres ./initdb.sh
|
||||
# This will create the xahaud user and an empty database of the same name.
|
||||
#
|
||||
# The size of the NodeDB grows in proportion to the amount of new data and the
|
||||
# amount of historical data (a configurable setting) so the performance of the
|
||||
@@ -930,6 +1043,14 @@
|
||||
# keeping full history is not advised, and using online delete is
|
||||
# recommended.
|
||||
#
|
||||
# type = Cassandra
|
||||
#
|
||||
# Apache Cassandra is an open-source, distributed key-value store - see
|
||||
# https://cassandra.apache.org/ for more details.
|
||||
#
|
||||
# Cassandra is an alternative backend to be used only with Reporting Mode.
|
||||
# See the Reporting Mode section for more details about Reporting Mode.
|
||||
#
|
||||
# type = RWDB
|
||||
#
|
||||
# RWDB is a high-performance memory store written by XRPL-Labs and optimized
|
||||
@@ -941,6 +1062,27 @@
|
||||
#
|
||||
# path Location to store the database
|
||||
#
|
||||
# Required keys for RWDB:
|
||||
#
|
||||
# online_delete Required. RWDB stores data in memory and will
|
||||
# grow unbounded without online_delete. See the
|
||||
# online_delete section below.
|
||||
#
|
||||
# Required keys for Cassandra:
|
||||
#
|
||||
# contact_points IP of a node in the Cassandra cluster
|
||||
#
|
||||
# port CQL Native Transport Port
|
||||
#
|
||||
# secure_connect_bundle
|
||||
# Absolute path to a secure connect bundle. When using
|
||||
# a secure connect bundle, contact_points and port are
|
||||
# not required.
|
||||
#
|
||||
# keyspace Name of Cassandra keyspace to use
|
||||
#
|
||||
# table_name Name of table in above keyspace to use
|
||||
#
|
||||
# Optional keys
|
||||
#
|
||||
# cache_size Size of cache for database records. Default is 16384.
|
||||
@@ -957,7 +1099,7 @@
|
||||
# default value for the unspecified parameter.
|
||||
#
|
||||
# Note: the cache will not be created if online_delete
|
||||
# is specified.
|
||||
# is specified, or if shards are used.
|
||||
#
|
||||
# fast_load Boolean. If set, load the last persisted ledger
|
||||
# from disk upon process start before syncing to
|
||||
@@ -980,6 +1122,10 @@
|
||||
# earliest_seq The default is 32570 to match the XRP Ledger's
|
||||
# network's earliest allowed sequence. Alternate
|
||||
# networks may set this value. Minimum value of 1.
|
||||
# If a [shard_db] section is defined, and this
|
||||
# value is present either [node_db] or [shard_db],
|
||||
# it must be defined with the same value in both
|
||||
# sections.
|
||||
#
|
||||
# Optional keys for NuDB only:
|
||||
#
|
||||
@@ -1055,6 +1201,25 @@
|
||||
# checking until healthy.
|
||||
# Default is 5.
|
||||
#
|
||||
# Optional keys for Cassandra:
|
||||
#
|
||||
# username Username to use if Cassandra cluster requires
|
||||
# authentication
|
||||
#
|
||||
# password Password to use if Cassandra cluster requires
|
||||
# authentication
|
||||
#
|
||||
# max_requests_outstanding
|
||||
# Limits the maximum number of concurrent database
|
||||
# writes. Default is 10 million. For slower clusters,
|
||||
# large numbers of concurrent writes can overload the
|
||||
# cluster. Setting this option can help eliminate
|
||||
# write timeouts and other write errors due to the
|
||||
# cluster being overloaded.
|
||||
# io_threads
|
||||
# Set the number of IO threads used by the
|
||||
# Cassandra driver. Defaults to 4.
|
||||
#
|
||||
# Notes:
|
||||
# The 'node_db' entry configures the primary, persistent storage.
|
||||
#
|
||||
@@ -1071,6 +1236,32 @@
|
||||
# your xahaud.cfg file.
|
||||
# Partial pathnames are relative to the location of the xahaud executable.
|
||||
#
|
||||
# [shard_db] Settings for the Shard Database (optional)
|
||||
#
|
||||
# Format (without spaces):
|
||||
# One or more lines of case-insensitive key / value pairs:
|
||||
# <key> '=' <value>
|
||||
# ...
|
||||
#
|
||||
# Example:
|
||||
# path=db/shards/nudb
|
||||
#
|
||||
# Required keys:
|
||||
# path Location to store the database
|
||||
#
|
||||
# Optional keys:
|
||||
# max_historical_shards
|
||||
# The maximum number of historical shards
|
||||
# to store.
|
||||
#
|
||||
# [historical_shard_paths] Additional storage paths for the Shard Database (optional)
|
||||
#
|
||||
# Format (without spaces):
|
||||
# One or more lines, each expressing a full path for storing historical shards:
|
||||
# /mnt/disk1
|
||||
# /mnt/disk2
|
||||
# ...
|
||||
#
|
||||
# [sqlite] Tuning settings for the SQLite databases (optional)
|
||||
#
|
||||
# Format (without spaces):
|
||||
@@ -1150,18 +1341,40 @@
|
||||
# This setting may not be combined with the
|
||||
# "safety_level" setting.
|
||||
#
|
||||
# page_size Valid values: integer (MUST be power of 2 between 512 and 65536)
|
||||
# The default is 4096 bytes. This setting determines
|
||||
# the size of a page in the transaction.db file.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_page_size
|
||||
# for more details about the available options.
|
||||
# [ledger_tx_tables] (optional)
|
||||
#
|
||||
# journal_size_limit Valid values: integer
|
||||
# The default is 1582080. This setting limits
|
||||
# the size of the journal for transaction.db file. When the limit is
|
||||
# reached, older entries will be deleted.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_journal_size_limit
|
||||
# for more details about the available options.
|
||||
# conninfo Info for connecting to Postgres. Format is
|
||||
# postgres://[username]:[password]@[ip]/[database].
|
||||
# The database and user must already exist. If this
|
||||
# section is missing and xahaud is running in
|
||||
# Reporting Mode, xahaud will connect as the
|
||||
# user running xahaud to a database with the
|
||||
# same name. On Linux and Mac OS X, the connection
|
||||
# will take place using the server's UNIX domain
|
||||
# socket. On Windows, through the localhost IP
|
||||
# address. Default is empty.
|
||||
#
|
||||
# use_tx_tables Valid values: 1, 0
|
||||
# The default is 1 (true). Determines whether to use
|
||||
# the SQLite transaction database. If set to 0,
|
||||
# xahaud will not write to the transaction database,
|
||||
# and will reject tx, account_tx and tx_history RPCs.
|
||||
# In Reporting Mode, this setting is ignored.
|
||||
#
|
||||
# max_connections Valid values: any positive integer up to 64 bit
|
||||
# storage length. This configures the maximum
|
||||
# number of concurrent connections to postgres.
|
||||
# Default is the maximum possible value to
|
||||
# fit in a 64 bit integer.
|
||||
#
|
||||
# timeout Number of seconds after which idle postgres
|
||||
# connections are discconnected. If set to 0,
|
||||
# connections never timeout. Default is 600.
|
||||
#
|
||||
#
|
||||
# remember_ip Value values: 1, 0
|
||||
# Default is 1 (true). Whether to cache host and
|
||||
# port connection settings.
|
||||
#
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
@@ -1427,12 +1640,6 @@
|
||||
# Admin level API commands over Secure Websockets, when originating
|
||||
# from the same machine (via the loopback adapter at 127.0.0.1).
|
||||
#
|
||||
# "grpc"
|
||||
#
|
||||
# ETL commands for Clio. We recommend setting secure_gateway
|
||||
# in this section to a comma-separated list of the addresses
|
||||
# of your Clio servers, in order to bypass xahaud's rate limiting.
|
||||
#
|
||||
# This port is commented out but can be enabled by removing
|
||||
# the '#' from each corresponding line including the entry under [server]
|
||||
#
|
||||
@@ -1477,7 +1684,6 @@ port = 6009
|
||||
ip = 127.0.0.1
|
||||
admin = 127.0.0.1
|
||||
protocol = ws
|
||||
send_queue_limit = 500
|
||||
|
||||
[port_grpc]
|
||||
port = 50051
|
||||
@@ -1488,7 +1694,6 @@ secure_gateway = 127.0.0.1
|
||||
#port = 6008
|
||||
#ip = 127.0.0.1
|
||||
#protocol = wss
|
||||
#send_queue_limit = 500
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
@@ -1505,22 +1710,45 @@ secure_gateway = 127.0.0.1
|
||||
# when the node has approximately two times the "online_delete" value of
|
||||
# ledgers. No external administrative command is required to initiate
|
||||
# deletion.
|
||||
[ledger_history]
|
||||
256
|
||||
|
||||
[node_db]
|
||||
type=NuDB
|
||||
path=/opt/xahaud/db/nudb
|
||||
online_delete=512
|
||||
online_delete=256
|
||||
advisory_delete=0
|
||||
|
||||
[database_path]
|
||||
/opt/xahaud/db
|
||||
|
||||
|
||||
# To use Postgres, uncomment this section and fill in the appropriate connection
|
||||
# info. Postgres can only be used in Reporting Mode.
|
||||
# To disable writing to the transaction database, uncomment this section, and
|
||||
# set use_tx_tables=0
|
||||
# [ledger_tx_tables]
|
||||
# conninfo = postgres://[username]:[password]@[ip]/[database]
|
||||
# use_tx_tables=1
|
||||
|
||||
|
||||
# This needs to be an absolute directory reference, not a relative one.
|
||||
# Modify this value as required.
|
||||
[debug_logfile]
|
||||
/var/log/xahaud/debug.log
|
||||
|
||||
# To use the Xahau test network
|
||||
[sntp_servers]
|
||||
time.windows.com
|
||||
time.apple.com
|
||||
time.nist.gov
|
||||
pool.ntp.org
|
||||
|
||||
# Use the following [ips] section for the main network:
|
||||
[ips]
|
||||
bacab.alloy.ee 21337
|
||||
hubs.xahau.as16089.net 21337
|
||||
|
||||
# To use the Xahau Test Network
|
||||
# (see https://xahau.network/docs/infrastructure/installing-xahaud),
|
||||
# use the following [ips] section:
|
||||
# [ips]
|
||||
@@ -1546,3 +1774,22 @@ validators-xahau.txt
|
||||
# set to ssl_verify to 0.
|
||||
[ssl_verify]
|
||||
1
|
||||
|
||||
# Define which network xahaud is connecting to
|
||||
# 21337 for the Main Xahau Network
|
||||
# 21338 for the Test Xahau Network
|
||||
[network_id]
|
||||
21337
|
||||
# 21338
|
||||
|
||||
|
||||
# To run in Reporting Mode, uncomment this section and fill in the appropriate
|
||||
# connection info for one or more ETL sources.
|
||||
# [reporting]
|
||||
# etl_source
|
||||
#
|
||||
#
|
||||
# [etl_source]
|
||||
# source_grpc_port=50051
|
||||
# source_ws_port=6005
|
||||
# source_ip=127.0.0.1
|
||||
|
||||
4
docs/build/environment.md
vendored
4
docs/build/environment.md
vendored
@@ -11,11 +11,11 @@ platforms: Linux, macOS, or Windows.
|
||||
Package ecosystems vary across Linux distributions,
|
||||
so there is no one set of instructions that will work for every Linux user.
|
||||
These instructions are written for Ubuntu 22.04.
|
||||
They are largely copied from the [script][1] used to configure our Docker
|
||||
They are largely copied from the [script][1] used to configure a Docker
|
||||
container for continuous integration.
|
||||
That script handles many more responsibilities.
|
||||
These instructions are just the bare minimum to build one configuration of
|
||||
rippled.
|
||||
xahaud.
|
||||
You can check that codebase for other Linux distributions and versions.
|
||||
If you cannot find yours there,
|
||||
then we hope that these instructions can at least guide you in the right
|
||||
|
||||
177
docs/build/install.md
vendored
177
docs/build/install.md
vendored
@@ -1,159 +1,30 @@
|
||||
This document contains instructions for installing rippled.
|
||||
The APT package manager is common on Debian-based Linux distributions like
|
||||
Ubuntu,
|
||||
while the YUM package manager is common on Red Hat-based Linux distributions
|
||||
like CentOS.
|
||||
Installing from source is an option for all platforms,
|
||||
and the only supported option for installing custom builds.
|
||||
Comprehensive instructions for installing and running xahaud are available on the [https://Xahau.Network](https://xahau.network/docs/infrastructure/installing-xahaud) documentation website.
|
||||
|
||||
## Create the Runtime Environment
|
||||
xahaud can be [built from source](../../BUILD.md) or installed using the binary files available from [https://build.xahau.tech](https://build.xahau.tech/). After obtaining a working xahaud binary, users will need to provide a suitable runtime environment. The following setup can be used for Linux or Docker environments.
|
||||
|
||||
## From source
|
||||
|
||||
From a source build, you can install rippled and libxrpl using CMake's
|
||||
`--install` mode:
|
||||
1. Create or download two configuration files: the main xahaud.cfg configuration file and a second validators-xahau.txt file defining which validators or UNL list publishers are trusted. The default location for these files in this xahaud repository is `cfg/`.
|
||||
2. Provide a directory structure that is congruent with the contents of xahaud.cfg. This will include a location for logfiles, such as `/var/log/xahaud/`, as well as database files, `/opt/xahaud/db/`. Configuration files are, by default, sourced from `/etc/xahaud/`. It is possible to provide a symbolic link, if users wish to store configuration files elsewhere.
|
||||
3. If desired, created a xahaud user and group, and change ownership of the binary and directories. Servers used for validating nodes should use the most restrictive permissions possible for `xahaud.cfg`, as the validation token is stored therein.
|
||||
4. If desired, create a systemd service file: `/etc/systemd/system/xahaud.service`, enabling xahaud to run as a daemon. Alternately, run: `/path/to/binary/xahaud --conf=/path/to/xahaud.cfg`.
|
||||
|
||||
## Example systemd Service File
|
||||
```
|
||||
cmake --install . --prefix /opt/local
|
||||
[Unit]
|
||||
Description=Xahaud Daemon
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/path/to/xahaud --silent --conf /path/to/xahaud.cfg
|
||||
Restart=on-failure
|
||||
User=xahaud
|
||||
Group=xahaud
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
The default [prefix][1] is typically `/usr/local` on Linux and macOS and
|
||||
`C:/Program Files/rippled` on Windows.
|
||||
|
||||
[1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html
|
||||
|
||||
|
||||
## With the APT package manager
|
||||
|
||||
1. Update repositories:
|
||||
|
||||
sudo apt update -y
|
||||
|
||||
2. Install utilities:
|
||||
|
||||
sudo apt install -y apt-transport-https ca-certificates wget gnupg
|
||||
|
||||
3. Add Ripple's package-signing GPG key to your list of trusted keys:
|
||||
|
||||
sudo mkdir /usr/local/share/keyrings/
|
||||
wget -q -O - "https://repos.ripple.com/repos/api/gpg/key/public" | gpg --dearmor > ripple-key.gpg
|
||||
sudo mv ripple-key.gpg /usr/local/share/keyrings
|
||||
|
||||
|
||||
4. Check the fingerprint of the newly-added key:
|
||||
|
||||
gpg /usr/local/share/keyrings/ripple-key.gpg
|
||||
|
||||
The output should include an entry for Ripple such as the following:
|
||||
|
||||
gpg: WARNING: no command supplied. Trying to guess what you mean ...
|
||||
pub rsa3072 2019-02-14 [SC] [expires: 2026-02-17]
|
||||
C0010EC205B35A3310DC90DE395F97FFCCAFD9A2
|
||||
uid TechOps Team at Ripple <techops+rippled@ripple.com>
|
||||
sub rsa3072 2019-02-14 [E] [expires: 2026-02-17]
|
||||
|
||||
|
||||
In particular, make sure that the fingerprint matches. (In the above example, the fingerprint is on the third line, starting with `C001`.)
|
||||
|
||||
4. Add the appropriate Ripple repository for your operating system version:
|
||||
|
||||
echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/rippled-deb focal stable" | \
|
||||
sudo tee -a /etc/apt/sources.list.d/ripple.list
|
||||
|
||||
The above example is appropriate for **Ubuntu 20.04 Focal Fossa**. For other operating systems, replace the word `focal` with one of the following:
|
||||
|
||||
- `jammy` for **Ubuntu 22.04 Jammy Jellyfish**
|
||||
- `bionic` for **Ubuntu 18.04 Bionic Beaver**
|
||||
- `bullseye` for **Debian 11 Bullseye**
|
||||
- `buster` for **Debian 10 Buster**
|
||||
|
||||
If you want access to development or pre-release versions of `rippled`, use one of the following instead of `stable`:
|
||||
|
||||
- `unstable` - Pre-release builds ([`release` branch](https://github.com/ripple/rippled/tree/release))
|
||||
- `nightly` - Experimental/development builds ([`develop` branch](https://github.com/ripple/rippled/tree/develop))
|
||||
|
||||
**Warning:** Unstable and nightly builds may be broken at any time. Do not use these builds for production servers.
|
||||
|
||||
5. Fetch the Ripple repository.
|
||||
|
||||
sudo apt -y update
|
||||
|
||||
6. Install the `rippled` software package:
|
||||
|
||||
sudo apt -y install rippled
|
||||
|
||||
7. Check the status of the `rippled` service:
|
||||
|
||||
systemctl status rippled.service
|
||||
|
||||
The `rippled` service should start automatically. If not, you can start it manually:
|
||||
|
||||
sudo systemctl start rippled.service
|
||||
|
||||
8. Optional: allow `rippled` to bind to privileged ports.
|
||||
|
||||
This allows you to serve incoming API requests on port 80 or 443. (If you want to do so, you must also update the config file's port settings.)
|
||||
|
||||
sudo setcap 'cap_net_bind_service=+ep' /opt/ripple/bin/rippled
|
||||
|
||||
|
||||
## With the YUM package manager
|
||||
|
||||
1. Install the Ripple RPM repository:
|
||||
|
||||
Choose the appropriate RPM repository for the stability of releases you want:
|
||||
|
||||
- `stable` for the latest production release (`master` branch)
|
||||
- `unstable` for pre-release builds (`release` branch)
|
||||
- `nightly` for experimental/development builds (`develop` branch)
|
||||
|
||||
*Stable*
|
||||
|
||||
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
||||
[ripple-stable]
|
||||
name=XRP Ledger Packages
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
repo_gpgcheck=1
|
||||
baseurl=https://repos.ripple.com/repos/rippled-rpm/stable/
|
||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/stable/repodata/repomd.xml.key
|
||||
REPOFILE
|
||||
|
||||
*Unstable*
|
||||
|
||||
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
||||
[ripple-unstable]
|
||||
name=XRP Ledger Packages
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
repo_gpgcheck=1
|
||||
baseurl=https://repos.ripple.com/repos/rippled-rpm/unstable/
|
||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/unstable/repodata/repomd.xml.key
|
||||
REPOFILE
|
||||
|
||||
*Nightly*
|
||||
|
||||
cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo
|
||||
[ripple-nightly]
|
||||
name=XRP Ledger Packages
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
repo_gpgcheck=1
|
||||
baseurl=https://repos.ripple.com/repos/rippled-rpm/nightly/
|
||||
gpgkey=https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key
|
||||
REPOFILE
|
||||
|
||||
2. Fetch the latest repo updates:
|
||||
|
||||
sudo yum -y update
|
||||
|
||||
3. Install the new `rippled` package:
|
||||
|
||||
sudo yum install -y rippled
|
||||
|
||||
4. Configure the `rippled` service to start on boot:
|
||||
|
||||
sudo systemctl enable rippled.service
|
||||
|
||||
5. Start the `rippled` service:
|
||||
|
||||
sudo systemctl start rippled.service
|
||||
After the systemd service file is installed, it must be loaded with: `systemctl daemon-reload`. xahaud can then be enabled: `systemctl enable --now xahaud`.
|
||||
|
||||
@@ -139,7 +139,9 @@ struct XahauGenesis_test : public beast::unit_test::suite
|
||||
false, // means the calling test already burned some of the genesis
|
||||
bool skipTests = false,
|
||||
bool const testFlag = false,
|
||||
bool const badNetID = false)
|
||||
bool const badNetID = false,
|
||||
uint32_t const expectedOwnerCount =
|
||||
10 /** testFlag ? 10 : 14 (default) */)
|
||||
{
|
||||
using namespace jtx;
|
||||
|
||||
@@ -247,7 +249,10 @@ struct XahauGenesis_test : public beast::unit_test::suite
|
||||
BEAST_EXPECT(
|
||||
genesisAccRoot->getFieldAmount(sfBalance) ==
|
||||
XahauGenesis::GenesisAmount);
|
||||
BEAST_EXPECT(genesisAccRoot->getFieldU32(sfOwnerCount) == 2);
|
||||
BEAST_EXPECT(
|
||||
genesisAccRoot->getFieldU32(sfOwnerCount) == !testFlag
|
||||
? expectedOwnerCount
|
||||
: 14);
|
||||
|
||||
// ensure the definitions are correctly set
|
||||
{
|
||||
@@ -583,7 +588,14 @@ struct XahauGenesis_test : public beast::unit_test::suite
|
||||
toBase58(t), membersStr);
|
||||
}
|
||||
|
||||
activate(__LINE__, env, true, false, true);
|
||||
activate(
|
||||
__LINE__,
|
||||
env,
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
{},
|
||||
3 /* IRR,IRD,IMC */ + members.size() + tables.size());
|
||||
|
||||
env.close();
|
||||
env.close();
|
||||
@@ -2235,6 +2247,8 @@ struct XahauGenesis_test : public beast::unit_test::suite
|
||||
BEAST_EXPECT(!!hookLE);
|
||||
uint256 const ns = beast::zero;
|
||||
uint8_t mc = 0;
|
||||
uint8_t paramsCount = 0;
|
||||
|
||||
if (hookLE)
|
||||
{
|
||||
auto const hooksArray = hookLE->getFieldArray(sfHooks);
|
||||
@@ -2242,6 +2256,9 @@ struct XahauGenesis_test : public beast::unit_test::suite
|
||||
hooksArray.size() == 1 &&
|
||||
hooksArray[0].getFieldH256(sfHookHash) == governHookHash);
|
||||
|
||||
paramsCount =
|
||||
hooksArray[0].getFieldArray(sfHookParameters).size();
|
||||
|
||||
for (Account const* m : members)
|
||||
{
|
||||
auto const mVec = vecFromAcc(*m);
|
||||
@@ -2308,7 +2325,9 @@ struct XahauGenesis_test : public beast::unit_test::suite
|
||||
BEAST_EXPECT(!!root);
|
||||
if (root)
|
||||
{
|
||||
BEAST_EXPECT(root->getFieldU32(sfOwnerCount) == mc * 2 + 2);
|
||||
BEAST_EXPECT(
|
||||
root->getFieldU32(sfOwnerCount) ==
|
||||
mc * 2 + 2 + paramsCount);
|
||||
BEAST_EXPECT(root->getFieldU32(sfFlags) & lsfDisableMaster);
|
||||
BEAST_EXPECT(root->getAccountID(sfRegularKey) == noAccount());
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <xrpld/app/misc/AmendmentTable.h>
|
||||
#include <xrpld/app/misc/NetworkOPs.h>
|
||||
#include <xrpld/app/tx/detail/Change.h>
|
||||
#include <xrpld/app/tx/detail/SetHook.h>
|
||||
#include <xrpld/app/tx/detail/SetSignerList.h>
|
||||
#include <xrpld/app/tx/detail/XahauGenesis.h>
|
||||
#include <xrpld/ledger/Sandbox.h>
|
||||
@@ -585,10 +586,6 @@ Change::activateXahauGenesis()
|
||||
SetSignerList::removeFromLedger(ctx_.app, sb, accid, j_);
|
||||
|
||||
// Step 4: install genesis hooks
|
||||
sle->setFieldU32(
|
||||
sfOwnerCount, sle->getFieldU32(sfOwnerCount) + genesis_hooks.size());
|
||||
sb.update(sle);
|
||||
|
||||
if (sb.exists(keylet::hook(accid)))
|
||||
{
|
||||
JLOG(j_.warn()) << "featureXahauGenesis genesis account already has "
|
||||
@@ -600,6 +597,7 @@ Change::activateXahauGenesis()
|
||||
ripple::STArray hooks{
|
||||
sfHooks, static_cast<std::size_t>(genesis_hooks.size())};
|
||||
int hookCount = 0;
|
||||
uint32_t hookReserve = 0;
|
||||
|
||||
for (auto const& [hookOn, wasmBytes, params] : genesis_hooks)
|
||||
{
|
||||
@@ -705,8 +703,14 @@ Change::activateXahauGenesis()
|
||||
}
|
||||
|
||||
hooks.push_back(hookObj);
|
||||
|
||||
hookReserve += SetHook::computeHookReserve(hookObj);
|
||||
}
|
||||
|
||||
sle->setFieldU32(
|
||||
sfOwnerCount, sle->getFieldU32(sfOwnerCount) + hookReserve);
|
||||
sb.update(sle);
|
||||
|
||||
auto sle = std::make_shared<SLE>(keylet::hook(accid));
|
||||
sle->setFieldArray(sfHooks, hooks);
|
||||
sle->setAccountID(sfAccount, accid);
|
||||
@@ -747,6 +751,8 @@ Change::activateXahauGenesis()
|
||||
ripple::STArray hooks{sfHooks, 1};
|
||||
STObject hookObj{sfHook};
|
||||
hookObj.setFieldH256(sfHookHash, governHash);
|
||||
|
||||
uint32_t hookReserve = 0;
|
||||
// parameters
|
||||
{
|
||||
std::vector<STObject> vec;
|
||||
@@ -762,6 +768,7 @@ Change::activateXahauGenesis()
|
||||
sfHookParameters, STArray(vec, sfHookParameters));
|
||||
}
|
||||
|
||||
hookReserve += SetHook::computeHookReserve(hookObj);
|
||||
hooks.push_back(hookObj);
|
||||
|
||||
auto sle = std::make_shared<SLE>(hookKL);
|
||||
@@ -788,7 +795,8 @@ Change::activateXahauGenesis()
|
||||
|
||||
sle->setAccountID(sfRegularKey, noAccount());
|
||||
sle->setFieldU32(sfFlags, lsfDisableMaster);
|
||||
sle->setFieldU32(sfOwnerCount, sle->getFieldU32(sfOwnerCount) + 1);
|
||||
sle->setFieldU32(
|
||||
sfOwnerCount, sle->getFieldU32(sfOwnerCount) + hookReserve);
|
||||
sb.update(sle);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1222,6 +1222,29 @@ updateHookParameters(
|
||||
return tesSUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute the reserve required for a hook object.
|
||||
* @param hookObj The hook object to compute the reserve for.(not Transaction
|
||||
* field, use the Hook object inside the ltHook object.)
|
||||
* @return The reserve required for the hook object.
|
||||
*/
|
||||
uint32_t
|
||||
SetHook::computeHookReserve(STObject const& hookObj)
|
||||
{
|
||||
if (!hookObj.isFieldPresent(sfHookHash))
|
||||
return 0;
|
||||
|
||||
int reserve{1};
|
||||
|
||||
if (hookObj.isFieldPresent(sfHookParameters))
|
||||
reserve += hookObj.getFieldArray(sfHookParameters).size();
|
||||
|
||||
if (hookObj.isFieldPresent(sfHookGrants))
|
||||
reserve += hookObj.getFieldArray(sfHookGrants).size();
|
||||
|
||||
return reserve;
|
||||
};
|
||||
|
||||
struct KeyletComparator
|
||||
{
|
||||
bool
|
||||
@@ -1987,28 +2010,14 @@ SetHook::setHook()
|
||||
int oldHookReserve = 0;
|
||||
int newHookReserve = 0;
|
||||
|
||||
auto const computeHookReserve = [](STObject const& hookObj) -> int {
|
||||
if (!hookObj.isFieldPresent(sfHookHash))
|
||||
return 0;
|
||||
|
||||
int reserve{1};
|
||||
|
||||
if (hookObj.isFieldPresent(sfHookParameters))
|
||||
reserve += hookObj.getFieldArray(sfHookParameters).size();
|
||||
|
||||
if (hookObj.isFieldPresent(sfHookGrants))
|
||||
reserve += hookObj.getFieldArray(sfHookGrants).size();
|
||||
|
||||
return reserve;
|
||||
};
|
||||
|
||||
for (int i = 0; i < hook::maxHookChainLength(); ++i)
|
||||
{
|
||||
if (oldHooks && i < oldHookCount)
|
||||
oldHookReserve += computeHookReserve(((*oldHooks).get())[i]);
|
||||
oldHookReserve +=
|
||||
SetHook::computeHookReserve(((*oldHooks).get())[i]);
|
||||
|
||||
if (i < newHooks.size())
|
||||
newHookReserve += computeHookReserve(newHooks[i]);
|
||||
newHookReserve += SetHook::computeHookReserve(newHooks[i]);
|
||||
}
|
||||
|
||||
reserveDelta = newHookReserve - oldHookReserve;
|
||||
|
||||
@@ -91,6 +91,9 @@ public:
|
||||
static HookSetValidation
|
||||
validateHookSetEntry(SetHookCtx& ctx, STObject const& hookSetObj);
|
||||
|
||||
static uint32_t
|
||||
computeHookReserve(STObject const& hookObj);
|
||||
|
||||
private:
|
||||
TER
|
||||
setHook();
|
||||
|
||||
Reference in New Issue
Block a user