Compare commits


2 Commits

Author               | SHA1       | Message                | Date
Valentin Balaschenko | c40c144bd4 | thread name            | 2025-08-05 22:20:07 +01:00
Valentin Balaschenko | e6bb8831c6 | tagging thread in base | 2025-08-04 16:38:56 +01:00
40 changed files with 1073 additions and 1124 deletions


@@ -2,37 +2,49 @@ name: dependencies
inputs:
configuration:
required: true
# Implicit inputs are the environment variables `build_dir`, CONAN_REMOTE_URL,
# CONAN_REMOTE_USERNAME, and CONAN_REMOTE_PASSWORD. The latter two are only
# used to upload newly built dependencies to the Conan remote.
# An implicit input is the environment variable `build_dir`.
runs:
using: composite
steps:
- name: add Conan remote
if: ${{ env.CONAN_REMOTE_URL != '' }}
- name: export custom recipes
shell: bash
run: |
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
conan remote add --index 0 --force xrplf ${{ env.CONAN_REMOTE_URL }}
echo "Listing Conan remotes."
conan remote list
conan export --version 1.1.10 external/snappy
conan export --version 4.0.3 external/soci
- name: add Ripple Conan remote
if: env.CONAN_URL != ''
shell: bash
run: |
if conan remote list | grep -q "ripple"; then
conan remote remove ripple
echo "Removed conan remote ripple"
fi
conan remote add --index 0 ripple "${CONAN_URL}"
echo "Added conan remote ripple at ${CONAN_URL}"
- name: try to authenticate to Ripple Conan remote
if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != ''
id: remote
shell: bash
run: |
echo "Authenticating to ripple remote..."
conan remote auth ripple --force
conan remote list-users
- name: list missing binaries
id: binaries
shell: bash
# Print the list of dependencies that would need to be built locally.
# A non-empty list means we have "failed" to cache binaries remotely.
run: |
echo missing=$(conan info . --build missing --settings build_type=${{ inputs.configuration }} --json 2>/dev/null | grep '^\[') | tee ${GITHUB_OUTPUT}
- name: install dependencies
shell: bash
run: |
mkdir -p ${{ env.build_dir }}
cd ${{ env.build_dir }}
mkdir ${build_dir}
cd ${build_dir}
conan install \
--output-folder . \
--build missing \
--build '*' \
--options:host "&:tests=True" \
--options:host "&:xrpld=True" \
--settings:all build_type=${{ inputs.configuration }} \
..
- name: upload dependencies
if: ${{ env.CONAN_REMOTE_URL != '' && env.CONAN_REMOTE_USERNAME != '' && env.CONAN_REMOTE_PASSWORD != '' && github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
shell: bash
run: |
echo "Logging into Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
conan remote login xrplf "${{ env.CONAN_REMOTE_USERNAME }}" --password "${{ env.CONAN_REMOTE_PASSWORD }}"
echo "Uploading dependencies."
conan upload '*' --confirm --check --remote xrplf


@@ -1,8 +1,8 @@
name: Check libXRPL compatibility with Clio
env:
CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }}
CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }}
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
on:
pull_request:
paths:
@@ -43,20 +43,20 @@ jobs:
shell: bash
run: |
conan export . ${{ steps.channel.outputs.channel }}
- name: Add Conan remote
- name: Add Ripple Conan remote
shell: bash
run: |
echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force
echo "Listing Conan remotes."
conan remote list
conan remote remove ripple || true
# Do not quote the URL. An empty string will be accepted (with a non-fatal warning), but a missing argument will not.
conan remote add ripple ${{ env.CONAN_URL }} --insert 0
- name: Parse new version
id: version
shell: bash
run: |
echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
| awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
- name: Try to authenticate to Conan remote
- name: Try to authenticate to Ripple Conan remote
id: remote
shell: bash
run: |
@@ -64,7 +64,7 @@ jobs:
# https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
# https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
# https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
echo outcome=$(conan user --remote xrplf --password >&2 \
echo outcome=$(conan user --remote ripple --password >&2 \
&& echo success || echo failure) | tee ${GITHUB_OUTPUT}
- name: Upload new package
id: upload


@@ -18,12 +18,9 @@ concurrency:
# This part of Conan configuration is specific to this workflow only; we do not want
# to pollute conan/profiles directory with settings which might not work for others
env:
CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }}
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
# This part of the Conan configuration is specific to this workflow only; we
# do not want to pollute the 'conan/profiles' directory with settings that
# might not work for other workflows.
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
CONAN_GLOBAL_CONF: |
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
@@ -90,9 +87,24 @@ jobs:
clang --version
- name: configure Conan
run : |
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan profile show
- name: export custom recipes
shell: bash
run: |
conan export --version 1.1.10 external/snappy
conan export --version 4.0.3 external/soci
- name: add Ripple Conan remote
if: env.CONAN_URL != ''
shell: bash
run: |
if conan remote list | grep -q "ripple"; then
conan remote remove ripple
echo "Removed conan remote ripple"
fi
conan remote add --index 0 ripple "${CONAN_URL}"
echo "Added conan remote ripple at ${CONAN_URL}"
- name: build dependencies
uses: ./.github/actions/dependencies
with:


@@ -16,13 +16,12 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# This part of Conan configuration is specific to this workflow only; we do not want
# to pollute conan/profiles directory with settings which might not work for others
env:
CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }}
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
# This part of the Conan configuration is specific to this workflow only; we
# do not want to pollute the 'conan/profiles' directory with settings that
# might not work for other workflows.
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
CONAN_GLOBAL_CONF: |
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}


@@ -18,13 +18,12 @@ on:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# This part of Conan configuration is specific to this workflow only; we do not want
# to pollute conan/profiles directory with settings which might not work for others
env:
CONAN_REMOTE_URL: ${{ vars.CONAN_REMOTE_URL }}
CONAN_REMOTE_USERNAME: ${{ secrets.CONAN_REMOTE_USERNAME }}
CONAN_REMOTE_PASSWORD: ${{ secrets.CONAN_REMOTE_PASSWORD }}
# This part of the Conan configuration is specific to this workflow only; we
# do not want to pollute the 'conan/profiles' directory with settings that
# might not work for other workflows.
CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/dev
CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
CONAN_GLOBAL_CONF: |
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
@@ -83,9 +82,24 @@ jobs:
- name: configure Conan
shell: bash
run: |
echo "${CONAN_GLOBAL_CONF}" > $(conan config home)/global.conf
echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan profile show
- name: export custom recipes
shell: bash
run: |
conan export --version 1.1.10 external/snappy
conan export --version 4.0.3 external/soci
- name: add Ripple Conan remote
if: env.CONAN_URL != ''
shell: bash
run: |
if conan remote list | grep -q "ripple"; then
conan remote remove ripple
echo "Removed conan remote ripple"
fi
conan remote add --index 0 ripple "${CONAN_URL}"
echo "Added conan remote ripple at ${CONAN_URL}"
- name: build dependencies
uses: ./.github/actions/dependencies
with:

BUILD.md (407 lines changed)

@@ -3,29 +3,29 @@
> These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md).
> These instructions also assume a basic familiarity with Conan and CMake.
> If you are unfamiliar with Conan, you can read our
> [crash course](./docs/build/conan.md) or the official [Getting Started][3]
> walkthrough.
> If you are unfamiliar with Conan,
> you can read our [crash course](./docs/build/conan.md)
> or the official [Getting Started][3] walkthrough.
## Branches
For a stable release, choose the `master` branch or one of the [tagged
releases](https://github.com/ripple/rippled/releases).
```bash
```
git checkout master
```
For the latest release candidate, choose the `release` branch.
```bash
```
git checkout release
```
For the latest set of untested features, or to contribute, choose the `develop`
branch.
```bash
```
git checkout develop
```
@@ -33,295 +33,151 @@ git checkout develop
See [System Requirements](https://xrpl.org/system-requirements.html).
Building rippled generally requires git, Python, Conan, CMake, and a C++
compiler. Some guidance on setting up such a [C++ development environment can be
found here](./docs/build/environment.md).
Building rippled generally requires git, Python, Conan, CMake, and a C++ compiler. Some guidance on setting up such a [C++ development environment can be found here](./docs/build/environment.md).
- [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/)[^2], or higher
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.60](https://conan.io/downloads.html)[^1]
- [CMake 3.16](https://cmake.org/download/)
[^1]: It is possible to build with Conan 1.60+, but the instructions are
significantly different, which is why we are not recommending it.
[^2]: CMake 4 is not yet supported by all dependencies required by this project.
If you are affected by this issue, follow the [workaround for CMake
4](#workaround-for-cmake-4).
[^1]: It is possible to build with Conan 2.x,
but the instructions are significantly different,
which is why we are not recommending it yet.
Notably, the `conan profile update` command is removed in 2.x.
Profiles must be edited by hand.
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:
| Compiler | Version |
|-------------|-----|
| GCC | 12 |
| Clang | 16 |
| Apple Clang | 16 |
| MSVC | 19.44[^3] |
|-------------|---------|
| GCC | 11 |
| Clang | 13 |
| Apple Clang | 13.1.6 |
| MSVC | 19.23 |
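A quick way to check whether your toolchain meets these minimums is to query each tool's version directly (a minimal sketch; command names may differ on your platform):
```bash
python3 --version   # compare against the minimum Python version above
conan --version     # compare against the minimum Conan version above
cmake --version     # compare against the minimum CMake version above
g++ --version       # or clang++ --version; see the compiler table above
```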
### Linux
The Ubuntu Linux distribution has received the highest level of quality
assurance, testing, and support. We also support Red Hat and use Debian
internally.
The Ubuntu operating system has received the highest level of
quality assurance, testing, and support.
Here are [sample instructions for setting up a C++ development environment on
Linux](./docs/build/environment.md#linux).
Here are [sample instructions for setting up a C++ development environment on Linux](./docs/build/environment.md#linux).
### Mac
Many rippled engineers use macOS for development.
Here are [sample instructions for setting up a C++ development environment on
macOS](./docs/build/environment.md#macos).
Here are [sample instructions for setting up a C++ development environment on macOS](./docs/build/environment.md#macos).
### Windows
Windows is used by some engineers for development only.
Windows is not recommended for production use at this time.
[^3]: Windows is not recommended for production use.
- Additionally, 32-bit Windows development is not supported.
[Boost]: https://www.boost.org/
## Steps
### Set Up Conan
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python,
Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
These instructions assume a basic familiarity with Conan and CMake. If you are
unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official
[Getting Started][3] walkthrough.
These instructions assume a basic familiarity with Conan and CMake.
#### Default profile
We recommend that you import the provided `conan/profiles/default` profile:
If you are unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official [Getting Started][3] walkthrough.
```bash
conan config install conan/profiles/ -tf $(conan config home)/profiles/
```
You'll need at least one Conan profile:
You can check your Conan profile by running:
```bash
conan profile show
```
#### Custom profile
If the default profile does not work for you and you do not yet have a Conan
profile, you can create one by running:
```bash
conan profile detect
```
You may need to make changes to the profile to suit your environment. You can
refer to the provided `conan/profiles/default` profile for inspiration, and you
may also need to apply the required [tweaks](#conan-profile-tweaks) to this
default profile.
### Patched recipes
The recipes in Conan Center occasionally need to be patched for compatibility
with the latest version of `rippled`. We maintain a fork of the Conan Center
[here](https://github.com/XRPLF/conan-center-index/) containing the patches.
To ensure our patched recipes are used, you must add our Conan remote at a
higher priority than the default Conan Center remote, so it is consulted first.
You can do this by running:
```bash
conan remote add --index 0 xrplf "https://conan.ripplex.io"
```
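You can confirm the ordering took effect by listing the configured remotes; the `xrplf` remote should appear before the default Conan Center remote:
```bash
conan remote list
```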
Alternatively, you can pull the patched recipes into the repository and use them
locally:
```bash
cd external
git init
git remote add origin git@github.com:XRPLF/conan-center-index.git
git sparse-checkout init
git sparse-checkout set recipes/snappy
git sparse-checkout add recipes/soci
git fetch origin master
git checkout master
conan export --version 1.1.10 external/recipes/snappy
conan export --version 4.0.3 external/recipes/soci
```
If we switch to a newer version of a dependency that still requires a patch,
you will need to pull in the changes and re-export the updated dependency with
the newer version. However, if we switch to a newer version that no longer
requires a patch, no action is required on your part, as the new recipe will be
pulled automatically from the official Conan Center.
### Conan profile tweaks
#### Missing compiler version
If you see an error similar to the following after running `conan profile show`:
```bash
ERROR: Invalid setting '17' is not a valid 'settings.compiler.version' value.
Possible values are ['5.0', '5.1', '6.0', '6.1', '7.0', '7.3', '8.0', '8.1',
'9.0', '9.1', '10.0', '11.0', '12.0', '13', '13.0', '13.1', '14', '14.0', '15',
'15.0', '16', '16.0']
Read "http://docs.conan.io/2/knowledge/faq.html#error-invalid-setting"
```
you need to amend the list of compiler versions in
`$(conan config home)/settings.yml` by appending the required version number(s)
to the `version` array specific to your compiler. For example:
```yaml
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0",
"9.1", "10.0", "11.0", "12.0", "13", "13.0", "13.1", "14",
"14.0", "15", "15.0", "16", "16.0", "17", "17.0"]
```
#### Multiple compilers
If you have multiple compilers installed, make sure to select the one to use in
your default Conan configuration **before** running `conan profile detect`, by
setting the `CC` and `CXX` environment variables.
For example, if you are running MacOS and have [homebrew
LLVM@18](https://formulae.brew.sh/formula/llvm@18), and want to use it as a
compiler in the new Conan profile:
```bash
export CC=$(brew --prefix llvm@18)/bin/clang
export CXX=$(brew --prefix llvm@18)/bin/clang++
conan profile detect
```
conan profile new default --detect
```
You should also explicitly set the path to the compiler in the profile file,
which helps to avoid errors when `CC` and/or `CXX` are set and disagree with the
selected Conan profile. For example:
Update the compiler settings:
```text
[conf]
tools.build:compiler_executables={'c':'/usr/bin/gcc','cpp':'/usr/bin/g++'}
```
conan profile update settings.compiler.cppstd=20 default
```
Configure Conan (1.x only) to use recipe revisions:
```
conan config set general.revisions_enabled=1
```
**Linux** developers will commonly have a default Conan [profile][] that compiles
with GCC and links with libstdc++.
If you are linking with libstdc++ (see profile setting `compiler.libcxx`),
then you will need to choose the `libstdc++11` ABI:
```
conan profile update settings.compiler.libcxx=libstdc++11 default
```
Ensure inter-operability between `boost::string_view` and `std::string_view` types:
```
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_BEAST_USE_STD_STRING_VIEW"]' default
conan profile update 'env.CXXFLAGS="-DBOOST_BEAST_USE_STD_STRING_VIEW"' default
```
#### Multiple profiles
You can manage multiple Conan profiles in the directory
`$(conan config home)/profiles`, for example renaming `default` to a different
name and then creating a new `default` profile for a different compiler.
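For example, to keep your current profile under a different name and then generate a fresh default (a sketch; the profile name `gcc` is arbitrary):
```bash
mv $(conan config home)/profiles/default $(conan config home)/profiles/gcc
conan profile detect   # writes a new default profile
```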
#### Select language
The default profile created by Conan will typically select a different C++
dialect than the C++20 used by this project. You should set `20` in the profile
line starting with `compiler.cppstd=`. For example:
```bash
sed -i.bak -e 's|^compiler\.cppstd=.*$|compiler.cppstd=20|' $(conan config home)/profiles/default
If you have other flags in the `conf.tools.build` or `env.CXXFLAGS` sections, make sure to retain the existing flags and append the new ones. You can check them with:
```
conan profile show default
```
#### Select standard library in Linux
**Linux** developers will commonly have a default Conan [profile][] that
compiles with GCC and links with libstdc++. If you are linking with libstdc++
(see profile setting `compiler.libcxx`), then you will need to choose the
`libstdc++11` ABI:
**Windows** developers may need to use the x64 native build tools.
An easy way to do that is to run the shortcut "x64 Native Tools Command
Prompt" for the version of Visual Studio that you have installed.
```bash
sed -i.bak -e 's|^compiler\.libcxx=.*$|compiler.libcxx=libstdc++11|' $(conan config home)/profiles/default
Windows developers must also build `rippled` and its dependencies for the x64
architecture:
```
conan profile update settings.arch=x86_64 default
```
### Multiple compilers
When `/usr/bin/g++` exists on a platform, it is the default C++ compiler. This
default works for some users.
However, if this compiler cannot build rippled or its dependencies, then you can
install another compiler and set Conan and CMake to use it.
Update the `conf.tools.build:compiler_executables` setting in order to set the correct variables (`CMAKE_<LANG>_COMPILER`) in the
generated CMake toolchain file.
For example, on Ubuntu 20, you may have gcc at `/usr/bin/gcc` and g++ at `/usr/bin/g++`; if that is the case, you can select those compilers with:
```
conan profile update 'conf.tools.build:compiler_executables={"c": "/usr/bin/gcc", "cpp": "/usr/bin/g++"}' default
```
#### Select architecture and runtime in Windows
Replace `/usr/bin/gcc` and `/usr/bin/g++` with paths to the desired compilers.
**Windows** developers may need to use the x64 native build tools. An easy way
to do that is to run the shortcut "x64 Native Tools Command Prompt" for the
version of Visual Studio that you have installed.
It should choose the compiler for dependencies as well,
but not all of them have a Conan recipe that respects this setting (yet).
For the rest, you can set these environment variables.
Replace `<path>` with paths to the desired compilers:
Windows developers must also build `rippled` and its dependencies for the x64
architecture:
- `conan profile update env.CC=<path> default`
- `conan profile update env.CXX=<path> default`
```bash
sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
```
Export our [Conan recipe for Snappy](./external/snappy).
It does not explicitly link the C++ standard library,
which allows you to statically link it with GCC, if you want.
**Windows** developers also must select static runtime:
```
# Conan 2.x
conan export --version 1.1.10 external/snappy
```
```bash
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
```
Export our [Conan recipe for SOCI](./external/soci).
It patches their CMake to correctly import its dependencies.
#### Workaround for CMake 4
If your system CMake is version 4 rather than 3, you may have to configure the
Conan profile to use CMake version 3 for dependencies, by adding the following
two lines to your profile:
```text
[tool_requires]
!cmake/*: cmake/[>=3 <4]
```
This will force Conan to download and use a locally cached CMake 3 version, and
is needed because some of the dependencies used by this project do not support
CMake 4.
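One way to apply this tweak is to append the lines to your default profile (a sketch; adjust the path if you keep the tweak in a custom profile instead):
```bash
cat >> $(conan config home)/profiles/default <<'EOF'

[tool_requires]
!cmake/*: cmake/[>=3 <4]
EOF
```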
#### Clang workaround for grpc
If your compiler is clang, version 19 or later, or apple-clang, version 17 or
later, you may encounter a compilation error while building the `grpc`
dependency:
```text
In file included from .../lib/promise/try_seq.h:26:
.../lib/promise/detail/basic_seq.h:499:38: error: a template argument list is expected after a name prefixed by the template keyword [-Wmissing-template-arg-list-after-template-kw]
499 | Traits::template CallSeqFactory(f_, *cur_, std::move(arg)));
| ^
```
The workaround for this error is to add two lines to your profile:
```text
[conf]
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
```
#### Workaround for gcc 12
If your compiler is gcc, version 12, and you have enabled the `werr` option,
you may encounter a compilation error such as:
```text
/usr/include/c++/12/bits/char_traits.h:435:56: error: 'void* __builtin_memcpy(void*, const void*, long unsigned int)' accessing 9223372036854775810 or more bytes at offsets [2, 9223372036854775807] and 1 may overlap up to 9223372036854775813 bytes at offset -3 [-Werror=restrict]
435 | return static_cast<char_type*>(__builtin_memcpy(__s1, __s2, __n));
| ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~
cc1plus: all warnings being treated as errors
```
The workaround for this error is to add two lines to your profile:
```text
[conf]
tools.build:cxxflags=['-Wno-restrict']
```
#### Workaround for clang 16
If your compiler is clang, version 16, you may encounter a compilation error
such as:
```text
In file included from .../boost/beast/websocket/stream.hpp:2857:
.../boost/beast/websocket/impl/read.hpp:695:17: error: call to 'async_teardown' is ambiguous
async_teardown(impl.role, impl.stream(),
^~~~~~~~~~~~~~
```
The workaround for this error is to add two lines to your profile:
```text
[conf]
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
```
```
# Conan 2.x
conan export --version 4.0.3 external/soci
```
### Build and Test
@@ -389,6 +245,7 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON ..
```
Multi-config generators:
```
@@ -400,13 +257,13 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
5. Build `rippled`.
For a single-configuration generator, it will build whatever configuration
you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, you
must pass the option `--config` to select the build configuration.
you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator,
you must pass the option `--config` to select the build configuration.
Single-config generators:
```
cmake --build .
cmake --build . -j $(nproc)
```
Multi-config generators:
@@ -421,22 +278,18 @@ tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
Single-config generators:
```
./rippled --unittest --unittest-jobs N
./rippled --unittest
```
Multi-config generators:
```
./Release/rippled --unittest --unittest-jobs N
./Debug/rippled --unittest --unittest-jobs N
./Release/rippled --unittest
./Debug/rippled --unittest
```
Replace the `--unittest-jobs` parameter N with the desired unit test
concurrency. The recommended setting is half the number of available CPU
cores.
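For example, on Linux (a sketch using `nproc` to derive the core count):
```bash
./rippled --unittest --unittest-jobs $(( $(nproc) / 2 ))
```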
The location of the `rippled` binary in your build directory depends on your
CMake generator. Pass `--help` to see the rest of the command line options.
The location of `rippled` in your build directory depends on your CMake
generator. Pass `--help` to see the rest of the command line options.
## Coverage report
@@ -494,7 +347,7 @@ cmake --build . --target coverage
After the `coverage` target is completed, the generated coverage report will be
stored inside the build directory, as either of:
- file named `coverage.`_extension_, with a suitable extension for the report format, or
- file named `coverage.`_extension_ , with a suitable extension for the report format, or
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
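Putting it together, a typical coverage run might look like this (a sketch assuming a single-config generator and the `coverage` option from the table below):
```bash
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
  -DCMAKE_BUILD_TYPE=Debug -Dcoverage=ON -Dtests=ON -Dxrpld=ON ..
cmake --build . --target coverage
```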
@@ -502,14 +355,12 @@ stored inside the build directory, as either of:
| Option | Default Value | Description |
| --- | ---| ---|
| `assert` | OFF | Enable assertions. |
| `assert` | OFF | Enable assertions.
| `coverage` | OFF | Prepare the coverage report. |
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
| `tests` | OFF | Build tests. |
| `unity` | OFF | Configure a unity build. |
| `unity` | ON | Configure a unity build. |
| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. |
| `werr` | OFF | Treat compilation warnings as errors |
| `wextra` | OFF | Enable additional compilation warnings |
[Unity builds][5] may be faster for the first build
(at the cost of much more memory) since they concatenate sources into fewer
@@ -524,28 +375,12 @@ and can be helpful for detecting `#include` omissions.
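For example, to disable the unity build while keeping tests enabled (a sketch; combine with the generator flags shown in the build steps above):
```bash
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
  -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dtests=ON -Dxrpld=ON ..
```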
After any updates or changes to dependencies, you may need to do the following:
1. Remove your build directory.
2. Remove individual libraries from the Conan cache, e.g.
2. Remove the Conan cache: `conan remove "*" -c`
3. Re-run [conan install](#build-and-test).
```bash
conan remove 'grpc/*'
```
### 'protobuf/port_def.inc' file not found
**or**
Remove all libraries from Conan cache:
```bash
conan remove '*'
```
3. Re-run [conan export](#patched-recipes) if needed.
4. Re-run [conan install](#build-and-test).
### `protobuf/port_def.inc` file not found
If `cmake --build .` results in an error due to a missing protobuf file, then
you might have generated CMake files for a different `build_type` than the
`CMAKE_BUILD_TYPE` you passed to Conan.
If `cmake --build .` results in an error due to a missing protobuf file, then you might have generated CMake files for a different `build_type` than the `CMAKE_BUILD_TYPE` you passed to conan.
```
/rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found


@@ -10,35 +10,37 @@ platforms: Linux, macOS, or Windows.
Package ecosystems vary across Linux distributions,
so there is no one set of instructions that will work for every Linux user.
The instructions below are written for Debian 12 (Bookworm).
These instructions are written for Ubuntu 22.04.
They are largely copied from the [script][1] used to configure our Docker
container for continuous integration.
That script handles many more responsibilities.
These instructions are just the bare minimum to build one configuration of
rippled.
You can check that codebase for other Linux distributions and versions.
If you cannot find yours there,
then we hope that these instructions can at least guide you in the right
direction.
```
export GCC_RELEASE=12
sudo apt update
sudo apt install --yes gcc-${GCC_RELEASE} g++-${GCC_RELEASE} python3-pip \
python-is-python3 python3-venv python3-dev curl wget ca-certificates \
git build-essential cmake ninja-build libc6-dev
sudo pip install --break-system-packages conan
apt update
apt install --yes curl git libssl-dev pipx python3.10-dev python3-pip make g++-11 libprotobuf-dev protobuf-compiler
sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_RELEASE} 999
sudo update-alternatives --install \
/usr/bin/gcc gcc /usr/bin/gcc-${GCC_RELEASE} 100 \
--slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_RELEASE} \
--slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-${GCC_RELEASE} \
--slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-${GCC_RELEASE} \
--slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-${GCC_RELEASE} \
--slave /usr/bin/gcov gcov /usr/bin/gcov-${GCC_RELEASE} \
--slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_RELEASE} \
--slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_RELEASE} \
--slave /usr/bin/lto-dump lto-dump /usr/bin/lto-dump-${GCC_RELEASE}
sudo update-alternatives --auto cc
sudo update-alternatives --auto gcc
curl --location --remote-name \
"https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1.tar.gz"
tar -xzf cmake-3.25.1.tar.gz
rm cmake-3.25.1.tar.gz
cd cmake-3.25.1
./bootstrap --parallel=$(nproc)
make --jobs $(nproc)
make install
cd ..
pipx install 'conan<2'
pipx ensurepath
```
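After the script completes, you can verify that the alternatives system selected the intended compiler and that the tools are on your PATH (a sketch):
```
update-alternatives --display gcc
gcc --version
cmake --version
conan --version
```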
If you use a different Linux distribution, we hope the instructions above can
guide you in the right direction. We try to maintain compatibility with all
recent compiler releases, so if you use a rolling distribution like Arch or
CentOS, there is a chance that everything will "just work".
[1]: https://github.com/thejohnfreeman/rippled-docker/blob/master/ubuntu-22.04/install.sh
## macOS
@@ -51,34 +53,6 @@ minimum required (see [BUILD.md][]).
clang --version
```
### Install Xcode Specific Version (Optional)
If you develop other applications using Xcode, you might be consistently updating to the newest version of Apple Clang.
This will likely cause issues when building rippled. You may want to install a specific version of Xcode:
1. **Download Xcode**
- Visit [Apple Developer Downloads](https://developer.apple.com/download/more/)
- Sign in with your Apple Developer account
- Search for an Xcode version that includes **Apple Clang (Expected Version)**
- Download the `.xip` file
2. **Install and Configure Xcode**
```bash
# Extract the .xip file and rename for version management
# Example: Xcode_16.2.app
# Move to Applications directory
sudo mv Xcode_16.2.app /Applications/
# Set as default toolchain (persistent)
sudo xcode-select -s /Applications/Xcode_16.2.app/Contents/Developer
# Set as environment variable (temporary)
export DEVELOPER_DIR=/Applications/Xcode_16.2.app/Contents/Developer
```
The command line developer tools should include Git too:
```
@@ -98,10 +72,10 @@ and use it to install Conan:
brew update
brew install xz
brew install pyenv
pyenv install 3.11
pyenv global 3.11
pyenv install 3.10-dev
pyenv global 3.10-dev
eval "$(pyenv init -)"
pip install 'conan'
pip install 'conan<2'
```
Install CMake with Homebrew too:

external/README.md (vendored, 8 lines changed)

@@ -1,10 +1,14 @@
# External Conan recipes
The subdirectories in this directory contain copies of external libraries used
by rippled.
The subdirectories in this directory contain either copies or Conan recipes
of external libraries used by rippled.
The Conan recipes include patches we have not yet pushed upstream.
| Folder | Upstream | Description |
|:----------------|:---------------------------------------------|:------------|
| `antithesis-sdk`| [Project](https://github.com/antithesishq/antithesis-sdk-cpp/) | [Antithesis](https://antithesis.com/docs/using_antithesis/sdk/cpp/overview.html) SDK for C++ |
| `ed25519-donna` | [Project](https://github.com/floodyberry/ed25519-donna) | [Ed25519](http://ed25519.cr.yp.to/) digital signatures |
| `rocksdb` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/rocksdb) | Fast key/value database. (Supports rotational disks better than NuDB.) |
| `secp256k1` | [Project](https://github.com/bitcoin-core/secp256k1) | ECDSA digital signatures using the **secp256k1** curve |
| `snappy` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/snappy) | "Snappy" lossless compression algorithm. |
| `soci` | [Recipe](https://github.com/conan-io/conan-center-index/tree/master/recipes/soci) | Abstraction layer for database access. |

external/snappy/conandata.yml (new vendored file, 40 lines)

@@ -0,0 +1,40 @@
sources:
"1.1.10":
url: "https://github.com/google/snappy/archive/1.1.10.tar.gz"
sha256: "49d831bffcc5f3d01482340fe5af59852ca2fe76c3e05df0e67203ebbe0f1d90"
"1.1.9":
url: "https://github.com/google/snappy/archive/1.1.9.tar.gz"
sha256: "75c1fbb3d618dd3a0483bff0e26d0a92b495bbe5059c8b4f1c962b478b6e06e7"
"1.1.8":
url: "https://github.com/google/snappy/archive/1.1.8.tar.gz"
sha256: "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f"
"1.1.7":
url: "https://github.com/google/snappy/archive/1.1.7.tar.gz"
sha256: "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4"
patches:
"1.1.10":
- patch_file: "patches/1.1.10-0001-fix-inlining-failure.patch"
patch_description: "disable inlining for compilation error"
patch_type: "portability"
- patch_file: "patches/1.1.9-0002-no-Werror.patch"
patch_description: "disable 'warning as error' options"
patch_type: "portability"
- patch_file: "patches/1.1.10-0003-fix-clobber-list-older-llvm.patch"
patch_description: "disable inline asm on apple-clang"
patch_type: "portability"
- patch_file: "patches/1.1.9-0004-rtti-by-default.patch"
patch_description: "remove 'disable rtti'"
patch_type: "conan"
"1.1.9":
- patch_file: "patches/1.1.9-0001-fix-inlining-failure.patch"
patch_description: "disable inlining for compilation error"
patch_type: "portability"
- patch_file: "patches/1.1.9-0002-no-Werror.patch"
patch_description: "disable 'warning as error' options"
patch_type: "portability"
- patch_file: "patches/1.1.9-0003-fix-clobber-list-older-llvm.patch"
patch_description: "disable inline asm on apple-clang"
patch_type: "portability"
- patch_file: "patches/1.1.9-0004-rtti-by-default.patch"
patch_description: "remove 'disable rtti'"
patch_type: "conan"

external/snappy/conanfile.py (new vendored file, 89 lines)

@@ -0,0 +1,89 @@
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
from conan.tools.scm import Version
import os
required_conan_version = ">=1.54.0"
class SnappyConan(ConanFile):
name = "snappy"
description = "A fast compressor/decompressor"
topics = ("google", "compressor", "decompressor")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/google/snappy"
license = "BSD-3-Clause"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 11)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["SNAPPY_BUILD_TESTS"] = False
if Version(self.version) >= "1.1.8":
tc.variables["SNAPPY_FUZZING_BUILD"] = False
tc.variables["SNAPPY_REQUIRE_AVX"] = False
tc.variables["SNAPPY_REQUIRE_AVX2"] = False
tc.variables["SNAPPY_INSTALL"] = True
if Version(self.version) >= "1.1.9":
tc.variables["SNAPPY_BUILD_BENCHMARKS"] = False
tc.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "Snappy")
self.cpp_info.set_property("cmake_target_name", "Snappy::snappy")
# TODO: back to global scope in conan v2 once cmake_find_package* generators removed
self.cpp_info.components["snappylib"].libs = ["snappy"]
if not self.options.shared:
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["snappylib"].system_libs.append("m")
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.names["cmake_find_package"] = "Snappy"
self.cpp_info.names["cmake_find_package_multi"] = "Snappy"
self.cpp_info.components["snappylib"].names["cmake_find_package"] = "snappy"
self.cpp_info.components["snappylib"].names["cmake_find_package_multi"] = "snappy"
self.cpp_info.components["snappylib"].set_property("cmake_target_name", "Snappy::snappy")


@@ -0,0 +1,13 @@
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
index 1548ed7..3b4a9f3 100644
--- a/snappy-stubs-internal.h
+++ b/snappy-stubs-internal.h
@@ -100,7 +100,7 @@
// Inlining hints.
#if HAVE_ATTRIBUTE_ALWAYS_INLINE
-#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#else
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#endif // HAVE_ATTRIBUTE_ALWAYS_INLINE


@@ -0,0 +1,13 @@
diff --git a/snappy.cc b/snappy.cc
index d414718..e4efb59 100644
--- a/snappy.cc
+++ b/snappy.cc
@@ -1132,7 +1132,7 @@ inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
size_t literal_len = *tag >> 2;
size_t tag_type = *tag;
bool is_literal;
-#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) )
// TODO clang misses the fact that the (c & 3) already correctly
// sets the zero flag.
asm("and $3, %k[tag_type]\n\t"


@@ -0,0 +1,14 @@
Fixes the following error:
error: inlining failed in call to always_inline size_t snappy::AdvanceToNextTag(const uint8_t**, size_t*): function body can be overwritten at link time
--- snappy-stubs-internal.h
+++ snappy-stubs-internal.h
@@ -100,7 +100,7 @@
// Inlining hints.
#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
-#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#else
#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#endif


@@ -0,0 +1,12 @@
--- CMakeLists.txt
+++ CMakeLists.txt
@@ -69,7 +69,7 @@
- # Use -Werror for clang only.
+if(0)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-
+endif()


@@ -0,0 +1,12 @@
asm clobbers do not work for clang < 9 and apple-clang < 11 (found by SpaceIm)
--- snappy.cc
+++ snappy.cc
@@ -1026,7 +1026,7 @@
size_t literal_len = *tag >> 2;
size_t tag_type = *tag;
bool is_literal;
-#if defined(__GNUC__) && defined(__x86_64__)
+#if defined(__GNUC__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) )
// TODO clang misses the fact that the (c & 3) already correctly
// sets the zero flag.
asm("and $3, %k[tag_type]\n\t"


@@ -0,0 +1,20 @@
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -53,8 +53,6 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
add_definitions(-D_HAS_EXCEPTIONS=0)
# Disable RTTI.
- string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# Use -Wall for clang and gcc.
if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
@@ -78,8 +76,6 @@ endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
# Disable RTTI.
- string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to make

external/soci/conandata.yml (new vendored file, 12 lines)

@@ -0,0 +1,12 @@
sources:
"4.0.3":
url: "https://github.com/SOCI/soci/archive/v4.0.3.tar.gz"
sha256: "4b1ff9c8545c5d802fbe06ee6cd2886630e5c03bf740e269bb625b45cf934928"
patches:
"4.0.3":
- patch_file: "patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch"
patch_description: "Generate relocatable libraries on MacOS"
patch_type: "portability"
- patch_file: "patches/0002-Fix-soci_backend.patch"
patch_description: "Fix variable names for dependencies"
patch_type: "conan"

external/soci/conanfile.py (new vendored file, 212 lines)

@@ -0,0 +1,212 @@
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
from conan.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.55.0"
class SociConan(ConanFile):
name = "soci"
homepage = "https://github.com/SOCI/soci"
url = "https://github.com/conan-io/conan-center-index"
description = "The C++ Database Access Library "
topics = ("mysql", "odbc", "postgresql", "sqlite3")
license = "BSL-1.0"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"empty": [True, False],
"with_sqlite3": [True, False],
"with_db2": [True, False],
"with_odbc": [True, False],
"with_oracle": [True, False],
"with_firebird": [True, False],
"with_mysql": [True, False],
"with_postgresql": [True, False],
"with_boost": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"empty": False,
"with_sqlite3": False,
"with_db2": False,
"with_odbc": False,
"with_oracle": False,
"with_firebird": False,
"with_mysql": False,
"with_postgresql": False,
"with_boost": False,
}
def export_sources(self):
export_conandata_patches(self)
def layout(self):
cmake_layout(self, src_folder="src")
def config_options(self):
if self.settings.os == "Windows":
self.options.rm_safe("fPIC")
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def requirements(self):
if self.options.with_sqlite3:
self.requires("sqlite3/3.47.0")
if self.options.with_odbc and self.settings.os != "Windows":
self.requires("odbc/2.3.11")
if self.options.with_mysql:
self.requires("libmysqlclient/8.1.0")
if self.options.with_postgresql:
self.requires("libpq/15.5")
if self.options.with_boost:
self.requires("boost/1.86.0")
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"gcc": "4.8",
"clang": "3.8",
"apple-clang": "8.0"
}
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 11)
compiler = str(self.settings.compiler)
compiler_version = Version(self.settings.compiler.version.value)
if compiler not in self._minimum_compilers_version:
self.output.warning("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler))
elif compiler_version < self._minimum_compilers_version[compiler]:
raise ConanInvalidConfiguration("{} requires a {} version >= {}".format(self.name, compiler, compiler_version))
prefix = "Dependencies for"
message = "not configured in this conan package."
if self.options.with_db2:
# self.requires("db2/0.0.0") # TODO add support for db2
raise ConanInvalidConfiguration("{} DB2 {} ".format(prefix, message))
if self.options.with_oracle:
# self.requires("oracle_db/0.0.0") # TODO add support for oracle
raise ConanInvalidConfiguration("{} ORACLE {} ".format(prefix, message))
if self.options.with_firebird:
# self.requires("firebird/0.0.0") # TODO add support for firebird
raise ConanInvalidConfiguration("{} firebird {} ".format(prefix, message))
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["SOCI_SHARED"] = self.options.shared
tc.variables["SOCI_STATIC"] = not self.options.shared
tc.variables["SOCI_TESTS"] = False
tc.variables["SOCI_CXX11"] = True
tc.variables["SOCI_EMPTY"] = self.options.empty
tc.variables["WITH_SQLITE3"] = self.options.with_sqlite3
tc.variables["WITH_DB2"] = self.options.with_db2
tc.variables["WITH_ODBC"] = self.options.with_odbc
tc.variables["WITH_ORACLE"] = self.options.with_oracle
tc.variables["WITH_FIREBIRD"] = self.options.with_firebird
tc.variables["WITH_MYSQL"] = self.options.with_mysql
tc.variables["WITH_POSTGRESQL"] = self.options.with_postgresql
tc.variables["WITH_BOOST"] = self.options.with_boost
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE_1_0.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "SOCI")
target_suffix = "" if self.options.shared else "_static"
lib_prefix = "lib" if is_msvc(self) and not self.options.shared else ""
version = Version(self.version)
lib_suffix = "_{}_{}".format(version.major, version.minor) if self.settings.os == "Windows" else ""
# soci_core
self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix))
self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)]
if self.options.with_boost:
self.cpp_info.components["soci_core"].requires.append("boost::headers")
# soci_empty
if self.options.empty:
self.cpp_info.components["soci_empty"].set_property("cmake_target_name", "SOCI::soci_empty{}".format(target_suffix))
self.cpp_info.components["soci_empty"].libs = ["{}soci_empty{}".format(lib_prefix, lib_suffix)]
self.cpp_info.components["soci_empty"].requires = ["soci_core"]
# soci_sqlite3
if self.options.with_sqlite3:
self.cpp_info.components["soci_sqlite3"].set_property("cmake_target_name", "SOCI::soci_sqlite3{}".format(target_suffix))
self.cpp_info.components["soci_sqlite3"].libs = ["{}soci_sqlite3{}".format(lib_prefix, lib_suffix)]
self.cpp_info.components["soci_sqlite3"].requires = ["soci_core", "sqlite3::sqlite3"]
# soci_odbc
if self.options.with_odbc:
self.cpp_info.components["soci_odbc"].set_property("cmake_target_name", "SOCI::soci_odbc{}".format(target_suffix))
self.cpp_info.components["soci_odbc"].libs = ["{}soci_odbc{}".format(lib_prefix, lib_suffix)]
self.cpp_info.components["soci_odbc"].requires = ["soci_core"]
if self.settings.os == "Windows":
self.cpp_info.components["soci_odbc"].system_libs.append("odbc32")
else:
self.cpp_info.components["soci_odbc"].requires.append("odbc::odbc")
# soci_mysql
if self.options.with_mysql:
self.cpp_info.components["soci_mysql"].set_property("cmake_target_name", "SOCI::soci_mysql{}".format(target_suffix))
self.cpp_info.components["soci_mysql"].libs = ["{}soci_mysql{}".format(lib_prefix, lib_suffix)]
self.cpp_info.components["soci_mysql"].requires = ["soci_core", "libmysqlclient::libmysqlclient"]
# soci_postgresql
if self.options.with_postgresql:
self.cpp_info.components["soci_postgresql"].set_property("cmake_target_name", "SOCI::soci_postgresql{}".format(target_suffix))
self.cpp_info.components["soci_postgresql"].libs = ["{}soci_postgresql{}".format(lib_prefix, lib_suffix)]
self.cpp_info.components["soci_postgresql"].requires = ["soci_core", "libpq::libpq"]
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.names["cmake_find_package"] = "SOCI"
self.cpp_info.names["cmake_find_package_multi"] = "SOCI"
self.cpp_info.components["soci_core"].names["cmake_find_package"] = "soci_core{}".format(target_suffix)
self.cpp_info.components["soci_core"].names["cmake_find_package_multi"] = "soci_core{}".format(target_suffix)
if self.options.empty:
self.cpp_info.components["soci_empty"].names["cmake_find_package"] = "soci_empty{}".format(target_suffix)
self.cpp_info.components["soci_empty"].names["cmake_find_package_multi"] = "soci_empty{}".format(target_suffix)
if self.options.with_sqlite3:
self.cpp_info.components["soci_sqlite3"].names["cmake_find_package"] = "soci_sqlite3{}".format(target_suffix)
self.cpp_info.components["soci_sqlite3"].names["cmake_find_package_multi"] = "soci_sqlite3{}".format(target_suffix)
if self.options.with_odbc:
self.cpp_info.components["soci_odbc"].names["cmake_find_package"] = "soci_odbc{}".format(target_suffix)
self.cpp_info.components["soci_odbc"].names["cmake_find_package_multi"] = "soci_odbc{}".format(target_suffix)
if self.options.with_mysql:
self.cpp_info.components["soci_mysql"].names["cmake_find_package"] = "soci_mysql{}".format(target_suffix)
self.cpp_info.components["soci_mysql"].names["cmake_find_package_multi"] = "soci_mysql{}".format(target_suffix)
if self.options.with_postgresql:
self.cpp_info.components["soci_postgresql"].names["cmake_find_package"] = "soci_postgresql{}".format(target_suffix)
self.cpp_info.components["soci_postgresql"].names["cmake_find_package_multi"] = "soci_postgresql{}".format(target_suffix)


@@ -0,0 +1,39 @@
From d491bf7b5040d314ffd0c6310ba01f78ff44c85e Mon Sep 17 00:00:00 2001
From: Rasmus Thomsen <rasmus.thomsen@dampsoft.de>
Date: Fri, 14 Apr 2023 09:16:29 +0200
Subject: [PATCH] Remove hardcoded INSTALL_NAME_DIR for relocatable libraries
on MacOS
---
cmake/SociBackend.cmake | 2 +-
src/core/CMakeLists.txt | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake
index 5d4ef0df..39fe1f77 100644
--- a/cmake/SociBackend.cmake
+++ b/cmake/SociBackend.cmake
@@ -171,7 +171,7 @@ macro(soci_backend NAME)
set_target_properties(${THIS_BACKEND_TARGET}
PROPERTIES
SOVERSION ${${PROJECT_NAME}_SOVERSION}
- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib)
+ )
if(APPLE)
set_target_properties(${THIS_BACKEND_TARGET}
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 3e7deeae..f9eae564 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -59,7 +59,6 @@ if (SOCI_SHARED)
PROPERTIES
VERSION ${SOCI_VERSION}
SOVERSION ${SOCI_SOVERSION}
- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib
CLEAN_DIRECT_OUTPUT 1)
endif()
--
2.25.1


@@ -0,0 +1,24 @@
diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake
index 0a664667..3fa2ed95 100644
--- a/cmake/SociBackend.cmake
+++ b/cmake/SociBackend.cmake
@@ -31,14 +31,13 @@ macro(soci_backend_deps_found NAME DEPS SUCCESS)
if(NOT DEPEND_FOUND)
list(APPEND DEPS_NOT_FOUND ${dep})
else()
- string(TOUPPER "${dep}" DEPU)
- if( ${DEPU}_INCLUDE_DIR )
- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIR})
+ if( ${dep}_INCLUDE_DIR )
+ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIR})
endif()
- if( ${DEPU}_INCLUDE_DIRS )
- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIRS})
+ if( ${dep}_INCLUDE_DIRS )
+ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIRS})
endif()
- list(APPEND DEPS_LIBRARIES ${${DEPU}_LIBRARIES})
+ list(APPEND DEPS_LIBRARIES ${${dep}_LIBRARIES})
endif()
endforeach()


@@ -21,6 +21,7 @@
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <ostream>


@@ -90,6 +90,9 @@ public:
int
getCacheSize() const;
int
getTrackSize() const;
float
getHitRate();
@@ -167,6 +170,9 @@ public:
bool
retrieve(key_type const& key, T& data);
mutex_type&
peekMutex();
std::vector<key_type>
getKeys() const;
@@ -187,14 +193,11 @@ public:
private:
SharedPointerType
initialFetch(key_type const& key);
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
void
collect_metrics();
Mutex&
lockPartition(key_type const& key) const;
private:
struct Stats
{
@@ -297,8 +300,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemoval,
Mutex& partitionLock);
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
[[nodiscard]] std::thread
sweepHelper(
@@ -307,12 +310,14 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
Mutex& partitionLock);
std::lock_guard<std::recursive_mutex> const&);
beast::Journal m_journal;
clock_type& m_clock;
Stats m_stats;
mutex_type mutable m_mutex;
// Used for logging
std::string m_name;
@@ -323,11 +328,10 @@ private:
clock_type::duration const m_target_age;
// Number of items cached
std::atomic<int> m_cache_count;
int m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects
std::atomic<std::uint64_t> m_hits;
std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
std::uint64_t m_hits;
std::uint64_t m_misses;
};
} // namespace ripple


@@ -61,7 +61,6 @@ inline TaggedCache<
, m_hits(0)
, m_misses(0)
{
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
}
template <
@@ -107,13 +106,8 @@ TaggedCache<
KeyEqual,
Mutex>::size() const
{
std::size_t totalSize = 0;
for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
std::lock_guard lock(m_mutex);
return m_cache.size();
}
template <
@@ -136,7 +130,32 @@ TaggedCache<
KeyEqual,
Mutex>::getCacheSize() const
{
return m_cache_count.load(std::memory_order_relaxed);
std::lock_guard lock(m_mutex);
return m_cache_count;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline int
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
}
template <
@@ -159,10 +178,9 @@ TaggedCache<
KeyEqual,
Mutex>::getHitRate()
{
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
float const total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
}
template <
@@ -185,12 +203,9 @@ TaggedCache<
KeyEqual,
Mutex>::clear()
{
for (auto& mutex : partitionLocks_)
mutex.lock();
std::lock_guard lock(m_mutex);
m_cache.clear();
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
m_cache_count = 0;
}
template <
@@ -213,9 +228,11 @@ TaggedCache<
KeyEqual,
Mutex>::reset()
{
clear();
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
}
template <
@@ -239,7 +256,7 @@ TaggedCache<
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));
std::lock_guard lock(m_mutex);
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{
@@ -281,6 +298,8 @@ TaggedCache<
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
@@ -312,13 +331,12 @@ TaggedCache<
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
partitionLocks_[p]));
lock));
}
for (std::thread& worker : workers)
worker.join();
int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
m_cache_count -= allRemovals;
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
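This comment is the heart of the sweep design: strong pointers collected under the lock are released only after the lock is dropped, so element destructors never run inside the critical section. A minimal sketch of the pattern (hypothetical names; sweeps everything for brevity):

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

std::mutex m;
std::unordered_map<int, std::shared_ptr<std::string>> cache;

void
sweepExpired()
{
    // Keep the swept pointers alive past the critical section so that
    // any expensive destructors run without the lock held.
    std::vector<std::shared_ptr<std::string>> swept;
    {
        std::lock_guard lock(m);
        swept.reserve(cache.size());
        for (auto it = cache.begin(); it != cache.end();)
        {
            swept.push_back(std::move(it->second));
            it = cache.erase(it);
        }
    }
    // `swept` is destroyed here, outside the lock, dropping the last
    // reference to each entry.
}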
@@ -352,8 +370,7 @@ TaggedCache<
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
std::lock_guard<Mutex> lock(lockPartition(key));
std::lock_guard lock(m_mutex);
auto cit = m_cache.find(key);
@@ -366,7 +383,7 @@ TaggedCache<
if (entry.isCached())
{
m_cache_count.fetch_sub(1, std::memory_order_relaxed);
--m_cache_count;
entry.ptr.convertToWeak();
ret = true;
}
@@ -404,16 +421,17 @@ TaggedCache<
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
return false;
}
@@ -462,12 +480,12 @@ TaggedCache<
data = cachedData;
}
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
return true;
}
entry.ptr = data;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
return false;
}
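The canonicalize contract ("true = we had the data already") is exercised by the TaggedCache unit test further down in this diff; a condensed fragment (assumes a cache `c` of std::string values and key 4, as in that test):

// Condensed from the TaggedCache unit test later in this diff:
auto p1 = c.fetch(4);                             // canonical object
auto p2 = std::make_shared<std::string>("four");  // caller's duplicate
if (c.canonicalize_replace_client(4, p2))
{
    // true: the key was already cached, and p2 has been rebound to
    // the canonical object, so both names now alias one instance.
    assert(p1.get() == p2.get());
}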
@@ -543,11 +561,10 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));
auto ret = initialFetch(key);
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
if (!ret)
m_misses.fetch_add(1, std::memory_order_relaxed);
++m_misses;
return ret;
}
@@ -611,8 +628,8 @@ TaggedCache<
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
@@ -652,6 +669,29 @@ TaggedCache<
return true;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
template <
class Key,
class T,
@@ -675,13 +715,10 @@ TaggedCache<
std::vector<key_type> v;
{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
for (auto const& _ : m_cache)
v.push_back(_.first);
}
return v;
@@ -707,12 +744,11 @@ TaggedCache<
KeyEqual,
Mutex>::rate() const
{
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
if (tot == 0)
return 0.0;
return double(hits) / tot;
return 0;
return double(m_hits) / tot;
}
template <
@@ -736,16 +772,18 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
std::lock_guard<Mutex> lock(lockPartition(digest));
if (auto ret = initialFetch(digest))
return ret;
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}
auto sle = h();
if (!sle)
return {};
m_misses.fetch_add(1, std::memory_order_relaxed);
std::lock_guard l(m_mutex);
++m_misses;
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
@@ -772,10 +810,9 @@ TaggedCache<
SharedPointerType,
Hash,
KeyEqual,
Mutex>::initialFetch(key_type const& key)
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
@@ -783,7 +820,7 @@ TaggedCache<
Entry& entry = cit->second;
if (entry.isCached())
{
m_hits.fetch_add(1, std::memory_order_relaxed);
++m_hits;
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
@@ -791,13 +828,12 @@ TaggedCache<
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
m_cache.erase(cit);
return {};
}
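The second branch of initialFetch upgrades a weak reference back to a strong one before re-counting the entry as cached. The mechanism is analogous to the standard weak_ptr::lock() pattern, shown standalone (TaggedCache itself uses SharedWeakUnionPointer::getStrong() rather than std::weak_ptr):

#include <memory>
#include <string>

std::shared_ptr<std::string>
upgrade(std::weak_ptr<std::string> const& weak)
{
    // lock() returns a strong pointer if the object is still alive
    // anywhere, or an empty pointer if it has already been destroyed.
    if (auto strong = weak.lock())
        return strong;  // re-cache and count it
    return nullptr;     // expired: erase the stale map entry
}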
@@ -826,11 +862,10 @@ TaggedCache<
{
beast::insight::Gauge::value_type hit_rate(0);
{
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
if (total != 0)
hit_rate = (hits * 100) / total;
hit_rate = (m_hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}
@@ -861,7 +896,7 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
Mutex& partitionLock)
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KVCache");
@@ -869,8 +904,6 @@ TaggedCache<
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size());
@@ -954,7 +987,7 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
Mutex& partitionLock)
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KCache");
@@ -962,8 +995,6 @@ TaggedCache<
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{
@@ -998,29 +1029,6 @@ TaggedCache<
});
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}
} // namespace ripple
#endif

View File

@@ -277,12 +277,6 @@ public:
return map_;
}
partition_map_type const&
map() const
{
return map_;
}
iterator
begin()
{
@@ -327,12 +321,6 @@ public:
return cend();
}
std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}
private:
template <class T>
void

View File

@@ -24,111 +24,32 @@
#include <xxhash.h>
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <span>
#include <new>
#include <type_traits>
namespace beast {
class xxhasher
{
public:
using result_type = std::size_t;
static int volatile test;
private:
static_assert(sizeof(std::size_t) == 8, "requires 64-bit std::size_t");
// Have an internal buffer to avoid the streaming API
// A 64-byte buffer should be big enough for us
static constexpr std::size_t INTERNAL_BUFFER_SIZE = 64;
// requires 64-bit std::size_t
static_assert(sizeof(std::size_t) == 8, "");
alignas(64) std::array<std::uint8_t, INTERNAL_BUFFER_SIZE> buffer_;
std::span<std::uint8_t> readBuffer_;
std::span<std::uint8_t> writeBuffer_;
std::optional<XXH64_hash_t> seed_;
XXH3_state_t* state_ = nullptr;
void
resetBuffers()
{
writeBuffer_ = std::span{buffer_};
readBuffer_ = {};
}
void
updateHash(void const* data, std::size_t len)
{
if (writeBuffer_.size() < len)
{
flushToState(data, len);
}
else
{
std::memcpy(writeBuffer_.data(), data, len);
writeBuffer_ = writeBuffer_.subspan(len);
readBuffer_ = std::span{
std::begin(buffer_), buffer_.size() - writeBuffer_.size()};
}
}
XXH3_state_t* state_;
static XXH3_state_t*
allocState()
{
auto ret = XXH3_createState();
if (ret == nullptr)
throw std::bad_alloc(); // LCOV_EXCL_LINE
throw std::bad_alloc();
return ret;
}
void
flushToState(void const* data, std::size_t len)
{
if (!state_)
{
state_ = allocState();
if (seed_.has_value())
{
XXH3_64bits_reset_withSeed(state_, *seed_);
}
else
{
XXH3_64bits_reset(state_);
}
}
XXH3_64bits_update(state_, readBuffer_.data(), readBuffer_.size());
resetBuffers();
if (data && len)
{
XXH3_64bits_update(state_, data, len);
}
}
result_type
retrieveHash()
{
if (state_)
{
flushToState(nullptr, 0);
return XXH3_64bits_digest(state_);
}
else
{
if (seed_.has_value())
{
return XXH3_64bits_withSeed(
readBuffer_.data(), readBuffer_.size(), *seed_);
}
else
{
return XXH3_64bits(readBuffer_.data(), readBuffer_.size());
}
}
}
public:
using result_type = std::size_t;
static constexpr auto const endian = boost::endian::order::native;
xxhasher(xxhasher const&) = delete;
@@ -137,44 +58,43 @@ public:
xxhasher()
{
resetBuffers();
state_ = allocState();
XXH3_64bits_reset(state_);
}
~xxhasher() noexcept
{
if (state_)
{
test = test + 1;
XXH3_freeState(state_);
}
XXH3_freeState(state_);
}
template <
class Seed,
std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
explicit xxhasher(Seed seed) : seed_(seed)
explicit xxhasher(Seed seed)
{
resetBuffers();
state_ = allocState();
XXH3_64bits_reset_withSeed(state_, seed);
}
template <
class Seed,
std::enable_if_t<std::is_unsigned<Seed>::value>* = nullptr>
xxhasher(Seed seed, Seed) : seed_(seed)
xxhasher(Seed seed, Seed)
{
resetBuffers();
state_ = allocState();
XXH3_64bits_reset_withSeed(state_, seed);
}
void
operator()(void const* key, std::size_t len) noexcept
{
updateHash(key, len);
XXH3_64bits_update(state_, key, len);
}
explicit
operator result_type() noexcept
operator std::size_t() noexcept
{
return retrieveHash();
return XXH3_64bits_digest(state_);
}
};
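The replacement class goes back to allocating an XXH3 state eagerly and streaming every update into it, dropping the 64-byte staging buffer. The underlying xxHash calls it relies on, in isolation (this is the real xxHash streaming API):

#include <xxhash.h>
#include <cstdio>
#include <string>

int
main()
{
    XXH3_state_t* state = XXH3_createState();
    if (!state)
        return 1;
    XXH3_64bits_reset(state);  // or XXH3_64bits_reset_withSeed(state, seed)
    std::string const data = "Hello, xxHash!";
    XXH3_64bits_update(state, data.data(), data.size());
    XXH64_hash_t const h = XXH3_64bits_digest(state);
    XXH3_freeState(state);
    std::printf("%llu\n", static_cast<unsigned long long>(h));
    return 0;
}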

View File

@@ -22,6 +22,7 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint>

View File

@@ -32,7 +32,6 @@
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
@@ -40,7 +39,7 @@ XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo
XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions

View File

@@ -678,61 +678,6 @@ private:
oracle.set(
UpdateArg{.series = {{"XRP", "USD", 742, 2}}, .fee = baseFee});
}
for (bool const withFixOrder : {false, true})
{
// Should be same order as creation
Env env(
*this,
withFixOrder ? testable_amendments()
: testable_amendments() - fixPriceOracleOrder);
auto const baseFee =
static_cast<int>(env.current()->fees().base.drops());
auto test = [&](Env& env, DataSeries const& series) {
env.fund(XRP(1'000), owner);
Oracle oracle(
env, {.owner = owner, .series = series, .fee = baseFee});
BEAST_EXPECT(oracle.exists());
auto sle = env.le(keylet::oracle(owner, oracle.documentID()));
BEAST_EXPECT(
sle->getFieldArray(sfPriceDataSeries).size() ==
series.size());
auto const beforeQuoteAssetName1 =
sle->getFieldArray(sfPriceDataSeries)[0]
.getFieldCurrency(sfQuoteAsset)
.getText();
auto const beforeQuoteAssetName2 =
sle->getFieldArray(sfPriceDataSeries)[1]
.getFieldCurrency(sfQuoteAsset)
.getText();
oracle.set(UpdateArg{.series = series, .fee = baseFee});
sle = env.le(keylet::oracle(owner, oracle.documentID()));
auto const afterQuoteAssetName1 =
sle->getFieldArray(sfPriceDataSeries)[0]
.getFieldCurrency(sfQuoteAsset)
.getText();
auto const afterQuoteAssetName2 =
sle->getFieldArray(sfPriceDataSeries)[1]
.getFieldCurrency(sfQuoteAsset)
.getText();
if (env.current()->rules().enabled(fixPriceOracleOrder))
{
BEAST_EXPECT(afterQuoteAssetName1 == beforeQuoteAssetName1);
BEAST_EXPECT(afterQuoteAssetName2 == beforeQuoteAssetName2);
}
else
{
BEAST_EXPECT(afterQuoteAssetName1 != beforeQuoteAssetName1);
BEAST_EXPECT(afterQuoteAssetName2 != beforeQuoteAssetName2);
}
};
test(env, {{"XRP", "USD", 742, 2}, {"XRP", "EUR", 711, 2}});
}
}
void

View File

@@ -58,10 +58,10 @@ public:
// Insert an item, retrieve it, and age it so it gets purged.
{
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(!c.insert(1, "one"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
{
std::string s;
@@ -72,7 +72,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}
// Insert an item, maintain a strong pointer, age it, and
@@ -80,7 +80,7 @@ public:
{
BEAST_EXPECT(!c.insert(2, "two"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
{
auto p = c.fetch(2);
@@ -88,14 +88,14 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
}
// Make sure it's gone now that our reference is gone
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}
// Insert the same key/value pair and make sure we get the same result
@@ -111,7 +111,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}
// Put an object in but keep a strong pointer to it, advance the clock a
@@ -121,24 +121,24 @@ public:
// Put an object in
BEAST_EXPECT(!c.insert(4, "four"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
{
// Keep a strong pointer to it
auto const p1 = c.fetch(4);
BEAST_EXPECT(p1 != nullptr);
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
// Advance the clock a lot
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
// Canonicalize a new object with the same key
auto p2 = std::make_shared<std::string>("four");
BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
// Make sure we get the original object
BEAST_EXPECT(p1.get() == p2.get());
}
@@ -146,7 +146,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}
}
};

View File

@@ -1,246 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/beast/hash/xxhasher.h>
#include <xrpl/beast/unit_test.h>
namespace beast {
int volatile xxhasher::test = 0;
class XXHasher_test : public unit_test::suite
{
public:
void
testWithoutSeed()
{
testcase("Without seed");
xxhasher hasher{};
std::string objectToHash{"Hello, xxHash!"};
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
16042857369214894119ULL);
}
void
testWithSeed()
{
testcase("With seed");
xxhasher hasher{static_cast<std::uint32_t>(102)};
std::string objectToHash{"Hello, xxHash!"};
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
14440132435660934800ULL);
}
void
testWithTwoSeeds()
{
testcase("With two seeds");
xxhasher hasher{
static_cast<std::uint32_t>(102), static_cast<std::uint32_t>(103)};
std::string objectToHash{"Hello, xxHash!"};
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
14440132435660934800ULL);
}
void
testBigObjectWithMultipleSmallUpdatesWithoutSeed()
{
testcase("Big object with multiple small updates without seed");
xxhasher hasher{};
std::string objectToHash{"Hello, xxHash!"};
for (int i = 0; i < 100; i++)
{
hasher(objectToHash.data(), objectToHash.size());
}
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
15296278154063476002ULL);
}
void
testBigObjectWithMultipleSmallUpdatesWithSeed()
{
testcase("Big object with multiple small updates with seed");
xxhasher hasher{static_cast<std::uint32_t>(103)};
std::string objectToHash{"Hello, xxHash!"};
for (int i = 0; i < 100; i++)
{
hasher(objectToHash.data(), objectToHash.size());
}
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
17285302196561698791ULL);
BEAST_EXPECT(xxhasher::test > 0);
}
void
testBigObjectWithSmallAndBigUpdatesWithoutSeed()
{
testcase("Big object with small and big updates without seed");
xxhasher hasher{};
std::string objectToHash{"Hello, xxHash!"};
std::string bigObject;
for (int i = 0; i < 20; i++)
{
bigObject += "Hello, xxHash!";
}
hasher(objectToHash.data(), objectToHash.size());
hasher(bigObject.data(), bigObject.size());
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
1865045178324729219ULL);
BEAST_EXPECT(xxhasher::test > 0);
}
void
testBigObjectWithSmallAndBigUpdatesWithSeed()
{
testcase("Big object with small and big updates with seed");
xxhasher hasher{static_cast<std::uint32_t>(103)};
std::string objectToHash{"Hello, xxHash!"};
std::string bigObject;
for (int i = 0; i < 20; i++)
{
bigObject += "Hello, xxHash!";
}
hasher(objectToHash.data(), objectToHash.size());
hasher(bigObject.data(), bigObject.size());
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
16189862915636005281ULL);
BEAST_EXPECT(xxhasher::test > 0);
}
void
testBigObjectWithOneUpdateWithoutSeed()
{
testcase("Big object with one update without seed");
xxhasher hasher{};
std::string objectToHash;
for (int i = 0; i < 100; i++)
{
objectToHash += "Hello, xxHash!";
}
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
15296278154063476002ULL);
BEAST_EXPECT(xxhasher::test > 0);
}
void
testBigObjectWithOneUpdateWithSeed()
{
testcase("Big object with one update with seed");
xxhasher hasher{static_cast<std::uint32_t>(103)};
std::string objectToHash;
for (int i = 0; i < 100; i++)
{
objectToHash += "Hello, xxHash!";
}
hasher(objectToHash.data(), objectToHash.size());
BEAST_EXPECT(
static_cast<xxhasher::result_type>(hasher) ==
17285302196561698791ULL);
BEAST_EXPECT(xxhasher::test > 0);
}
void
testOperatorResultTypeDoesNotChangeInternalState()
{
testcase("Operator result type doesn't change the internal state");
{
xxhasher hasher;
std::string object{"Hello xxhash"};
hasher(object.data(), object.size());
auto xxhashResult1 = static_cast<xxhasher::result_type>(hasher);
auto xxhashResult2 = static_cast<xxhasher::result_type>(hasher);
BEAST_EXPECT(xxhashResult1 == xxhashResult2);
}
{
xxhasher hasher;
std::string object;
for (int i = 0; i < 100; i++)
{
object += "Hello, xxHash!";
}
hasher(object.data(), object.size());
auto xxhashResult1 = hasher.operator xxhasher::result_type();
auto xxhashResult2 = hasher.operator xxhasher::result_type();
BEAST_EXPECT(xxhashResult1 == xxhashResult2);
}
BEAST_EXPECT(xxhasher::test > 0);
}
void
run() override
{
testWithoutSeed();
testWithSeed();
testWithTwoSeeds();
testBigObjectWithMultipleSmallUpdatesWithoutSeed();
testBigObjectWithMultipleSmallUpdatesWithSeed();
testBigObjectWithSmallAndBigUpdatesWithoutSeed();
testBigObjectWithSmallAndBigUpdatesWithSeed();
testBigObjectWithOneUpdateWithoutSeed();
testBigObjectWithOneUpdateWithSeed();
testOperatorResultTypeDoesNotChangeInternalState();
}
};
BEAST_DEFINE_TESTSUITE(XXHasher, beast_core, beast);
} // namespace beast

View File

@@ -63,6 +63,8 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash");
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger);
if (validated)
@@ -74,6 +76,7 @@ LedgerHistory::insert(
LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second;
return {};
@@ -83,11 +86,13 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(index);
if (it != mLedgersByIndex.end())
{
uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash);
}
}
@@ -103,6 +108,7 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
// Add this ledger to the local tracking by index
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
XRPL_ASSERT(
ret->isImmutable(),
@@ -452,6 +458,8 @@ LedgerHistory::builtLedger(
XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -492,6 +500,8 @@ LedgerHistory::validatedLedger(
!hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -525,9 +535,10 @@ LedgerHistory::validatedLedger(
bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
auto ledger = m_ledgers_by_hash.fetch(ledgerHash);
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(ledgerIndex);
if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash))
if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))
{
it->second = ledgerHash;
return false;
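The unique_lock lines added above take the cache's own mutex via peekMutex(), so the TaggedCache operation and the mLedgersByIndex update happen as one atomic step. A sketch of the pattern with toy types (peekMutex mirrors the accessor added to TaggedCache earlier in this diff):

#include <map>
#include <mutex>

struct Cache
{
    std::recursive_mutex&
    peekMutex()
    {
        return mutex_;
    }
    // ... canonicalize, fetch, etc., each locking mutex_ internally.
    std::recursive_mutex mutex_;
};

Cache cache;
std::map<int, int> sideIndex;  // guarded by the same mutex

void
insertBoth(int seq, int value)
{
    // One lock covers the cache operation and the side-map update, so
    // readers never observe one without the other. A recursive mutex
    // lets the cache's own internal locking re-enter safely.
    std::unique_lock lock(cache.peekMutex());
    sideIndex[seq] = value;
    // cache.canonicalize_replace_cache(...) would go here.
}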

View File

@@ -175,18 +175,126 @@ MPTokenAuthorize::createMPToken(
return tesSUCCESS;
}
TER
MPTokenAuthorize::authorize(
ApplyView& view,
beast::Journal journal,
MPTAuthorizeArgs const& args)
{
auto const sleAcct = view.peek(keylet::account(args.account));
if (!sleAcct)
return tecINTERNAL;
// If the account that submitted the tx is a holder
// Note: `account_` is holder's account
// `holderID` is NOT used
if (!args.holderID)
{
// When a holder wants to unauthorize/delete an MPT, the ledger must
// - delete mptokenKey from owner directory
// - delete the MPToken
if (args.flags & tfMPTUnauthorize)
{
auto const mptokenKey =
keylet::mptoken(args.mptIssuanceID, args.account);
auto const sleMpt = view.peek(mptokenKey);
if (!sleMpt || (*sleMpt)[sfMPTAmount] != 0)
return tecINTERNAL; // LCOV_EXCL_LINE
if (!view.dirRemove(
keylet::ownerDir(args.account),
(*sleMpt)[sfOwnerNode],
sleMpt->key(),
false))
return tecINTERNAL; // LCOV_EXCL_LINE
adjustOwnerCount(view, sleAcct, -1, journal);
view.erase(sleMpt);
return tesSUCCESS;
}
// A potential holder wants to authorize/hold an MPT, the ledger must:
// - add the new mptokenKey to the owner directory
// - create the MPToken object for the holder
// The reserve that is required to create the MPToken. Note
// that although the reserve increases with every item
// an account owns, in the case of MPTokens we only
// *enforce* a reserve if the user owns more than two
// items. This is similar to the reserve requirements of trust lines.
std::uint32_t const uOwnerCount = sleAcct->getFieldU32(sfOwnerCount);
XRPAmount const reserveCreate(
(uOwnerCount < 2) ? XRPAmount(beast::zero)
: view.fees().accountReserve(uOwnerCount + 1));
if (args.priorBalance < reserveCreate)
return tecINSUFFICIENT_RESERVE;
auto const mptokenKey =
keylet::mptoken(args.mptIssuanceID, args.account);
auto mptoken = std::make_shared<SLE>(mptokenKey);
if (auto ter = dirLink(view, args.account, mptoken))
return ter; // LCOV_EXCL_LINE
(*mptoken)[sfAccount] = args.account;
(*mptoken)[sfMPTokenIssuanceID] = args.mptIssuanceID;
(*mptoken)[sfFlags] = 0;
view.insert(mptoken);
// Update owner count.
adjustOwnerCount(view, sleAcct, 1, journal);
return tesSUCCESS;
}
auto const sleMptIssuance =
view.read(keylet::mptIssuance(args.mptIssuanceID));
if (!sleMptIssuance)
return tecINTERNAL;
// If the account that submitted this tx is the issuer of the MPT
// Note: `account_` is issuer's account
// `holderID` is holder's account
if (args.account != (*sleMptIssuance)[sfIssuer])
return tecINTERNAL;
auto const sleMpt =
view.peek(keylet::mptoken(args.mptIssuanceID, *args.holderID));
if (!sleMpt)
return tecINTERNAL;
std::uint32_t const flagsIn = sleMpt->getFieldU32(sfFlags);
std::uint32_t flagsOut = flagsIn;
// Issuer wants to unauthorize the holder, unset lsfMPTAuthorized on
// their MPToken
if (args.flags & tfMPTUnauthorize)
flagsOut &= ~lsfMPTAuthorized;
// Issuer wants to authorize a holder, set lsfMPTAuthorized on their
// MPToken
else
flagsOut |= lsfMPTAuthorized;
if (flagsIn != flagsOut)
sleMpt->setFieldU32(sfFlags, flagsOut);
view.update(sleMpt);
return tesSUCCESS;
}
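To make the reserve rule concrete with numbers (illustrative fee values only, not real network parameters): the first two owned items incur no enforced reserve, and the third starts paying the full incremented reserve.

#include <cstdint>
#include <iostream>

// Illustrative only: suppose accountReserve(count) = 10 + 2 * count,
// standing in for view.fees().accountReserve().
std::uint64_t
accountReserve(std::uint32_t ownerCount)
{
    return 10 + 2ull * ownerCount;
}

int
main()
{
    for (std::uint32_t uOwnerCount : {0u, 1u, 2u, 3u})
    {
        // Same shape as the rule above: below two owned items the
        // enforced reserve is zero; from the third item on, the holder
        // must cover accountReserve(uOwnerCount + 1).
        std::uint64_t const reserveCreate =
            (uOwnerCount < 2) ? 0 : accountReserve(uOwnerCount + 1);
        std::cout << "ownerCount=" << uOwnerCount
                  << " reserveCreate=" << reserveCreate << '\n';
    }
}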
TER
MPTokenAuthorize::doApply()
{
auto const& tx = ctx_.tx;
return authorizeMPToken(
return authorize(
ctx_.view(),
mPriorBalance,
tx[sfMPTokenIssuanceID],
account_,
ctx_.journal,
tx.getFlags(),
tx[~sfHolder]);
{.priorBalance = mPriorBalance,
.mptIssuanceID = tx[sfMPTokenIssuanceID],
.account = account_,
.flags = tx.getFlags(),
.holderID = tx[~sfHolder]});
}
} // namespace ripple
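MPTAuthorizeArgs itself is not shown in this diff, but the designated initializers at the call sites (.priorBalance, .mptIssuanceID, .account, .flags, .holderID) suggest an aggregate of roughly this shape. A hypothetical reconstruction, with field types inferred from the former positional parameters:

// Hypothetical reconstruction; the real definition lives in the
// MPTokenAuthorize header and may differ in names and defaults.
struct MPTAuthorizeArgs
{
    XRPAmount priorBalance;
    MPTID mptIssuanceID;
    AccountID account;
    std::uint32_t flags = 0;
    std::optional<AccountID> holderID;
};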

View File

@@ -48,6 +48,12 @@ public:
static TER
preclaim(PreclaimContext const& ctx);
static TER
authorize(
ApplyView& view,
beast::Journal journal,
MPTAuthorizeArgs const& args);
static TER
createMPToken(
ApplyView& view,

View File

@@ -209,17 +209,6 @@ SetOracle::doApply()
{
auto const oracleID = keylet::oracle(account_, ctx_.tx[sfOracleDocumentID]);
auto populatePriceData = [](STObject& priceData, STObject const& entry) {
setPriceDataInnerObjTemplate(priceData);
priceData.setFieldCurrency(
sfBaseAsset, entry.getFieldCurrency(sfBaseAsset));
priceData.setFieldCurrency(
sfQuoteAsset, entry.getFieldCurrency(sfQuoteAsset));
priceData.setFieldU64(sfAssetPrice, entry.getFieldU64(sfAssetPrice));
if (entry.isFieldPresent(sfScale))
priceData.setFieldU8(sfScale, entry.getFieldU8(sfScale));
};
if (auto sle = ctx_.view().peek(oracleID))
{
// update
@@ -260,7 +249,15 @@ SetOracle::doApply()
{
// add a token pair with the price
STObject priceData{sfPriceData};
populatePriceData(priceData, entry);
setPriceDataInnerObjTemplate(priceData);
priceData.setFieldCurrency(
sfBaseAsset, entry.getFieldCurrency(sfBaseAsset));
priceData.setFieldCurrency(
sfQuoteAsset, entry.getFieldCurrency(sfQuoteAsset));
priceData.setFieldU64(
sfAssetPrice, entry.getFieldU64(sfAssetPrice));
if (entry.isFieldPresent(sfScale))
priceData.setFieldU8(sfScale, entry.getFieldU8(sfScale));
pairs.emplace(key, std::move(priceData));
}
}
@@ -288,26 +285,7 @@ SetOracle::doApply()
sle->setFieldVL(sfProvider, ctx_.tx[sfProvider]);
if (ctx_.tx.isFieldPresent(sfURI))
sle->setFieldVL(sfURI, ctx_.tx[sfURI]);
STArray series;
if (!ctx_.view().rules().enabled(fixPriceOracleOrder))
{
series = ctx_.tx.getFieldArray(sfPriceDataSeries);
}
else
{
std::map<std::pair<Currency, Currency>, STObject> pairs;
for (auto const& entry : ctx_.tx.getFieldArray(sfPriceDataSeries))
{
auto const key = tokenPairKey(entry);
STObject priceData{sfPriceData};
populatePriceData(priceData, entry);
pairs.emplace(key, std::move(priceData));
}
for (auto const& iter : pairs)
series.push_back(std::move(iter.second));
}
auto const& series = ctx_.tx.getFieldArray(sfPriceDataSeries);
sle->setFieldArray(sfPriceDataSeries, series);
sle->setFieldVL(sfAssetClass, ctx_.tx[sfAssetClass]);
sle->setFieldU32(sfLastUpdateTime, ctx_.tx[sfLastUpdateTime]);
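The branch removed above routed the submitted series through a std::map keyed on the (base, quote) currency pair, so the stored order became the map's sorted key order rather than submission order. The mechanism in miniature:

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

int
main()
{
    // std::map iterates in key order, regardless of insertion order.
    std::vector<std::pair<std::string, std::string>> submitted = {
        {"XRP", "USD"}, {"XRP", "EUR"}};
    std::map<std::pair<std::string, std::string>, int> pairs;
    int i = 0;
    for (auto const& p : submitted)
        pairs.emplace(p, i++);
    for (auto const& [key, idx] : pairs)
        std::cout << key.first << '/' << key.second << " (submitted #"
                  << idx << ")\n";
    // Prints XRP/EUR before XRP/USD: sorted order, not submission order.
}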

View File

@@ -210,12 +210,12 @@ VaultDeposit::doApply()
auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_));
if (!sleMpt)
{
if (auto const err = authorizeMPToken(
if (auto const err = MPTokenAuthorize::authorize(
view(),
mPriorBalance,
mptIssuanceID->value(),
account_,
ctx_.journal);
ctx_.journal,
{.priorBalance = mPriorBalance,
.mptIssuanceID = mptIssuanceID->value(),
.account = account_});
!isTesSuccess(err))
return err;
}
@@ -223,15 +223,15 @@ VaultDeposit::doApply()
// If the vault is private, set the authorized flag for the vault owner
if (vault->isFlag(tfVaultPrivate))
{
if (auto const err = authorizeMPToken(
if (auto const err = MPTokenAuthorize::authorize(
view(),
mPriorBalance, // priorBalance
mptIssuanceID->value(), // mptIssuanceID
sleIssuance->at(sfIssuer), // account
ctx_.journal,
{}, // flags
account_ // holderID
);
{
.priorBalance = mPriorBalance,
.mptIssuanceID = mptIssuanceID->value(),
.account = sleIssuance->at(sfIssuer),
.holderID = account_,
});
!isTesSuccess(err))
return err;
}

View File

@@ -600,16 +600,6 @@ addEmptyHolding(
asset.value());
}
[[nodiscard]] TER
authorizeMPToken(
ApplyView& view,
XRPAmount const& priorBalance,
MPTID const& mptIssuanceID,
AccountID const& account,
beast::Journal journal,
std::uint32_t flags = 0,
std::optional<AccountID> holderID = std::nullopt);
// VFALCO NOTE Both STAmount parameters should just
// be "Amount", a unit-less number.
//

View File

@@ -18,6 +18,7 @@
//==============================================================================
#include <xrpld/app/misc/CredentialHelpers.h>
#include <xrpld/app/tx/detail/MPTokenAuthorize.h>
#include <xrpld/ledger/ReadView.h>
#include <xrpld/ledger/View.h>
@@ -1214,115 +1215,12 @@ addEmptyHolding(
if (view.peek(keylet::mptoken(mptID, accountID)))
return tecDUPLICATE;
return authorizeMPToken(view, priorBalance, mptID, accountID, journal);
}
[[nodiscard]] TER
authorizeMPToken(
ApplyView& view,
XRPAmount const& priorBalance,
MPTID const& mptIssuanceID,
AccountID const& account,
beast::Journal journal,
std::uint32_t flags,
std::optional<AccountID> holderID)
{
auto const sleAcct = view.peek(keylet::account(account));
if (!sleAcct)
return tecINTERNAL;
// If the account that submitted the tx is a holder
// Note: `account_` is holder's account
// `holderID` is NOT used
if (!holderID)
{
// When a holder wants to unauthorize/delete an MPT, the ledger must
// - delete mptokenKey from owner directory
// - delete the MPToken
if (flags & tfMPTUnauthorize)
{
auto const mptokenKey = keylet::mptoken(mptIssuanceID, account);
auto const sleMpt = view.peek(mptokenKey);
if (!sleMpt || (*sleMpt)[sfMPTAmount] != 0)
return tecINTERNAL; // LCOV_EXCL_LINE
if (!view.dirRemove(
keylet::ownerDir(account),
(*sleMpt)[sfOwnerNode],
sleMpt->key(),
false))
return tecINTERNAL; // LCOV_EXCL_LINE
adjustOwnerCount(view, sleAcct, -1, journal);
view.erase(sleMpt);
return tesSUCCESS;
}
// A potential holder wants to authorize/hold an MPT, the ledger must:
// - add the new mptokenKey to the owner directory
// - create the MPToken object for the holder
// The reserve that is required to create the MPToken. Note
// that although the reserve increases with every item
// an account owns, in the case of MPTokens we only
// *enforce* a reserve if the user owns more than two
// items. This is similar to the reserve requirements of trust lines.
std::uint32_t const uOwnerCount = sleAcct->getFieldU32(sfOwnerCount);
XRPAmount const reserveCreate(
(uOwnerCount < 2) ? XRPAmount(beast::zero)
: view.fees().accountReserve(uOwnerCount + 1));
if (priorBalance < reserveCreate)
return tecINSUFFICIENT_RESERVE;
auto const mptokenKey = keylet::mptoken(mptIssuanceID, account);
auto mptoken = std::make_shared<SLE>(mptokenKey);
if (auto ter = dirLink(view, account, mptoken))
return ter; // LCOV_EXCL_LINE
(*mptoken)[sfAccount] = account;
(*mptoken)[sfMPTokenIssuanceID] = mptIssuanceID;
(*mptoken)[sfFlags] = 0;
view.insert(mptoken);
// Update owner count.
adjustOwnerCount(view, sleAcct, 1, journal);
return tesSUCCESS;
}
auto const sleMptIssuance = view.read(keylet::mptIssuance(mptIssuanceID));
if (!sleMptIssuance)
return tecINTERNAL;
// If the account that submitted this tx is the issuer of the MPT
// Note: `account_` is issuer's account
// `holderID` is holder's account
if (account != (*sleMptIssuance)[sfIssuer])
return tecINTERNAL;
auto const sleMpt = view.peek(keylet::mptoken(mptIssuanceID, *holderID));
if (!sleMpt)
return tecINTERNAL;
std::uint32_t const flagsIn = sleMpt->getFieldU32(sfFlags);
std::uint32_t flagsOut = flagsIn;
// Issuer wants to unauthorize the holder, unset lsfMPTAuthorized on
// their MPToken
if (flags & tfMPTUnauthorize)
flagsOut &= ~lsfMPTAuthorized;
// Issuer wants to authorize a holder, set lsfMPTAuthorized on their
// MPToken
else
flagsOut |= lsfMPTAuthorized;
if (flagsIn != flagsOut)
sleMpt->setFieldU32(sfFlags, flagsOut);
view.update(sleMpt);
return tesSUCCESS;
return MPTokenAuthorize::authorize(
view,
journal,
{.priorBalance = priorBalance,
.mptIssuanceID = mptID,
.account = accountID});
}
TER
@@ -1520,14 +1418,13 @@ removeEmptyHolding(
if (mptoken->at(sfMPTAmount) != 0)
return tecHAS_OBLIGATIONS;
return authorizeMPToken(
return MPTokenAuthorize::authorize(
view,
{}, // priorBalance
mptID,
accountID,
journal,
tfMPTUnauthorize // flags
);
{.priorBalance = {},
.mptIssuanceID = mptID,
.account = accountID,
.flags = tfMPTUnauthorize});
}
TER
@@ -2600,12 +2497,15 @@ enforceMPTokenAuthorization(
XRPL_ASSERT(
maybeDomainID.has_value() && sleToken == nullptr,
"ripple::enforceMPTokenAuthorization : new MPToken for domain");
if (auto const err = authorizeMPToken(
if (auto const err = MPTokenAuthorize::authorize(
view,
priorBalance, // priorBalance
mptIssuanceID, // mptIssuanceID
account, // account
j);
j,
{
.priorBalance = priorBalance,
.mptIssuanceID = mptIssuanceID,
.account = account,
.flags = 0,
});
!isTesSuccess(err))
return err;

View File

@@ -446,8 +446,6 @@ Slot<clock_type>::deletePeer(PublicKey const& validator, id_t id, bool erase)
auto it = peers_.find(id);
if (it != peers_.end())
{
std::vector<Peer::id_t> toUnsquelch;
JLOG(journal_.trace())
<< "deletePeer: " << Slice(validator) << " " << id << " selected "
<< (it->second.state == PeerState::Selected) << " considered "
@@ -459,7 +457,7 @@ Slot<clock_type>::deletePeer(PublicKey const& validator, id_t id, bool erase)
for (auto& [k, v] : peers_)
{
if (v.state == PeerState::Squelched)
toUnsquelch.push_back(k);
handler_.unsquelch(validator, k);
v.state = PeerState::Counting;
v.count = 0;
v.expire = now;
@@ -481,10 +479,6 @@ Slot<clock_type>::deletePeer(PublicKey const& validator, id_t id, bool erase)
if (erase)
peers_.erase(it);
// Must be after peers_.erase(it)
for (auto const& k : toUnsquelch)
handler_.unsquelch(validator, k);
}
}
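The lines removed here had deferred the unsquelch callbacks until after peers_.erase(it), so the handler could never re-enter while the map was mid-mutation. The general shape of that defer-then-notify pattern, as a standalone sketch:

#include <functional>
#include <map>
#include <vector>

using Id = int;
std::map<Id, int> peers;  // value 1 stands in for PeerState::Squelched

void
deleteEntry(Id id, std::function<void(Id)> const& notify)
{
    std::vector<Id> toNotify;
    for (auto const& [k, v] : peers)
        if (v == 1)
            toNotify.push_back(k);

    peers.erase(id);  // mutate the container first

    // Invoke callbacks only after the erase, so a reentrant callback
    // cannot observe (or invalidate) an iterator into `peers`.
    for (Id k : toNotify)
        notify(k);
}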

View File

@@ -1423,12 +1423,7 @@ OverlayImpl::updateSlotAndSquelch(
if (!strand_.running_in_this_thread())
return post(
strand_,
// Must capture copies of reference parameters (i.e. key, validator)
[this,
key = key,
validator = validator,
peers = std::move(peers),
type]() mutable {
[this, key, validator, peers = std::move(peers), type]() mutable {
updateSlotAndSquelch(key, validator, std::move(peers), type);
});
@@ -1449,12 +1444,9 @@ OverlayImpl::updateSlotAndSquelch(
return;
if (!strand_.running_in_this_thread())
return post(
strand_,
// Must capture copies of reference parameters (i.e. key, validator)
[this, key = key, validator = validator, peer, type]() {
updateSlotAndSquelch(key, validator, peer, type);
});
return post(strand_, [this, key, validator, peer, type]() {
updateSlotAndSquelch(key, validator, peer, type);
});
slots_.updateSlotAndSquelch(key, validator, peer, type, [&]() {
reportInboundTraffic(TrafficCount::squelch_ignored, 0);
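The deleted comments warned why key and validator were captured by value: post() returns immediately, the caller's frame unwinds, and any reference capture would dangle by the time the strand runs the handler. A minimal boost::asio sketch (illustrative names):

#include <boost/asio.hpp>
#include <iostream>
#include <string>

void
update(
    boost::asio::strand<boost::asio::io_context::executor_type>& strand,
    std::string const& key)
{
    // Capture `key` by value: the lambda outlives this stack frame.
    // Capturing [&] or [&key] here would dangle by the time it runs.
    boost::asio::post(
        strand, [key]() { std::cout << "processing " << key << '\n'; });
}

int
main()
{
    boost::asio::io_context ctx;
    auto strand = boost::asio::make_strand(ctx.get_executor());
    update(strand, "validator-key");
    ctx.run();
}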

View File

@@ -114,7 +114,7 @@ getCountsJson(Application& app, int minObjectCount)
ret[jss::treenode_cache_size] =
app.getNodeFamily().getTreeNodeCache()->getCacheSize();
ret[jss::treenode_track_size] =
static_cast<int>(app.getNodeFamily().getTreeNodeCache()->size());
app.getNodeFamily().getTreeNodeCache()->getTrackSize();
std::string uptime;
auto s = UptimeClock::now();