Mirror of https://github.com/XRPLF/rippled.git, synced 2026-02-26 16:52:32 +00:00

Compare commits: copilot/ad... develop (12 commits)
Commits:

- 3a8a18c2ca
- 65e63ebef3
- bdd106d992
- 24cbaf76a5
- 3a805cc646
- 0fd237d707
- 3542daa4cc
- fd9f57ec97
- 625becff18
- 4bcbc6e50f
- 0bc4a0cfe8
- cb54adefed
.github/workflows/on-pr.yml (vendored, 4 lines changed)

@@ -33,7 +33,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
       - name: Determine changed files
         # This step checks whether any files have changed that should
         # cause the next jobs to run. We do it this way rather than
@@ -46,7 +46,7 @@ jobs:
         # that Github considers any skipped jobs to have passed, and in
         # turn the required checks as well.
         id: changes
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
         with:
           files: |
             # These paths are unique to `on-pr.yml`.
.github/workflows/pre-commit.yml (vendored, 2 lines changed)

@@ -14,4 +14,4 @@ jobs:
     uses: XRPLF/actions/.github/workflows/pre-commit.yml@320be44621ca2a080f05aeb15817c44b84518108
     with:
       runs_on: ubuntu-latest
-      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-ab4d1f0" }'
+      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-41ec7c1" }'
.github/workflows/publish-docs.yml (vendored, 20 lines changed)

@@ -4,6 +4,18 @@ name: Build and publish documentation

 on:
   push:
+    branches:
+      - "develop"
+      - "release*"
+    paths:
+      - ".github/workflows/publish-docs.yml"
+      - "*.md"
+      - "**/*.md"
+      - "docs/**"
+      - "include/**"
+      - "src/libxrpl/**"
+      - "src/xrpld/**"
+  pull_request:
     paths:
       - ".github/workflows/publish-docs.yml"
       - "*.md"
@@ -23,7 +35,9 @@ defaults:

 env:
   BUILD_DIR: build
-  NPROC_SUBTRACT: 2
+  # ubuntu-latest has only 2 CPUs for private repositories
+  # https://docs.github.com/en/actions/reference/runners/github-hosted-runners#standard-github-hosted-runners-for--private-repositories
+  NPROC_SUBTRACT: ${{ github.event.repository.private && '1' || '2' }}

 jobs:
   publish:
@@ -33,7 +47,7 @@ jobs:
       contents: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

       - name: Get number of processors
         uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
@@ -65,7 +79,7 @@ jobs:
           cmake --build . --target docs --parallel ${BUILD_NPROC}

       - name: Publish documentation
-        if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
+        if: ${{ github.event_name == 'push' }}
         uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
@@ -101,10 +101,10 @@ jobs:
     steps:
       - name: Cleanup workspace (macOS and Windows)
         if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
-        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
+        uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4

       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

       - name: Prepare runner
         uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
@@ -177,7 +177,7 @@ jobs:

       - name: Upload the binary (Linux)
         if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
         with:
           name: xrpld-${{ inputs.config_name }}
           path: ${{ env.BUILD_DIR }}/xrpld
@@ -254,7 +254,7 @@ jobs:

       - name: Upload coverage report
         if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
-        uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
+        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
         with:
           disable_search: true
           disable_telem: true
@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Check levelization
        run: .github/scripts/levelization/generate.sh
      - name: Check for differences
.github/workflows/reusable-check-rename.yml (vendored, 2 lines changed)

@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
       - name: Check definitions
         run: .github/scripts/rename/definitions.sh .
       - name: Check copyright notices
@@ -32,7 +32,7 @@ jobs:
       contents: read
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

       - name: Prepare runner
         uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
@@ -84,7 +84,7 @@ jobs:

       - name: Upload clang-tidy output
         if: steps.run_clang_tidy.outcome != 'success'
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
         with:
           name: clang-tidy-results
           path: clang-tidy-output.txt
.github/workflows/reusable-clang-tidy.yml (vendored, 2 lines changed)

@@ -30,7 +30,7 @@ jobs:

       - name: Get changed C++ files
         id: changed_files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
         with:
           files: |
             **/*.cpp
@@ -29,10 +29,10 @@ jobs:
       matrix: ${{ steps.generate.outputs.matrix }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

       - name: Set up Python
-        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
         with:
           python-version: 3.13
.github/workflows/reusable-upload-recipe.yml (vendored, 2 lines changed)

@@ -43,7 +43,7 @@ jobs:
     container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
     steps:
       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

       - name: Generate build version number
         id: version
.github/workflows/upload-conan-deps.yml (vendored, 4 lines changed)

@@ -64,10 +64,10 @@ jobs:
     steps:
       - name: Cleanup workspace (macOS and Windows)
         if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
-        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
+        uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4

       - name: Checkout repository
-        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

       - name: Prepare runner
         uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
.gitignore (vendored, 7 lines changed)

@@ -42,6 +42,9 @@ gmon.out
 # Locally patched Conan recipes
 external/conan-center-index/

+# Local conan directory
+.conan
+
 # XCode IDE.
 *.pbxuser
 !default.pbxuser
@@ -72,6 +75,8 @@ DerivedData
 /.claude
 /CLAUDE.md

+# Direnv's directory
+/.direnv

 # clangd cache
 /.cache
 build_nocmake/
@@ -57,6 +57,16 @@ repos:
           - .git/COMMIT_EDITMSG
         stages: [commit-msg]

+  - repo: local
+    hooks:
+      - id: nix-fmt
+        name: Format Nix files
+        entry: nix --extra-experimental-features 'nix-command flakes' fmt
+        language: system
+        types:
+          - nix
+        pass_filenames: true
+
 exclude: |
   (?x)^(
     external/.*|
@@ -173,6 +173,9 @@ words:
   - nftokens
   - nftpage
   - nikb
+  - nixfmt
+  - nixos
+  - nixpkgs
   - nonxrp
   - noripple
   - nudb
docs/build/environment.md (vendored, 2 lines changed)

@@ -3,6 +3,8 @@ environment complete with Git, Python, Conan, CMake, and a C++ compiler.
 This document exists to help readers set one up on any of the Big Three
 platforms: Linux, macOS, or Windows.

+As an alternative to system packages, the Nix development shell can be used to provide a development environment. See [using nix development shell](./nix.md) for more details.
+
 [BUILD.md]: ../../BUILD.md

 ## Linux
docs/build/nix.md (vendored, new file, 95 lines)

@@ -0,0 +1,95 @@
# Using Nix Development Shell for xrpld Development

This guide explains how to use Nix to set up a reproducible development environment for xrpld. Using Nix eliminates the need to manually install utilities and ensures consistent tooling across different machines.

## Benefits of Using Nix

- **Reproducible environment**: Everyone gets the same versions of tools and compilers
- **No system pollution**: Dependencies are isolated and don't affect your system packages
- **Multiple compiler versions**: Easily switch between different GCC and Clang versions
- **Quick setup**: Get started with a single command
- **Works on Linux and macOS**: Consistent experience across platforms

## Install Nix

Please follow [the official installation instructions of the Nix package manager](https://nixos.org/download/) for your system.

## Entering the Development Shell

### Basic Usage

From the root of the xrpld repository, enter the default development shell:

```bash
nix --experimental-features 'nix-command flakes' develop
```

This will:

- Download and set up all required development tools (CMake, Ninja, Conan, etc.)
- Configure the appropriate compiler for your platform:
  - **macOS**: Apple Clang (default system compiler)
  - **Linux**: GCC 15

The first time you run this command, it will take a few minutes to download and build the environment. Subsequent runs will be much faster.

> [!TIP]
> To avoid typing `--experimental-features 'nix-command flakes'` every time, you can permanently enable flakes by creating `~/.config/nix/nix.conf`:
>
> ```bash
> mkdir -p ~/.config/nix
> echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf
> ```
>
> After this, you can simply use `nix develop` instead.

> [!NOTE]
> The examples below assume you've enabled flakes in your config. If you haven't, add `--experimental-features 'nix-command flakes'` after each `nix` command.

### Choosing a different compiler

A compiler can be chosen by providing its name with the `.#` prefix, e.g. `nix develop .#gcc15`.
Use `nix flake show` to see all the available development shells.

Use `nix develop .#no_compiler` to use the compiler from your system.

### Example Usage

```bash
# Use GCC 14
nix develop .#gcc14

# Use Clang 19
nix develop .#clang19

# Use default for your platform
nix develop
```

### Using a different shell

`nix develop` opens bash by default. If you want to use another shell, pass it with the `-c` flag. For example:

```bash
nix develop -c zsh
```

## Building xrpld with Nix

Once inside the Nix development shell, follow the standard [build instructions](../../BUILD.md#steps). The Nix shell provides all the necessary tools (CMake, Ninja, Conan, etc.).

## Automatic Activation with direnv

[direnv](https://direnv.net/) or [nix-direnv](https://github.com/nix-community/nix-direnv) can automatically activate the Nix development shell when you enter the repository directory.

## Conan and Prebuilt Packages

Please note that there is no guarantee that binaries from the Conan cache will work when using Nix. If you encounter any errors, please use `--build '*'` to force Conan to compile everything from source:

```bash
conan install .. --output-folder . --build '*' --settings build_type=Release
```

## Updating the `flake.lock` file

To update `flake.lock` to the latest revision, use the `nix flake update` command.
flake.lock (generated, new file, 26 lines)

@@ -0,0 +1,26 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1769461804,
        "narHash": "sha256-6h5sROT/3CTHvzPy9koKBmoCa2eJKh4fzQK8eYFEgl8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "b579d443b37c9c5373044201ea77604e37e748c8",
        "type": "github"
      },
      "original": {
        "id": "nixpkgs",
        "ref": "nixos-unstable",
        "type": "indirect"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 16 lines)

@@ -0,0 +1,16 @@
{
  description = "Nix related things for xrpld";
  inputs = {
    nixpkgs.url = "nixpkgs/nixos-unstable";
  };

  outputs =
    { nixpkgs, ... }:
    let
      forEachSystem = (import ./nix/utils.nix { inherit nixpkgs; }).forEachSystem;
    in
    {
      devShells = forEachSystem (import ./nix/devshell.nix);
      formatter = forEachSystem ({ pkgs, ... }: pkgs.nixfmt);
    };
}
include/xrpl/basics/MallocTrim.h (new file, 73 lines)

@@ -0,0 +1,73 @@
#pragma once

#include <xrpl/beast/utility/Journal.h>

#include <chrono>
#include <cstdint>
#include <string_view>

namespace xrpl {

// cSpell:ignore ptmalloc

// -----------------------------------------------------------------------------
// Allocator interaction note:
// - This facility invokes glibc's malloc_trim(0) on Linux/glibc to request that
//   ptmalloc return free heap pages to the OS.
// - If an alternative allocator (e.g. jemalloc or tcmalloc) is linked or
//   preloaded (LD_PRELOAD), calling glibc's malloc_trim typically has no effect
//   on the *active* heap. The call is harmless but may not reclaim memory
//   because those allocators manage their own arenas.
// - Only glibc sbrk/arena space is eligible for trimming; large mmap-backed
//   allocations are usually returned to the OS on free regardless of trimming.
// - Call at known reclamation points (e.g., after cache sweeps / online delete)
//   and consider rate limiting to avoid churn.
// -----------------------------------------------------------------------------

struct MallocTrimReport
{
    bool supported{false};
    int trimResult{-1};
    std::int64_t rssBeforeKB{-1};
    std::int64_t rssAfterKB{-1};
    std::chrono::microseconds durationUs{-1};
    std::int64_t minfltDelta{-1};
    std::int64_t majfltDelta{-1};

    [[nodiscard]] std::int64_t
    deltaKB() const noexcept
    {
        if (rssBeforeKB < 0 || rssAfterKB < 0)
            return 0;
        return rssAfterKB - rssBeforeKB;
    }
};

/**
 * @brief Attempt to return freed memory to the operating system.
 *
 * On Linux with glibc malloc, this issues ::malloc_trim(0), which may release
 * free space from ptmalloc arenas back to the kernel. On other platforms, or if
 * a different allocator is in use, this function is a no-op and the report will
 * indicate that trimming is unsupported or had no effect.
 *
 * @param tag Identifier for logging/debugging purposes.
 * @param journal Journal for diagnostic logging.
 * @return Report containing before/after metrics and the trim result.
 *
 * @note If an alternative allocator (jemalloc/tcmalloc) is linked or preloaded,
 *       calling glibc's malloc_trim may have no effect on the active heap. The
 *       call is harmless but typically does not reclaim memory under those
 *       allocators.
 *
 * @note Only memory served from glibc's sbrk/arena heaps is eligible for trim.
 *       Large allocations satisfied via mmap are usually returned on free
 *       independently of trimming.
 *
 * @note Intended for use after operations that free significant memory (e.g.,
 *       cache sweeps, ledger cleanup, online delete). Consider rate limiting.
 */
MallocTrimReport
mallocTrim(std::string_view tag, beast::Journal journal);

} // namespace xrpl
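A minimal call-site sketch of this API. The surrounding function name is hypothetical; only `xrpl::mallocTrim` and `MallocTrimReport` come from the header above:

```cpp
#include <xrpl/basics/MallocTrim.h>

// Hypothetical reclamation point: ask the allocator to return freed pages
// after a large cache sweep. A beast::Journal is assumed to be available at
// the call site; the tag string only labels the resulting log line.
void afterCacheSweep(beast::Journal journal)
{
    auto const report = xrpl::mallocTrim("cache-sweep", journal);

    // deltaKB() is negative when RSS shrank, e.g. -2048 means roughly 2 MB
    // were returned to the kernel. On non-glibc platforms report.supported
    // stays false and the call is a no-op.
    if (report.supported && report.deltaKB() < 0)
    {
        // Memory was reclaimed; details are in report.rssBeforeKB,
        // report.rssAfterKB, and report.durationUs.
    }
}
```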
@@ -77,16 +77,16 @@ public:
         If the object is not found or an error is encountered, the
         result will indicate the condition.
         @note This will be called concurrently.
-        @param key A pointer to the key data.
+        @param hash The hash of the object.
         @param pObject [out] The created object if successful.
         @return The result of the operation.
     */
     virtual Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0;
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;

     /** Fetch a batch synchronously. */
     virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) = 0;
+    fetchBatch(std::vector<uint256> const& hashes) = 0;

     /** Store a single object.
         Depending on the implementation this may happen immediately
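A hedged sketch of what this interface change means for callers. The helper functions are illustrative, not from the diff; `Backend`, `NodeObject`, `Status`, and `uint256` come from the NodeStore headers, and the signatures match the hunk above:

```cpp
// Illustrative helper showing the migrated single-fetch pattern.
Status
fetchOne(Backend& backend, uint256 const& hash, std::shared_ptr<NodeObject>& out)
{
    // Before this change: backend.fetch(hash.data(), &out);
    return backend.fetch(hash, &out);
}

// Illustrative helper showing the migrated batch pattern.
std::vector<std::shared_ptr<NodeObject>>
fetchMany(Backend& backend, std::vector<uint256> const& hashes)
{
    // Before this change callers had to build a std::vector<uint256 const*>
    // pointing into `hashes`; now the vector of hashes is passed directly.
    return backend.fetchBatch(hashes).first;
}
```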
@@ -15,9 +15,10 @@

 // Add new amendments to the top of this list.
 // Keep it sorted in reverse chronological order.

+XRPL_FIX (PermissionedDomainInvariant, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
-XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo)
+XRPL_FIX (BatchInnerSigs, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FEATURE(LendingProtocol, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
@@ -31,7 +32,7 @@ XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
-XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
+XRPL_FEATURE(Batch, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FEATURE(SingleAssetVault, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
 // Check flags in Credential transactions
nix/devshell.nix (new file, 140 lines)

@@ -0,0 +1,140 @@
{ pkgs, ... }:
let
  commonPackages = with pkgs; [
    ccache
    cmake
    conan
    gcovr
    git
    gnumake
    llvmPackages_21.clang-tools
    ninja
    perl # needed for openssl
    pkg-config
    pre-commit
    python314
  ];

  # Supported compiler versions
  gccVersion = pkgs.lib.range 13 15;
  clangVersions = pkgs.lib.range 18 21;

  defaultCompiler = if pkgs.stdenv.isDarwin then "apple-clang" else "gcc";
  defaultGccVersion = pkgs.lib.last gccVersion;
  defaultClangVersion = pkgs.lib.last clangVersions;

  strToCompilerEnv =
    compiler: version:
    (
      if compiler == "gcc" then
        let
          gccPkg = pkgs."gcc${toString version}Stdenv" or null;
        in
        if gccPkg != null && builtins.elem version gccVersion then
          gccPkg
        else
          throw "Invalid GCC version: ${toString version}. Must be one of: ${toString gccVersion}"
      else if compiler == "clang" then
        let
          clangPkg = pkgs."llvmPackages_${toString version}".stdenv or null;
        in
        if clangPkg != null && builtins.elem version clangVersions then
          clangPkg
        else
          throw "Invalid Clang version: ${toString version}. Must be one of: ${toString clangVersions}"
      else if compiler == "apple-clang" || compiler == "none" then
        pkgs.stdenvNoCC
      else
        throw "Invalid compiler: ${compiler}. Must be one of: gcc, clang, apple-clang, none"
    );

  # Helper function to create a shell with a specific compiler
  makeShell =
    {
      compiler ? defaultCompiler,
      version ? (
        if compiler == "gcc" then
          defaultGccVersion
        else if compiler == "clang" then
          defaultClangVersion
        else
          null
      ),
    }:
    let
      compilerStdEnv = strToCompilerEnv compiler version;

      compilerName =
        if compiler == "apple-clang" then
          "clang"
        else if compiler == "none" then
          null
        else
          compiler;

      gccOnMacWarning =
        if pkgs.stdenv.isDarwin && compiler == "gcc" then
          ''
            echo "WARNING: Using GCC on macOS with Conan may not work."
            echo "         Consider using 'nix develop .#clang' or the default shell instead."
            echo ""
          ''
        else
          "";

      compilerVersion =
        if compilerName != null then
          ''
            echo "Compiler: "
            ${compilerName} --version
          ''
        else
          ''
            echo "No compiler specified - using system compiler"
          '';

      shellAttrs = {
        packages = commonPackages;

        shellHook = ''
          echo "Welcome to xrpld development shell";
          ${gccOnMacWarning}${compilerVersion}
        '';
      };
    in
    pkgs.mkShell.override { stdenv = compilerStdEnv; } shellAttrs;

  # Generate shells for each compiler version
  gccShells = builtins.listToAttrs (
    map (version: {
      name = "gcc${toString version}";
      value = makeShell {
        compiler = "gcc";
        version = version;
      };
    }) gccVersion
  );

  clangShells = builtins.listToAttrs (
    map (version: {
      name = "clang${toString version}";
      value = makeShell {
        compiler = "clang";
        version = version;
      };
    }) clangVersions
  );

in
gccShells
// clangShells
// {
  # Default shells
  default = makeShell { };
  gcc = makeShell { compiler = "gcc"; };
  clang = makeShell { compiler = "clang"; };

  # No compiler
  no-compiler = makeShell { compiler = "none"; };
  apple-clang = makeShell { compiler = "apple-clang"; };
}
nix/utils.nix (new file, 19 lines)

@@ -0,0 +1,19 @@
{ nixpkgs }:
{
  forEachSystem =
    function:
    nixpkgs.lib.genAttrs
      [
        "x86_64-linux"
        "aarch64-linux"
        "x86_64-darwin"
        "aarch64-darwin"
      ]
      (
        system:
        function {
          inherit system;
          pkgs = import nixpkgs { inherit system; };
        }
      );
}
src/libxrpl/basics/MallocTrim.cpp (new file, 157 lines)

@@ -0,0 +1,157 @@
#include <xrpl/basics/Log.h>
#include <xrpl/basics/MallocTrim.h>

#include <boost/predef.h>

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <sstream>

#if defined(__GLIBC__) && BOOST_OS_LINUX
#include <sys/resource.h>

#include <malloc.h>
#include <unistd.h>

// Require RUSAGE_THREAD for thread-scoped page fault tracking
#ifndef RUSAGE_THREAD
#error "MallocTrim rusage instrumentation requires RUSAGE_THREAD on Linux/glibc"
#endif

namespace {

bool
getRusageThread(struct rusage& ru)
{
    return ::getrusage(RUSAGE_THREAD, &ru) == 0;  // LCOV_EXCL_LINE
}

}  // namespace
#endif

namespace xrpl {

namespace detail {

// cSpell:ignore statm

#if defined(__GLIBC__) && BOOST_OS_LINUX

inline int
mallocTrimWithPad(std::size_t padBytes)
{
    return ::malloc_trim(padBytes);
}

long
parseStatmRSSkB(std::string const& statm)
{
    // /proc/self/statm format: size resident shared text lib data dt
    // We want the second field (resident) which is in pages
    std::istringstream iss(statm);
    long size, resident;
    if (!(iss >> size >> resident))
        return -1;

    // Convert pages to KB
    long const pageSize = ::sysconf(_SC_PAGESIZE);
    if (pageSize <= 0)
        return -1;

    return (resident * pageSize) / 1024;
}

#endif  // __GLIBC__ && BOOST_OS_LINUX

}  // namespace detail

MallocTrimReport
mallocTrim(std::string_view tag, beast::Journal journal)
{
    // LCOV_EXCL_START

    MallocTrimReport report;

#if !(defined(__GLIBC__) && BOOST_OS_LINUX)
    JLOG(journal.debug()) << "malloc_trim not supported on this platform (tag=" << tag << ")";
#else
    // Keep glibc malloc_trim padding at 0 (default): 12h Mainnet tests across 0/256KB/1MB/16MB
    // showed no clear, consistent benefit from custom padding—0 provided the best overall balance
    // of RSS reduction and trim-latency stability without adding a tuning surface.
    constexpr std::size_t TRIM_PAD = 0;

    report.supported = true;

    if (journal.debug())
    {
        auto readFile = [](std::string const& path) -> std::string {
            std::ifstream ifs(path, std::ios::in | std::ios::binary);
            if (!ifs.is_open())
                return {};

            // /proc files are often not seekable; read as a stream.
            std::ostringstream oss;
            oss << ifs.rdbuf();
            return oss.str();
        };

        std::string const tagStr{tag};
        std::string const statmPath = "/proc/self/statm";

        auto const statmBefore = readFile(statmPath);
        long const rssBeforeKB = detail::parseStatmRSSkB(statmBefore);

        struct rusage ru0{};
        bool const have_ru0 = getRusageThread(ru0);

        auto const t0 = std::chrono::steady_clock::now();

        report.trimResult = detail::mallocTrimWithPad(TRIM_PAD);

        auto const t1 = std::chrono::steady_clock::now();

        struct rusage ru1{};
        bool const have_ru1 = getRusageThread(ru1);

        auto const statmAfter = readFile(statmPath);
        long const rssAfterKB = detail::parseStatmRSSkB(statmAfter);

        // Populate report fields
        report.rssBeforeKB = rssBeforeKB;
        report.rssAfterKB = rssAfterKB;
        report.durationUs = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0);

        if (have_ru0 && have_ru1)
        {
            report.minfltDelta = ru1.ru_minflt - ru0.ru_minflt;
            report.majfltDelta = ru1.ru_majflt - ru0.ru_majflt;
        }

        std::int64_t const deltaKB = (rssBeforeKB < 0 || rssAfterKB < 0)
            ? 0
            : (static_cast<std::int64_t>(rssAfterKB) - static_cast<std::int64_t>(rssBeforeKB));

        JLOG(journal.debug()) << "malloc_trim tag=" << tagStr << " result=" << report.trimResult
                              << " pad=" << TRIM_PAD << " bytes"
                              << " rss_before=" << rssBeforeKB << "kB"
                              << " rss_after=" << rssAfterKB << "kB"
                              << " delta=" << deltaKB << "kB"
                              << " duration_us=" << report.durationUs.count()
                              << " minflt_delta=" << report.minfltDelta
                              << " majflt_delta=" << report.majfltDelta;
    }
    else
    {
        report.trimResult = detail::mallocTrimWithPad(TRIM_PAD);
    }

#endif

    return report;

    // LCOV_EXCL_STOP
}

}  // namespace xrpl
@@ -33,7 +33,7 @@ DatabaseNodeImp::fetchNodeObject(

     try
     {
-        status = backend_->fetch(hash.data(), &nodeObject);
+        status = backend_->fetch(hash, &nodeObject);
     }
     catch (std::exception const& e)
     {
@@ -68,18 +68,10 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
     using namespace std::chrono;
     auto const before = steady_clock::now();

-    std::vector<uint256 const*> batch{};
-    batch.reserve(hashes.size());
-    for (size_t i = 0; i < hashes.size(); ++i)
-    {
-        auto const& hash = hashes[i];
-        batch.push_back(&hash);
-    }
-
     // Get the node objects that match the hashes from the backend. To protect
     // against the backends returning fewer or more results than expected, the
     // container is resized to the number of hashes.
-    auto results = backend_->fetchBatch(batch).first;
+    auto results = backend_->fetchBatch(hashes).first;
     XRPL_ASSERT(
         results.size() == hashes.size() || results.empty(),
         "number of output objects either matches number of input hashes or is empty");
@@ -105,7 +105,7 @@ DatabaseRotatingImp::fetchNodeObject(
     std::shared_ptr<NodeObject> nodeObject;
     try
     {
-        status = backend->fetch(hash.data(), &nodeObject);
+        status = backend->fetch(hash, &nodeObject);
     }
     catch (std::exception const& e)
     {
@@ -116,10 +116,9 @@ public:
     //--------------------------------------------------------------------------

     Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
     {
         XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database");
-        uint256 const hash(uint256::fromVoid(key));

         std::lock_guard _(db_->mutex);

@@ -134,14 +133,14 @@ public:
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         std::vector<std::shared_ptr<NodeObject>> results;
         results.reserve(hashes.size());
         for (auto const& h : hashes)
         {
             std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
             if (status != ok)
                 results.push_back({});
             else
@@ -179,17 +179,17 @@ public:
     }

     Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pno) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pno) override
     {
         Status status;
         pno->reset();
         nudb::error_code ec;
         db_.fetch(
-            key,
-            [key, pno, &status](void const* data, std::size_t size) {
+            hash.data(),
+            [&hash, pno, &status](void const* data, std::size_t size) {
                 nudb::detail::buffer bf;
                 auto const result = nodeobject_decompress(data, size, bf);
-                DecodedBlob decoded(key, result.first, result.second);
+                DecodedBlob decoded(hash.data(), result.first, result.second);
                 if (!decoded.wasOk())
                 {
                     status = dataCorrupt;
@@ -207,14 +207,14 @@ public:
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         std::vector<std::shared_ptr<NodeObject>> results;
         results.reserve(hashes.size());
         for (auto const& h : hashes)
         {
             std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
             if (status != ok)
                 results.push_back({});
             else
@@ -36,13 +36,13 @@ public:
     }

     Status
-    fetch(void const*, std::shared_ptr<NodeObject>*) override
+    fetch(uint256 const&, std::shared_ptr<NodeObject>*) override
     {
         return notFound;
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         return {};
     }
@@ -244,7 +244,7 @@ public:
     //--------------------------------------------------------------------------

     Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
     {
         XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database");
         pObject->reset();
@@ -252,7 +252,7 @@ public:
         Status status(ok);

         rocksdb::ReadOptions const options;
-        rocksdb::Slice const slice(static_cast<char const*>(key), m_keyBytes);
+        rocksdb::Slice const slice(std::bit_cast<char const*>(hash.data()), m_keyBytes);

         std::string string;
@@ -260,7 +260,7 @@ public:

         if (getStatus.ok())
         {
-            DecodedBlob decoded(key, string.data(), string.size());
+            DecodedBlob decoded(hash.data(), string.data(), string.size());

             if (decoded.wasOk())
             {
@@ -295,14 +295,14 @@ public:
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         std::vector<std::shared_ptr<NodeObject>> results;
         results.reserve(hashes.size());
         for (auto const& h : hashes)
         {
             std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
             if (status != ok)
                 results.push_back({});
             else
@@ -332,9 +332,8 @@ public:
             EncodedBlob encoded(e);

             wb.Put(
-                rocksdb::Slice(reinterpret_cast<char const*>(encoded.getKey()), m_keyBytes),
-                rocksdb::Slice(
-                    reinterpret_cast<char const*>(encoded.getData()), encoded.getSize()));
+                rocksdb::Slice(std::bit_cast<char const*>(encoded.getKey()), m_keyBytes),
+                rocksdb::Slice(std::bit_cast<char const*>(encoded.getData()), encoded.getSize()));
         }

         rocksdb::WriteOptions const options;
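An aside on the cast swap in the hunks above: `std::bit_cast` (C++20, `<bit>`) copies the object representation rather than reinterpreting through `reinterpret_cast`. A minimal standalone sketch, illustrative only and not from the diff:

```cpp
#include <bit>

int main()
{
    unsigned char const bytes[32] = {};

    // Both expressions yield a char const* referring to the same address;
    // bit_cast states the intent (reinterpret the pointer's representation)
    // without the broader license that reinterpret_cast grants.
    char const* a = reinterpret_cast<char const*>(&bytes[0]);
    char const* b = std::bit_cast<char const*>(&bytes[0]);

    return (a == b) ? 0 : 1;
}
```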
@@ -138,7 +138,7 @@ public:
         {
             std::shared_ptr<NodeObject> object;

-            Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+            Status const status = backend.fetch(batch[i]->getHash(), &object);

             BEAST_EXPECT(status == ok);
@@ -158,7 +158,7 @@ public:
         {
             std::shared_ptr<NodeObject> object;

-            Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+            Status const status = backend.fetch(batch[i]->getHash(), &object);

             BEAST_EXPECT(status == notFound);
         }
@@ -314,7 +314,7 @@ public:
                 std::shared_ptr<NodeObject> obj;
                 std::shared_ptr<NodeObject> result;
                 obj = seq1_.obj(dist_(gen_));
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
                 suite_.expect(result && isSame(result, obj));
             }
             catch (std::exception const& e)
@@ -377,9 +377,9 @@ public:
         {
             try
             {
-                auto const key = seq2_.key(i);
+                auto const hash = seq2_.key(i);
                 std::shared_ptr<NodeObject> result;
-                backend_.fetch(key.data(), &result);
+                backend_.fetch(hash, &result);
                 suite_.expect(!result);
             }
             catch (std::exception const& e)
@@ -449,9 +449,9 @@ public:
         {
             if (rand_(gen_) < missingNodePercent)
             {
-                auto const key = seq2_.key(dist_(gen_));
+                auto const hash = seq2_.key(dist_(gen_));
                 std::shared_ptr<NodeObject> result;
-                backend_.fetch(key.data(), &result);
+                backend_.fetch(hash, &result);
                 suite_.expect(!result);
             }
             else
@@ -459,7 +459,7 @@ public:
                 std::shared_ptr<NodeObject> obj;
                 std::shared_ptr<NodeObject> result;
                 obj = seq1_.obj(dist_(gen_));
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
                 suite_.expect(result && isSame(result, obj));
             }
         }
@@ -540,8 +540,7 @@ public:
                 std::shared_ptr<NodeObject> result;
                 auto const j = older_(gen_);
                 obj = seq1_.obj(j);
-                std::shared_ptr<NodeObject> result1;
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
                 suite_.expect(result != nullptr);
                 suite_.expect(isSame(result, obj));
             }
@@ -559,7 +558,7 @@ public:
                 std::shared_ptr<NodeObject> result;
                 auto const j = recent_(gen_);
                 obj = seq1_.obj(j);
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
                 suite_.expect(!result || isSame(result, obj));
                 break;
             }
@@ -6,7 +6,6 @@
 #include <test/jtx/xchain_bridge.h>

 #include <xrpld/app/misc/TxQ.h>
-#include <xrpld/rpc/CTID.h>

 #include <xrpl/beast/unit_test.h>
 #include <xrpl/json/json_value.h>
@@ -672,126 +671,6 @@ class LedgerRPC_test : public beast::unit_test::suite
         }
     }

-    void
-    testLedgerExpandedTransactionsCTID()
-    {
-        testcase("Ledger Expanded Transactions Include CTID");
-        using namespace test::jtx;
-
-        // Use a specific network ID so we can verify it in the CTID
-        constexpr uint32_t NETWORK_ID = 11111;
-        auto cfg = envconfig([&](std::unique_ptr<Config> cfg) {
-            cfg->NETWORK_ID = NETWORK_ID;
-            return cfg;
-        });
-
-        Env env{*this, std::move(cfg)};
-        uint32_t netID = env.app().getNetworkIDService().getNetworkID();
-        BEAST_EXPECT(netID == NETWORK_ID);
-
-        Account const alice{"alice"};
-        env.fund(XRP(10000), alice);
-        env.close();
-
-        // Submit a transaction and close the ledger
-        env(noop(alice));
-        env.close();
-
-        auto const ledgerSeq = env.closed()->header().seq;
-
-        // Request ledger with expanded transactions (API v2, non-binary)
-        {
-            Json::Value jv;
-            jv[jss::ledger_index] = ledgerSeq;
-            jv[jss::transactions] = true;
-            jv[jss::expand] = true;
-            jv[jss::api_version] = 2;
-            auto jrr = env.rpc("json", "ledger", to_string(jv))[jss::result];
-            BEAST_EXPECT(jrr[jss::status] == "success");
-            auto const& txns = jrr[jss::ledger][jss::transactions];
-            BEAST_EXPECT(txns.isArray() && txns.size() > 0);
-            for (auto const& tx : txns)
-            {
-                BEAST_EXPECT(tx.isMember(jss::ctid));
-                auto const ctid = tx[jss::ctid].asString();
-                // Verify CTID can be decoded and matches the ledger
-                auto const decoded = RPC::decodeCTID(ctid);
-                if (BEAST_EXPECT(decoded.has_value()))
-                {
-                    auto const [seq, idx, net] = *decoded;
-                    BEAST_EXPECT(seq == ledgerSeq);
-                    BEAST_EXPECT(net == NETWORK_ID);
-                }
-            }
-        }
-
-        // Request ledger with expanded transactions (API v1, non-binary)
-        {
-            Json::Value jv;
-            jv[jss::ledger_index] = ledgerSeq;
-            jv[jss::transactions] = true;
-            jv[jss::expand] = true;
-            auto jrr = env.rpc("json", "ledger", to_string(jv))[jss::result];
-            BEAST_EXPECT(jrr[jss::status] == "success");
-            auto const& txns = jrr[jss::ledger][jss::transactions];
-            BEAST_EXPECT(txns.isArray() && txns.size() > 0);
-            for (auto const& tx : txns)
-            {
-                BEAST_EXPECT(tx.isMember(jss::ctid));
-                auto const ctid = tx[jss::ctid].asString();
-                auto const decoded = RPC::decodeCTID(ctid);
-                if (BEAST_EXPECT(decoded.has_value()))
-                {
-                    auto const [seq, idx, net] = *decoded;
-                    BEAST_EXPECT(seq == ledgerSeq);
-                    BEAST_EXPECT(net == NETWORK_ID);
-                }
-            }
-        }
-
-        // Request ledger with expanded transactions (binary)
-        {
-            Json::Value jv;
-            jv[jss::ledger_index] = ledgerSeq;
-            jv[jss::transactions] = true;
-            jv[jss::expand] = true;
-            jv[jss::binary] = true;
-            auto jrr = env.rpc("json", "ledger", to_string(jv))[jss::result];
-            BEAST_EXPECT(jrr[jss::status] == "success");
-            auto const& txns = jrr[jss::ledger][jss::transactions];
-            BEAST_EXPECT(txns.isArray() && txns.size() > 0);
-            for (auto const& tx : txns)
-            {
-                BEAST_EXPECT(tx.isMember(jss::ctid));
-                auto const ctid = tx[jss::ctid].asString();
-                auto const decoded = RPC::decodeCTID(ctid);
-                if (BEAST_EXPECT(decoded.has_value()))
-                {
-                    auto const [seq, idx, net] = *decoded;
-                    BEAST_EXPECT(seq == ledgerSeq);
-                    BEAST_EXPECT(net == NETWORK_ID);
-                }
-            }
-        }
-
-        // Request ledger with non-expanded transactions (should NOT have CTID)
-        {
-            Json::Value jv;
-            jv[jss::ledger_index] = ledgerSeq;
-            jv[jss::transactions] = true;
-            jv[jss::expand] = false;
-            auto jrr = env.rpc("json", "ledger", to_string(jv))[jss::result];
-            BEAST_EXPECT(jrr[jss::status] == "success");
-            auto const& txns = jrr[jss::ledger][jss::transactions];
-            BEAST_EXPECT(txns.isArray() && txns.size() > 0);
-            for (auto const& tx : txns)
-            {
-                // Non-expanded transactions are just hash strings
-                BEAST_EXPECT(tx.isString());
-            }
-        }
-    }
-
 public:
     void
     run() override
@@ -806,7 +685,6 @@ public:
         testNoQueue();
         testQueue();
         testLedgerAccountsOption();
-        testLedgerExpandedTransactionsCTID();
     }
 };
src/tests/libxrpl/basics/MallocTrim.cpp (new file, 209 lines)

@@ -0,0 +1,209 @@
#include <xrpl/basics/MallocTrim.h>

#include <boost/predef.h>

#include <gtest/gtest.h>

using namespace xrpl;

// cSpell:ignore statm

#if defined(__GLIBC__) && BOOST_OS_LINUX
namespace xrpl::detail {
long
parseStatmRSSkB(std::string const& statm);
}  // namespace xrpl::detail
#endif

TEST(MallocTrimReport, structure)
{
    // Test default construction
    MallocTrimReport report;
    EXPECT_EQ(report.supported, false);
    EXPECT_EQ(report.trimResult, -1);
    EXPECT_EQ(report.rssBeforeKB, -1);
    EXPECT_EQ(report.rssAfterKB, -1);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
    EXPECT_EQ(report.deltaKB(), 0);

    // Test deltaKB calculation - memory freed
    report.rssBeforeKB = 1000;
    report.rssAfterKB = 800;
    EXPECT_EQ(report.deltaKB(), -200);

    // Test deltaKB calculation - memory increased
    report.rssBeforeKB = 500;
    report.rssAfterKB = 600;
    EXPECT_EQ(report.deltaKB(), 100);

    // Test deltaKB calculation - no change
    report.rssBeforeKB = 1234;
    report.rssAfterKB = 1234;
    EXPECT_EQ(report.deltaKB(), 0);
}

#if defined(__GLIBC__) && BOOST_OS_LINUX
TEST(parseStatmRSSkB, standard_format)
{
    using xrpl::detail::parseStatmRSSkB;

    // Test standard format: size resident shared text lib data dt
    // Assuming 4KB page size: resident=1000 pages = 4000 KB
    {
        std::string statm = "25365 1000 2377 0 0 5623 0";
        long result = parseStatmRSSkB(statm);
        // Note: actual result depends on system page size
        // On most systems it's 4KB, so 1000 pages = 4000 KB
        EXPECT_GT(result, 0);
    }

    // Test with newline
    {
        std::string statm = "12345 2000 1234 0 0 3456 0\n";
        long result = parseStatmRSSkB(statm);
        EXPECT_GT(result, 0);
    }

    // Test with tabs
    {
        std::string statm = "12345\t2000\t1234\t0\t0\t3456\t0";
        long result = parseStatmRSSkB(statm);
        EXPECT_GT(result, 0);
    }

    // Test zero resident pages
    {
        std::string statm = "25365 0 2377 0 0 5623 0";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, 0);
    }

    // Test with extra whitespace
    {
        std::string statm = "  25365   1000   2377  ";
        long result = parseStatmRSSkB(statm);
        EXPECT_GT(result, 0);
    }

    // Test empty string
    {
        std::string statm = "";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }

    // Test malformed data (only one field)
    {
        std::string statm = "25365";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }

    // Test malformed data (non-numeric)
    {
        std::string statm = "abc def ghi";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }

    // Test malformed data (second field non-numeric)
    {
        std::string statm = "25365 abc 2377";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }
}
#endif

TEST(mallocTrim, without_debug_logging)
{
    beast::Journal journal{beast::Journal::getNullSink()};

    MallocTrimReport report = mallocTrim("without_debug", journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
    EXPECT_EQ(report.supported, true);
    EXPECT_GE(report.trimResult, 0);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
#else
    EXPECT_EQ(report.supported, false);
    EXPECT_EQ(report.trimResult, -1);
    EXPECT_EQ(report.rssBeforeKB, -1);
    EXPECT_EQ(report.rssAfterKB, -1);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
#endif
}

TEST(mallocTrim, empty_tag)
{
    beast::Journal journal{beast::Journal::getNullSink()};
    MallocTrimReport report = mallocTrim("", journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
    EXPECT_EQ(report.supported, true);
    EXPECT_GE(report.trimResult, 0);
#else
    EXPECT_EQ(report.supported, false);
#endif
}

TEST(mallocTrim, with_debug_logging)
{
    struct DebugSink : public beast::Journal::Sink
    {
        DebugSink() : Sink(beast::severities::kDebug, false)
        {
        }
        void
        write(beast::severities::Severity, std::string const&) override
        {
        }
        void
        writeAlways(beast::severities::Severity, std::string const&) override
        {
        }
    };

    DebugSink sink;
    beast::Journal journal{sink};

    MallocTrimReport report = mallocTrim("debug_test", journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
    EXPECT_EQ(report.supported, true);
    EXPECT_GE(report.trimResult, 0);
    EXPECT_GE(report.durationUs.count(), 0);
    EXPECT_GE(report.minfltDelta, 0);
    EXPECT_GE(report.majfltDelta, 0);
#else
    EXPECT_EQ(report.supported, false);
    EXPECT_EQ(report.trimResult, -1);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
#endif
}

TEST(mallocTrim, repeated_calls)
{
    beast::Journal journal{beast::Journal::getNullSink()};

    // Call malloc_trim multiple times to ensure it's safe
    for (int i = 0; i < 5; ++i)
    {
        MallocTrimReport report = mallocTrim("iteration_" + std::to_string(i), journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
        EXPECT_EQ(report.supported, true);
        EXPECT_GE(report.trimResult, 0);
#else
        EXPECT_EQ(report.supported, false);
#endif
    }
}
@@ -2,13 +2,11 @@

 #include <xrpld/app/ledger/LedgerToJson.h>
 #include <xrpld/app/misc/DeliverMax.h>
 #include <xrpld/app/misc/TxQ.h>
-#include <xrpld/rpc/CTID.h>
 #include <xrpld/rpc/Context.h>
 #include <xrpld/rpc/DeliveredAmount.h>
 #include <xrpld/rpc/MPTokenIssuanceID.h>

 #include <xrpl/basics/base_uint.h>
-#include <xrpl/core/NetworkIDService.h>
 #include <xrpl/protocol/ApiVersion.h>
 #include <xrpl/protocol/jss.h>
@@ -168,17 +166,6 @@ fillJsonTx(
         }
     }

-    // compute outgoing CTID
-    if (fill.context && stMeta && stMeta->isFieldPresent(sfTransactionIndex))
-    {
-        uint32_t lgrSeq = fill.ledger.seq();
-        uint32_t txnIdx = stMeta->getFieldU32(sfTransactionIndex);
-        uint32_t netID = fill.context->app.getNetworkIDService().getNetworkID();
-
-        if (auto ctid = RPC::encodeCTID(lgrSeq, txnIdx, netID))
-            txJson[jss::ctid] = *ctid;
-    }
-
     if ((fill.options & LedgerFill::ownerFunds) && txn->getTxnType() == ttOFFER_CREATE)
     {
         auto const account = txn->getAccountID(sfAccount);
@@ -31,6 +31,7 @@

 #include <xrpld/shamap/NodeFamily.h>

 #include <xrpl/basics/ByteUtilities.h>
+#include <xrpl/basics/MallocTrim.h>
 #include <xrpl/basics/ResolverAsio.h>
 #include <xrpl/basics/random.h>
 #include <xrpl/beast/asio/io_latency_probe.h>
@@ -1053,6 +1054,8 @@ public:
                 << "; size after: " << cachedSLEs_.size();
         }

+        mallocTrim("doSweep", m_journal);
+
         // Set timer to do another sweep later.
         setSweepTimer();
     }
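The MallocTrim.h note above suggests rate limiting trim calls. A hypothetical way to do that at this call site — the `lastTrim_` member and the one-minute interval are illustrative and not part of the change, which calls mallocTrim() on every sweep:

```cpp
// Sketch only: trim at most once per minute from doSweep(), assuming a
// std::chrono::steady_clock::time_point member `lastTrim_` were added
// alongside the existing sweep state.
auto const now = std::chrono::steady_clock::now();
if (now - lastTrim_ >= std::chrono::minutes{1})
{
    mallocTrim("doSweep", m_journal);
    lastTrim_ = now;
}
```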