Compare commits

...

2 Commits

Author SHA1 Message Date
Michael Legleux
8ef1811f63 feat: Build Linux packages in GitHub 2026-03-24 15:42:27 -07:00
Alex Kremer
0eedefbf45 refactor: Enable more clang-tidy readability checks (#6595)
Co-authored-by: Sergey Kuznetsov <kuzzz99@gmail.com>
2026-03-24 15:42:12 +00:00
276 changed files with 2047 additions and 945 deletions

View File

@@ -104,10 +104,13 @@ Checks: "-*,
readability-const-return-type,
readability-container-contains,
readability-container-size-empty,
readability-convert-member-functions-to-static,
readability-duplicate-include,
readability-else-after-return,
readability-enum-initial-value,
readability-implicit-bool-conversion,
readability-make-member-function-const,
readability-math-missing-parentheses,
readability-misleading-indentation,
readability-non-const-parameter,
readability-redundant-casting,
@@ -116,7 +119,9 @@ Checks: "-*,
readability-redundant-member-init,
readability-redundant-string-init,
readability-reference-to-constructed-temporary,
readability-simplify-boolean-expr,
readability-static-definition-in-anonymous-namespace,
readability-suspicious-call-argument,
readability-use-std-min-max
"
# ---
@@ -127,14 +132,9 @@ Checks: "-*,
# misc-include-cleaner,
# misc-redundant-expression,
#
# readability-convert-member-functions-to-static,
# readability-implicit-bool-conversion,
# readability-inconsistent-declaration-parameter-name,
# readability-inconsistent-declaration-parameter-name, # in this codebase this check will break a lot of arg names
# readability-static-accessed-through-instance, # this check is probably unnecessary. it makes the code less readable
# readability-identifier-naming,
# readability-math-missing-parentheses,
# readability-simplify-boolean-expr,
# readability-suspicious-call-argument,
# readability-static-accessed-through-instance,
#
# modernize-concat-nested-namespaces,
# modernize-pass-by-value,

View File

@@ -99,14 +99,15 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# RHEL:
# - 9 using GCC 12: Debug on linux/amd64.
# - 9 using GCC 12: Debug and Release on linux/amd64
# (Release is required for RPM packaging).
# - 10 using Clang: Release on linux/amd64.
if os["distro_name"] == "rhel":
skip = True
if os["distro_version"] == "9":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
and build_type == "Debug"
and build_type in ["Debug", "Release"]
and architecture["platform"] == "linux/amd64"
):
skip = False
@@ -121,7 +122,8 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
continue
# Ubuntu:
# - Jammy using GCC 12: Debug on linux/arm64.
# - Jammy using GCC 12: Debug on linux/arm64, Release on
# linux/amd64 (Release is required for DEB packaging).
# - Noble using GCC 14: Release on linux/amd64.
# - Noble using Clang 18: Debug on linux/amd64.
# - Noble using Clang 19: Release on linux/arm64.
@@ -134,6 +136,12 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
and architecture["platform"] == "linux/arm64"
):
skip = False
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
and build_type == "Release"
and architecture["platform"] == "linux/amd64"
):
skip = False
elif os["distro_version"] == "noble":
if (
f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"

66
.github/workflows/manual-package.yml vendored Normal file
View File

@@ -0,0 +1,66 @@
# Manually build Linux packages (DEB and/or RPM) from a pre-built binary
# artifact, triggered from the Actions UI via workflow_dispatch.
name: Manual Package Build

on:
  workflow_dispatch:
    inputs:
      pkg_type:
        description: "Package type"
        required: true
        type: choice
        options:
          - deb
          - rpm
          - both
      # NOTE(review): this input is declared but not referenced by any job
      # below — wire it into the package jobs (to download the artifact from
      # a specific run) or remove it.
      artifact_run_id:
        description: "Run ID to download binary artifact from (leave empty for latest on this branch)"
        required: false
        type: string
      version:
        description: "Version override (leave empty to auto-detect)"
        required: false
        type: string
      pkg_release:
        description: "Package release number (default: 1)"
        required: false
        type: string
        default: "1"

defaults:
  run:
    shell: bash

jobs:
  # Resolve the package version: use the manual override when provided,
  # otherwise check out the repo and derive it via the generate-version action.
  generate-version:
    runs-on: ubuntu-latest
    outputs:
      # Falls back to the step output only when no override was given.
      version: ${{ inputs.version || steps.version.outputs.version }}
    steps:
      - name: Checkout repository
        if: ${{ !inputs.version }}
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Generate version
        if: ${{ !inputs.version }}
        id: version
        uses: ./.github/actions/generate-version

  package-deb:
    if: ${{ inputs.pkg_type == 'deb' || inputs.pkg_type == 'both' }}
    needs: generate-version
    uses: ./.github/workflows/reusable-package.yml
    with:
      pkg_type: deb
      artifact_name: xrpld-ubuntu-jammy-gcc-12-amd64-release
      version: ${{ needs.generate-version.outputs.version }}
      pkg_release: ${{ inputs.pkg_release }}
      container_image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12

  package-rpm:
    if: ${{ inputs.pkg_type == 'rpm' || inputs.pkg_type == 'both' }}
    needs: generate-version
    uses: ./.github/workflows/reusable-package.yml
    with:
      pkg_type: rpm
      artifact_name: xrpld-rhel-9-gcc-12-amd64-release
      version: ${{ needs.generate-version.outputs.version }}
      pkg_release: ${{ inputs.pkg_release }}
      container_image: ghcr.io/xrplf/ci/rhel-9:gcc-12

View File

@@ -67,6 +67,7 @@ jobs:
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-clang-tidy.yml
.github/workflows/reusable-clang-tidy-files.yml
.github/workflows/reusable-package.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/reusable-upload-recipe.yml
@@ -81,6 +82,8 @@ jobs:
CMakeLists.txt
conanfile.py
conan.lock
package/**
- name: Check whether to run
# This step determines whether the rest of the workflow should
# run. The rest of the workflow will run if this job runs AND at
@@ -137,6 +140,39 @@ jobs:
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
generate-version:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
runs-on: ubuntu-latest
outputs:
version: ${{ steps.version.outputs.version }}
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Generate version
id: version
uses: ./.github/actions/generate-version
package-deb:
needs: [should-run, build-test, generate-version]
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-package.yml
with:
pkg_type: deb
artifact_name: xrpld-ubuntu-jammy-gcc-12-amd64-release
version: ${{ needs.generate-version.outputs.version }}
container_image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
package-rpm:
needs: [should-run, build-test, generate-version]
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-package.yml
with:
pkg_type: rpm
artifact_name: xrpld-rhel-9-gcc-12-amd64-release
version: ${{ needs.generate-version.outputs.version }}
container_image: ghcr.io/xrplf/ci/rhel-9:gcc-12
upload-recipe:
needs:
- should-run

View File

@@ -1,5 +1,5 @@
# This workflow uploads the libxrpl recipe to the Conan remote when a versioned
# tag is pushed.
# This workflow uploads the libxrpl recipe to the Conan remote and builds
# release packages when a versioned tag is pushed.
name: Tag
on:
@@ -22,3 +22,49 @@ jobs:
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
build-test:
if: ${{ github.repository == 'XRPLF/rippled' }}
uses: ./.github/workflows/reusable-build-test.yml
strategy:
fail-fast: true
matrix:
os: [linux]
with:
ccache_enabled: false
os: ${{ matrix.os }}
strategy_matrix: minimal
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
generate-version:
if: ${{ github.repository == 'XRPLF/rippled' }}
runs-on: ubuntu-latest
outputs:
version: ${{ steps.version.outputs.version }}
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Generate version
id: version
uses: ./.github/actions/generate-version
package-deb:
needs: [build-test, generate-version]
if: ${{ github.repository == 'XRPLF/rippled' }}
uses: ./.github/workflows/reusable-package.yml
with:
pkg_type: deb
artifact_name: xrpld-ubuntu-jammy-gcc-12-amd64-release
version: ${{ needs.generate-version.outputs.version }}
container_image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
package-rpm:
needs: [build-test, generate-version]
if: ${{ github.repository == 'XRPLF/rippled' }}
uses: ./.github/workflows/reusable-package.yml
with:
pkg_type: rpm
artifact_name: xrpld-rhel-9-gcc-12-amd64-release
version: ${{ needs.generate-version.outputs.version }}
container_image: ghcr.io/xrplf/ci/rhel-9:gcc-12

View File

@@ -38,6 +38,8 @@ on:
- "CMakeLists.txt"
- "conanfile.py"
- "conan.lock"
- "package/**"
- ".github/workflows/reusable-package.yml"
# Run at 06:32 UTC on every day of the week from Monday through Friday. This
# will force all dependencies to be rebuilt, which is useful to verify that
@@ -77,7 +79,7 @@ jobs:
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
os: [linux, macos, windows]
os: [linux]
with:
# Enable ccache only for events targeting the XRPLF repository, since
# other accounts will not have access to our remote cache storage.
@@ -98,3 +100,32 @@ jobs:
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
generate-version:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.version.outputs.version }}
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Generate version
id: version
uses: ./.github/actions/generate-version
package-deb:
needs: [build-test, generate-version]
uses: ./.github/workflows/reusable-package.yml
with:
pkg_type: deb
artifact_name: xrpld-ubuntu-jammy-gcc-12-amd64-release
version: ${{ needs.generate-version.outputs.version }}
container_image: ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12
package-rpm:
needs: [build-test, generate-version]
uses: ./.github/workflows/reusable-package.yml
with:
pkg_type: rpm
artifact_name: xrpld-rhel-9-gcc-12-amd64-release
version: ${{ needs.generate-version.outputs.version }}
container_image: ghcr.io/xrplf/ci/rhel-9:gcc-12

76
.github/workflows/reusable-package.yml vendored Normal file
View File

@@ -0,0 +1,76 @@
# Build a Linux package (DEB or RPM) from a pre-built binary artifact.
name: Package

on:
  workflow_call:
    inputs:
      pkg_type:
        description: "Package type to build: deb or rpm."
        required: true
        type: string
      artifact_name:
        description: "Name of the pre-built binary artifact to download."
        required: true
        type: string
      version:
        description: "Version string used for naming the output artifact."
        required: true
        type: string
      pkg_release:
        description: "Package release number. Increment when repackaging the same executable."
        required: false
        type: string
        default: "1"
      container_image:
        description: "Container image to use for packaging."
        required: true
        type: string

defaults:
  run:
    shell: bash

env:
  BUILD_DIR: build

jobs:
  package:
    name: ${{ inputs.pkg_type }} (${{ inputs.version }})
    runs-on: ["self-hosted", "Linux", "X64", "heavy"]
    container: ${{ inputs.container_image }}
    timeout-minutes: 30
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Download pre-built binary
        # NOTE(review): pin to a commit SHA like the other actions in this
        # workflow (checkout and upload-artifact are SHA-pinned; a mutable
        # tag can be repointed).
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.artifact_name }}
          path: ${{ env.BUILD_DIR }}
      - name: Make binary executable
        # The downloaded artifact does not carry the executable bit.
        run: chmod +x ${{ env.BUILD_DIR }}/xrpld
      - name: Generate RPM spec from template
        if: ${{ inputs.pkg_type == 'rpm' }}
        # Mirrors the configure_file() substitution that
        # cmake/XrplPackaging.cmake performs for local builds.
        run: |
          mkdir -p ${{ env.BUILD_DIR }}/package/rpm
          sed -e "s/@xrpld_version@/${{ inputs.version }}/" \
              -e "s/@pkg_release@/${{ inputs.pkg_release }}/" \
              package/rpm/xrpld.spec.in > ${{ env.BUILD_DIR }}/package/rpm/xrpld.spec
      - name: Build package
        run: |
          ./package/build_pkg.sh ${{ inputs.pkg_type }} . ${{ env.BUILD_DIR }} "${{ inputs.version }}" "${{ inputs.pkg_release }}"
      - name: Upload package artifact
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: xrpld-${{ inputs.pkg_type }}-${{ inputs.version }}
          path: |
            ${{ env.BUILD_DIR }}/debbuild/*.deb
            ${{ env.BUILD_DIR }}/debbuild/*.ddeb
            ${{ env.BUILD_DIR }}/rpmbuild/RPMS/**/*.rpm
          if-no-files-found: error

View File

@@ -133,6 +133,7 @@ endif()
include(XrplCore)
include(XrplInstall)
include(XrplPackaging)
include(XrplValidatorKeys)
if(tests)

View File

@@ -12,14 +12,14 @@ if(is_root_project AND TARGET xrpld)
install(
FILES "${CMAKE_CURRENT_SOURCE_DIR}/cfg/xrpld-example.cfg"
DESTINATION "${CMAKE_INSTALL_SYSCONFDIR}/xrpld"
DESTINATION "${CMAKE_INSTALL_SYSCONFDIR}"
RENAME xrpld.cfg
COMPONENT runtime
)
install(
FILES "${CMAKE_CURRENT_SOURCE_DIR}/cfg/validators-example.txt"
DESTINATION "${CMAKE_INSTALL_SYSCONFDIR}/xrpld"
DESTINATION "${CMAKE_INSTALL_SYSCONFDIR}"
RENAME validators.txt
COMPONENT runtime
)

147
cmake/XrplPackaging.cmake Normal file
View File

@@ -0,0 +1,147 @@
#[===================================================================[
   Linux packaging support: RPM and Debian targets + install tests
#]===================================================================]

# The spec, debian/install, and symlink lists all hard-code the /opt/xrpld
# layout, so any other prefix would produce a broken package; bail out early.
if(NOT CMAKE_INSTALL_PREFIX STREQUAL "/opt/xrpld")
    message(
        STATUS
        "Packaging targets require -DCMAKE_INSTALL_PREFIX=/opt/xrpld "
        "(current: '${CMAKE_INSTALL_PREFIX}'); skipping."
    )
    return()
endif()

# Generate the RPM spec from template (substitutes @xrpld_version@, @pkg_release@).
# NOTE(review): xrpld_version is assumed to be defined by the including scope
# before this module runs — confirm against the caller.
if(NOT DEFINED pkg_release)
    set(pkg_release 1)
endif()
configure_file(
    ${CMAKE_SOURCE_DIR}/package/rpm/xrpld.spec.in
    ${CMAKE_BINARY_DIR}/package/rpm/xrpld.spec
    @ONLY
)

find_program(RPMBUILD_EXECUTABLE rpmbuild)
if(RPMBUILD_EXECUTABLE)
    # No version arguments here: the spec generated above already carries the
    # substituted version/release (unlike the deb target below).
    add_custom_target(
        package-rpm
        COMMAND
            ${CMAKE_SOURCE_DIR}/package/build_pkg.sh rpm ${CMAKE_SOURCE_DIR}
            ${CMAKE_BINARY_DIR}
        WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
        COMMENT "Building RPM package"
        VERBATIM
    )
else()
    message(STATUS "rpmbuild not found; 'package-rpm' target not available")
endif()

find_program(DPKG_BUILDPACKAGE_EXECUTABLE dpkg-buildpackage)
if(DPKG_BUILDPACKAGE_EXECUTABLE)
    # The Debian changelog is generated at build time by build_pkg.sh, so the
    # version must be passed on the command line.
    add_custom_target(
        package-deb
        COMMAND
            ${CMAKE_SOURCE_DIR}/package/build_pkg.sh deb ${CMAKE_SOURCE_DIR}
            ${CMAKE_BINARY_DIR} ${xrpld_version}
        WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
        COMMENT "Building Debian package"
        VERBATIM
    )
else()
    message(
        STATUS
        "dpkg-buildpackage not found; 'package-deb' target not available"
    )
endif()

#[===================================================================[
   CTest fixtures for package install verification (requires docker)
#]===================================================================]
find_program(DOCKER_EXECUTABLE docker)
if(NOT DOCKER_EXECUTABLE)
    message(STATUS "docker not found; package install tests not available")
    return()
endif()

set(DEB_TEST_IMAGE "geerlingguy/docker-ubuntu2204-ansible:latest")
set(RPM_TEST_IMAGE "geerlingguy/docker-rockylinux9-ansible:latest")

foreach(PKG deb rpm)
    if(PKG STREQUAL "deb")
        set(IMAGE ${DEB_TEST_IMAGE})
    else()
        set(IMAGE ${RPM_TEST_IMAGE})
    endif()
    # Fixture: start container.
    # Runs systemd as PID 1 (privileged + cgroup mounts) so service install
    # scripts can run; the source tree is mounted read-only at /root.
    add_test(
        NAME ${PKG}_container_start
        COMMAND
            sh -c
            "docker rm -f xrpld_${PKG}_install_test 2>/dev/null || true && \
            docker run --rm -d \
            --name xrpld_${PKG}_install_test \
            --memory=45g --memory-swap=45g \
            --privileged \
            --cgroupns host \
            --volume '${CMAKE_SOURCE_DIR}:/root:ro' \
            --volume /sys/fs/cgroup:/sys/fs/cgroup:rw \
            --tmpfs /tmp --tmpfs /run --tmpfs /run/lock \
            ${IMAGE} \
            /usr/sbin/init"
    )
    set_tests_properties(
        ${PKG}_container_start
        PROPERTIES FIXTURES_SETUP ${PKG}_container LABELS packaging
    )
    # Fixture: stop container.
    # On CI: always stop. Locally: leave running on failure for diagnosis.
    # NOTE(review): the failure path relies on the test scripts creating
    # /tmp/test_failed inside the container — confirm smoketest.sh /
    # check_install_paths.sh actually write that sentinel.
    add_test(
        NAME ${PKG}_container_stop
        COMMAND
            sh -c
            "if [ -n \"$CI\" ] || ! docker exec xrpld_${PKG}_install_test test -f /tmp/test_failed 2>/dev/null; then \
            docker rm -f xrpld_${PKG}_install_test; \
            else \
            echo 'Tests failed — leaving xrpld_${PKG}_install_test running for diagnosis'; \
            echo 'Clean up with: docker rm -f xrpld_${PKG}_install_test'; \
            fi"
    )
    set_tests_properties(
        ${PKG}_container_stop
        PROPERTIES FIXTURES_CLEANUP ${PKG}_container LABELS packaging
    )
    # Install package and run smoke test (also a fixture other tests depend on).
    add_test(
        NAME ${PKG}_install
        COMMAND
            docker exec -w /root xrpld_${PKG}_install_test bash
            /root/package/test/smoketest.sh local
    )
    set_tests_properties(
        ${PKG}_install
        PROPERTIES
            FIXTURES_REQUIRED ${PKG}_container
            FIXTURES_SETUP ${PKG}_installed
            LABELS packaging
            TIMEOUT 600
    )
    # Validate install paths and compat symlinks (requires a prior install).
    add_test(
        NAME ${PKG}_install_paths
        COMMAND
            docker exec -w /root xrpld_${PKG}_install_test sh
            /root/package/test/check_install_paths.sh
    )
    set_tests_properties(
        ${PKG}_install_paths
        PROPERTIES
            FIXTURES_REQUIRED "${PKG}_container;${PKG}_installed"
            LABELS packaging
            TIMEOUT 60
    )
endforeach()

View File

@@ -93,11 +93,14 @@ words:
- desync
- desynced
- determ
- disablerepo
- distro
- doxyfile
- dxrpl
- enablerepo
- endmacro
- exceptioned
- EXPECT_STREQ
- Falco
- fcontext
- finalizers
@@ -151,6 +154,7 @@ words:
- Merkle
- Metafuncton
- misprediction
- missingok
- mptbalance
- MPTDEX
- mptflags
@@ -181,7 +185,9 @@ words:
- NOLINT
- NOLINTNEXTLINE
- nonxrp
- noreplace
- noripple
- notifempty
- nudb
- nullptr
- nunl
@@ -201,6 +207,7 @@ words:
- preauthorize
- preauthorizes
- preclaim
- preun
- protobuf
- protos
- ptrs
@@ -235,12 +242,14 @@ words:
- sfields
- shamap
- shamapitem
- shlibs
- sidechain
- SIGGOOD
- sle
- sles
- soci
- socidb
- SRPMS
- sslws
- statsd
- STATSDCOLLECTOR
@@ -268,8 +277,8 @@ words:
- txn
- txns
- txs
- UBSAN
- ubsan
- UBSAN
- umant
- unacquired
- unambiguity
@@ -305,7 +314,6 @@ words:
- xbridge
- xchain
- ximinez
- EXPECT_STREQ
- XMACRO
- xrpkuwait
- xrpl

View File

@@ -15,7 +15,7 @@
#define ALWAYS_OR_UNREACHABLE(cond, message) assert((message) && (cond))
#define SOMETIMES(cond, message, ...)
#define REACHABLE(message, ...)
#define UNREACHABLE(message, ...) assert((message) && false)
#define UNREACHABLE(message, ...) assert((message) && false) // NOLINT(misc-static-assert)
#endif
#define XRPL_ASSERT ALWAYS_OR_UNREACHABLE

View File

@@ -316,7 +316,7 @@ private:
// Returns the limit of running jobs for the given job type.
// For jobs with no limit, we return the largest int. Hopefully that
// will be enough.
int
static int
getJobLimit(JobType type);
};

View File

@@ -72,12 +72,12 @@ public:
isDelegable(std::uint32_t const& permissionValue, Rules const& rules) const;
// for tx level permission, permission value is equal to tx type plus one
uint32_t
txToPermissionType(TxType const& type) const;
static uint32_t
txToPermissionType(TxType const& type);
// tx type value is permission value minus one
TxType
permissionToTxType(uint32_t const& value) const;
static TxType
permissionToTxType(uint32_t const& value);
};
} // namespace xrpl

View File

@@ -336,7 +336,7 @@ public:
static_assert(N > 0, "");
}
std::size_t
[[nodiscard]] bool
empty() const noexcept
{
return remain_ == 0;

View File

@@ -520,7 +520,7 @@ private:
// getMissingNodes helper functions
void
gmn_ProcessNodes(MissingNodes&, MissingNodes::StackEntry& node);
void
static void
gmn_ProcessDeferredReads(MissingNodes&);
// fetch from DB helper function

View File

@@ -111,7 +111,7 @@ public:
checkInvariants(TER const result, XRPAmount const fee);
private:
TER
static TER
failInvariantCheck(TER const result);
template <std::size_t... Is>

View File

@@ -48,7 +48,7 @@ private:
bool
isValidEntry(std::shared_ptr<SLE const> const& before, std::shared_ptr<SLE const> const& after);
STAmount
static STAmount
calculateBalanceChange(
std::shared_ptr<SLE const> const& before,
std::shared_ptr<SLE const> const& after,
@@ -63,7 +63,7 @@ private:
std::shared_ptr<SLE const>
findIssuer(AccountID const& issuerID, ReadView const& view);
bool
static bool
validateIssuerChanges(
std::shared_ptr<SLE const> const& issuer,
IssuerChanges const& changes,
@@ -71,7 +71,7 @@ private:
beast::Journal const& j,
bool enforce);
bool
static bool
validateFrozenState(
BalanceChange const& change,
bool high,

View File

@@ -111,7 +111,7 @@ public:
void
visitEntry(bool, std::shared_ptr<SLE const> const&, std::shared_ptr<SLE const> const&);
bool
static bool
finalize(STTx const&, TER const, XRPAmount const, ReadView const&, beast::Journal const&);
};

View File

@@ -41,8 +41,8 @@ class ValidLoanBroker
// for LoanBroker pseudo-accounts.
std::vector<SLE::const_pointer> mpts_;
bool
goodZeroDirectory(ReadView const& view, SLE::const_ref dir, beast::Journal const& j) const;
static bool
goodZeroDirectory(ReadView const& view, SLE::const_ref dir, beast::Journal const& j);
public:
void

118
package/README.md Normal file
View File

@@ -0,0 +1,118 @@
# Linux Packaging
This directory contains all files needed to build RPM and Debian packages for `xrpld`.
## Directory layout
```
package/
build_pkg.sh Staging and build script (called by CMake targets and CI)
rpm/
xrpld.spec.in RPM spec template (substitutes @xrpld_version@, @pkg_release@)
deb/
debian/ Debian control files (control, rules, install, links, conffiles, ...)
shared/
xrpld.service systemd unit file (used by both RPM and DEB)
xrpld.sysusers sysusers.d config (used by both RPM and DEB)
xrpld.tmpfiles tmpfiles.d config (used by both RPM and DEB)
xrpld.logrotate logrotate config (installed to /opt/xrpld/bin/, user activates)
update-xrpld.sh auto-update script (installed to /opt/xrpld/bin/)
update-xrpld-cron cron entry for auto-update (installed to /opt/xrpld/bin/)
test/
smoketest.sh Package install smoke test
check_install_paths.sh Verify install paths and compat symlinks
```
## Prerequisites
| Package type | Container | Tool required |
| ------------ | -------------------------------------- | --------------------------------------------------------------- |
| RPM | `ghcr.io/xrplf/ci/rhel-9:gcc-12` | `rpmbuild` |
| DEB | `ghcr.io/xrplf/ci/ubuntu-jammy:gcc-12` | `dpkg-buildpackage`, `debhelper (>= 13)`, `dh-sequence-systemd` |
## Building packages
### Via CI (recommended)
The `reusable-package.yml` workflow downloads a pre-built `xrpld` binary artifact
and calls `build_pkg.sh` directly. No CMake configure or build step is needed in
the packaging job.
### Via CMake (local development)
Configure with the required install prefix, then invoke the target:
```bash
cmake \
-DCMAKE_INSTALL_PREFIX=/opt/xrpld \
-Dxrpld=ON \
-Dtests=OFF \
..
# RPM (in RHEL container):
cmake --build . --target package-rpm
# DEB (in Debian/Ubuntu container):
cmake --build . --target package-deb
```
The `cmake/XrplPackaging.cmake` module gates each target on whether the required
tool (`rpmbuild` / `dpkg-buildpackage`) is present at configure time, so
configuring on a host that lacks one simply omits the corresponding target.
`CMAKE_INSTALL_PREFIX` must be `/opt/xrpld`; if it is not, both targets are
skipped with a `STATUS` message.
## How `build_pkg.sh` works
`build_pkg.sh <pkg_type> <src_dir> <build_dir> [version] [pkg_release]` stages
all files and invokes the platform build tool. It resolves `src_dir` and
`build_dir` to absolute paths, then calls `stage_common()` to copy the binary,
config files, and shared support files into the staging area.
### RPM
1. Creates the standard `rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}` tree inside the build directory.
2. Copies the generated `xrpld.spec` and all source files (binary, configs, service files) into `SOURCES/`.
3. Runs `rpmbuild -bb`. The spec uses manual `install` commands to place files.
4. Output: `rpmbuild/RPMS/x86_64/xrpld-*.rpm`
### DEB
1. Creates a staging source tree at `debbuild/source/` inside the build directory.
2. Stages the binary, configs, `README.md`, and `LICENSE.md`.
3. Copies `package/deb/debian/` control files into `debbuild/source/debian/`.
4. Copies shared service/sysusers/tmpfiles into `debian/` where `dh_installsystemd`, `dh_installsysusers`, and `dh_installtmpfiles` pick them up automatically.
5. Generates a minimal `debian/changelog` (pre-release versions use `~` instead of `-`).
6. Runs `dpkg-buildpackage -b --no-sign -d` (`-d` skips build-dependency checks, since the binary is pre-built). `debian/rules` uses manual `install` commands.
7. Output: `debbuild/*.deb` and `debbuild/*.ddeb` (dbgsym package)
## Post-build verification
```bash
# DEB
dpkg-deb -c debbuild/*.deb | grep -E 'systemd|sysusers|tmpfiles'
lintian -I debbuild/*.deb
# RPM
rpm -qlp rpmbuild/RPMS/x86_64/*.rpm
```
## Reproducibility
The following environment variables improve build reproducibility. They are not
set automatically by `build_pkg.sh`; set them manually if needed:
```bash
export SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
export TZ=UTC
export LC_ALL=C.UTF-8
export GZIP=-n
export DEB_BUILD_OPTIONS="noautodbgsym reproducible=+fixfilepath"
```
## TODO
- Port debsigs signing instructions and integrate into CI.
- Port RPM GPG signing setup (key import + `%{?_gpg_sign}` in spec).
- Introduce a virtual package for key rotation.

91
package/build_pkg.sh Executable file
View File

@@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Build an RPM or Debian package from a pre-built xrpld binary.
#
# Usage: build_pkg.sh <pkg_type> <src_dir> <build_dir> [version] [pkg_release]
#   pkg_type    : rpm | deb
#   src_dir     : path to repository root
#   build_dir   : directory containing the pre-built xrpld binary
#   version     : package version string (e.g. 2.4.0-b1); defaults to 1.0.0
#   pkg_release : package release number (default: 1)
set -euo pipefail

PKG_TYPE="${1:?pkg_type required}"
# Resolve to absolute paths so the script works regardless of the caller's cwd.
SRC_DIR="$(cd "${2:?src_dir required}" && pwd)"
BUILD_DIR="$(cd "${3:?build_dir required}" && pwd)"
VERSION="${4:-1.0.0}"
PKG_RELEASE="${5:-1}"
SHARED="${SRC_DIR}/package/shared"

# Stage files common to both package types into a target directory.
stage_common() {
    local dest="$1"
    cp "${BUILD_DIR}/xrpld" "${dest}/xrpld"
    cp "${SRC_DIR}/cfg/xrpld-example.cfg" "${dest}/xrpld.cfg"
    cp "${SRC_DIR}/cfg/validators-example.txt" "${dest}/validators.txt"
    cp "${SHARED}/xrpld.logrotate" "${dest}/xrpld.logrotate"
    cp "${SHARED}/update-xrpld.sh" "${dest}/update-xrpld.sh"
    cp "${SHARED}/update-xrpld-cron" "${dest}/update-xrpld-cron"
}

# Build the RPM under <build_dir>/rpmbuild. The spec is expected to exist at
# <build_dir>/package/rpm/xrpld.spec with version/release already substituted
# (by CMake configure_file or the CI sed step), so VERSION/PKG_RELEASE are
# not used here.
build_rpm() {
    local topdir="${BUILD_DIR}/rpmbuild"
    mkdir -p "${topdir}"/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
    cp "${BUILD_DIR}/package/rpm/xrpld.spec" "${topdir}/SPECS/xrpld.spec"
    stage_common "${topdir}/SOURCES"
    cp "${SHARED}/xrpld.service" "${topdir}/SOURCES/xrpld.service"
    cp "${SHARED}/xrpld.sysusers" "${topdir}/SOURCES/xrpld.sysusers"
    cp "${SHARED}/xrpld.tmpfiles" "${topdir}/SOURCES/xrpld.tmpfiles"
    set -x
    rpmbuild -bb \
        --define "_topdir ${topdir}" \
        "${topdir}/SPECS/xrpld.spec"
}

# Build the DEB from a staging source tree at <build_dir>/debbuild/source.
build_deb() {
    local staging="${BUILD_DIR}/debbuild/source"
    rm -rf "${staging}"
    mkdir -p "${staging}"
    stage_common "${staging}"
    cp "${SRC_DIR}/README.md" "${staging}/"
    cp "${SRC_DIR}/LICENSE.md" "${staging}/"
    # debian/ control files
    cp -r "${SRC_DIR}/package/deb/debian" "${staging}/debian"
    # Shared support files for dh_installsystemd / sysusers / tmpfiles
    cp "${SHARED}/xrpld.service" "${staging}/debian/xrpld.service"
    cp "${SHARED}/xrpld.sysusers" "${staging}/debian/xrpld.sysusers"
    cp "${SHARED}/xrpld.tmpfiles" "${staging}/debian/xrpld.tmpfiles"
    # Generate debian/changelog (pre-release versions use ~ instead of -).
    local deb_version="${VERSION//-/\~}"
    # TODO: Add facility for generating the changelog
    # The blank lines, the two-space indent on the change line, and the
    # "space-dash-dash ... two-space" trailer are all required by the
    # deb-changelog(5) format; dpkg-buildpackage rejects malformed entries.
    cat > "${staging}/debian/changelog" <<EOF
xrpld (${deb_version}-${PKG_RELEASE}) unstable; urgency=medium

  * Release ${VERSION}.

 -- XRPL Foundation <contact@xrplf.org>  $(LC_ALL=C date -u -R)
EOF
    chmod +x "${staging}/debian/rules"
    set -x
    # Run in a subshell so the cwd change does not leak to the caller.
    (
        cd "${staging}"
        # -d skips build-dependency checks: the binary is pre-built, so the
        # packaging container does not need the full build toolchain.
        dpkg-buildpackage -b --no-sign -d
    )
}

case "${PKG_TYPE}" in
    rpm) build_rpm ;;
    deb) build_deb ;;
    *)
        echo "Unknown package type: ${PKG_TYPE}" >&2
        exit 1
        ;;
esac

View File

@@ -0,0 +1,33 @@
Source: xrpld
Section: net
Priority: optional
Maintainer: XRPL Foundation <contact@xrplf.org>
Rules-Requires-Root: no
Build-Depends:
debhelper-compat (= 13),
Standards-Version: 4.7.0
Homepage: https://github.com/XRPLF/rippled
Vcs-Git: https://github.com/XRPLF/rippled.git
Vcs-Browser: https://github.com/XRPLF/rippled
Package: xrpld
Section: net
Priority: optional
Architecture: any
Depends:
${shlibs:Depends},
${misc:Depends}
Description: XRP Ledger daemon
xrpld is the reference implementation of the XRP Ledger protocol.
It participates in the peer-to-peer XRP Ledger network, processes
transactions, and maintains the ledger database.
Package: rippled
Architecture: all
Section: oldlibs
Priority: optional
Depends: xrpld, ${misc:Depends}
Description: transitional package - use xrpld
The rippled package has been renamed to xrpld. This transitional
package ensures a smooth upgrade and can be safely removed after
xrpld is installed.

View File

@@ -0,0 +1,20 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: rippled
Source: https://github.com/XRPLF/rippled
Files: *
Copyright: 2012-2025 Ripple Labs Inc.
License: ISC
License: ISC
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

37
package/deb/debian/rules Normal file
View File

@@ -0,0 +1,37 @@
#!/usr/bin/make -f
# debhelper rules for the xrpld package. The xrpld executable is pre-built
# and staged by build_pkg.sh, so configure/build/test are no-ops and the
# install step places files with manual install(1) commands.
export DH_VERBOSE = 1
export DH_OPTIONS = -v

%:
	dh $@

# Nothing to configure, build, or test: packaging consumes a pre-built binary.
override_dh_auto_configure override_dh_auto_build override_dh_auto_test:
	@:

override_dh_auto_install:
	install -Dm0755 xrpld debian/tmp/opt/xrpld/bin/xrpld
	install -Dm0644 xrpld.cfg debian/tmp/opt/xrpld/etc/xrpld.cfg
	install -Dm0644 validators.txt debian/tmp/opt/xrpld/etc/validators.txt
	install -Dm0644 xrpld.logrotate debian/tmp/opt/xrpld/bin/xrpld.logrotate
	install -Dm0755 update-xrpld.sh debian/tmp/opt/xrpld/bin/update-xrpld.sh
	install -Dm0644 update-xrpld-cron debian/tmp/opt/xrpld/bin/update-xrpld-cron
	install -Dm0644 README.md debian/tmp/usr/share/doc/xrpld/README.md
	install -Dm0644 LICENSE.md debian/tmp/usr/share/doc/xrpld/LICENSE.md

override_dh_installsystemd:
	dh_installsystemd
# TODO(review): earlier packaging passed --no-start; confirm that starting
# the service automatically on install is now the intended behavior.
# dh_installsystemd --no-start

override_dh_installsysusers:
	dh_installsysusers

override_dh_installtmpfiles:
	dh_installtmpfiles

override_dh_install:
	dh_install

# Skip dwz DWARF compression — presumably not wanted/supported for this
# pre-built binary; confirm before removing this override.
override_dh_dwz:
	@:

View File

@@ -0,0 +1 @@
3.0 (quilt)

View File

@@ -0,0 +1,2 @@
/opt/xrpld/etc/xrpld.cfg
/opt/xrpld/etc/validators.txt

View File

@@ -0,0 +1,10 @@
opt/xrpld/bin/xrpld
opt/xrpld/bin/xrpld.logrotate
opt/xrpld/bin/update-xrpld.sh
opt/xrpld/bin/update-xrpld-cron
opt/xrpld/etc/xrpld.cfg
opt/xrpld/etc/validators.txt
usr/share/doc/xrpld/README.md
usr/share/doc/xrpld/LICENSE.md

View File

@@ -0,0 +1,13 @@
opt/xrpld/etc etc/opt/xrpld
opt/xrpld/bin/xrpld usr/bin/xrpld
## remove when "rippled" deprecated
opt/xrpld/bin/xrpld opt/xrpld/bin/rippled
opt/xrpld/bin/xrpld usr/bin/rippled
opt/xrpld/bin/xrpld usr/local/bin/rippled
opt/xrpld/etc/xrpld.cfg opt/xrpld/etc/rippled.cfg
var/log/xrpld var/log/rippled
var/lib/xrpld var/lib/rippled
opt/xrpld opt/ripple
etc/opt/xrpld etc/opt/ripple

113
package/rpm/xrpld.spec.in Normal file
View File

@@ -0,0 +1,113 @@
# xrpld RPM packaging: installs a prebuilt binary plus config and service
# files under /opt/xrpld, with compatibility symlinks for the legacy
# "rippled" layout. The @xrpld_version@/@pkg_release@ tokens are
# substituted by the build pipeline before rpmbuild runs.
%global xrpld_version @xrpld_version@
%global pkg_release @pkg_release@
%global _opt_prefix /opt/xrpld
# Split xrpld_version at the first dash: the base becomes Version, and any
# suffix (a pre-release tag) is folded into Release as 0.<suffix>.<rel>
# so pre-release builds sort before the final release.
%global ver_base %(v=%{xrpld_version}; echo ${v%%-*})
%global _has_dash %(v=%{xrpld_version}; [ "${v#*-}" != "$v" ] && echo 1 || echo 0)
%if 0%{?_has_dash}
%global ver_suffix %(v=%{xrpld_version}; printf %s "${v#*-}")
%endif

Name: xrpld
Version: %{ver_base}
Release: %{?ver_suffix:0.%{ver_suffix}.}%{pkg_release}%{?dist}
Summary: XRP Ledger daemon
License: ISC
URL: https://github.com/XRPLF/rippled
Source0: xrpld
Source1: xrpld.cfg
Source2: validators.txt
Source3: xrpld.service
Source4: xrpld.sysusers
Source5: xrpld.tmpfiles
Source6: xrpld.logrotate
Source7: update-xrpld.sh
Source8: update-xrpld-cron
BuildArch: x86_64
BuildRequires: systemd-rpm-macros
# The binary is prebuilt, so no debugsource can be generated; a debuginfo
# subpackage is still extracted from it.
%undefine _debugsource_packages
%debug_package
%{?systemd_requires}
%{?sysusers_requires_compat}

%description
xrpld is the reference implementation of the XRP Ledger protocol. It
participates in the peer-to-peer XRP Ledger network, processes
transactions, and maintains the ledger database.

%install
rm -rf %{buildroot}
# Suppress debugsource subpackage — no source files in the build tree.
touch %{_builddir}/debugsourcefiles.list
# Install binary and config files.
install -Dm0755 %{SOURCE0} %{buildroot}%{_opt_prefix}/bin/xrpld
install -Dm0644 %{SOURCE1} %{buildroot}%{_opt_prefix}/etc/xrpld.cfg
install -Dm0644 %{SOURCE2} %{buildroot}%{_opt_prefix}/etc/validators.txt
# Compatibility symlinks (matches debian/xrpld.links).
mkdir -p %{buildroot}/etc/opt %{buildroot}/usr/bin %{buildroot}/usr/local/bin \
    %{buildroot}/var/log %{buildroot}/var/lib
ln -s %{_opt_prefix}/etc %{buildroot}/etc/opt/xrpld
ln -s %{_opt_prefix}/bin/xrpld %{buildroot}/usr/bin/xrpld
## remove when "rippled" deprecated
ln -s xrpld %{buildroot}%{_opt_prefix}/bin/rippled
ln -s %{_opt_prefix}/bin/xrpld %{buildroot}/usr/bin/rippled
ln -s %{_opt_prefix}/bin/xrpld %{buildroot}/usr/local/bin/rippled
ln -s xrpld.cfg %{buildroot}%{_opt_prefix}/etc/rippled.cfg
ln -s %{_opt_prefix} %{buildroot}/opt/ripple
ln -s /etc/opt/xrpld %{buildroot}/etc/opt/ripple
ln -s xrpld %{buildroot}/var/log/rippled
ln -s xrpld %{buildroot}/var/lib/rippled
# Install systemd/sysusers/tmpfiles support files.
install -Dm0644 %{SOURCE3} %{buildroot}%{_unitdir}/xrpld.service
install -Dm0644 %{SOURCE4} %{buildroot}%{_sysusersdir}/xrpld.conf
install -Dm0644 %{SOURCE5} %{buildroot}%{_tmpfilesdir}/xrpld.conf
install -Dm0644 %{SOURCE6} %{buildroot}%{_opt_prefix}/bin/xrpld.logrotate
install -Dm0755 %{SOURCE7} %{buildroot}%{_opt_prefix}/bin/update-xrpld.sh
install -Dm0644 %{SOURCE8} %{buildroot}%{_opt_prefix}/bin/update-xrpld-cron

%pre
# Create the unprivileged xrpld system account before payload installation.
%sysusers_create_compat %{SOURCE4}

%post
%systemd_post xrpld.service

%preun
%systemd_preun xrpld.service

%postun
%systemd_postun_with_restart xrpld.service

%files
%dir %{_opt_prefix}
%dir %{_opt_prefix}/bin
%{_opt_prefix}/bin/xrpld
%{_opt_prefix}/bin/xrpld.logrotate
%{_opt_prefix}/bin/update-xrpld.sh
%{_opt_prefix}/bin/update-xrpld-cron
%{_opt_prefix}/bin/rippled
/usr/bin/xrpld
/usr/bin/rippled
/usr/local/bin/rippled
%dir %{_opt_prefix}/etc
%config(noreplace) %{_opt_prefix}/etc/xrpld.cfg
%config(noreplace) %{_opt_prefix}/etc/validators.txt
%{_opt_prefix}/etc/rippled.cfg
/etc/opt/xrpld
/etc/opt/ripple
/opt/ripple
%{_unitdir}/xrpld.service
%{_sysusersdir}/xrpld.conf
%{_tmpfilesdir}/xrpld.conf
/var/log/rippled
/var/lib/rippled
# Directories created at runtime by tmpfiles (Source5); owned, not shipped.
%ghost %dir /var/opt/ripple
%ghost %dir /var/opt/ripple/lib
%ghost %dir /var/opt/ripple/log

View File

@@ -0,0 +1,9 @@
# For automatic updates, symlink this file to /etc/cron.d/
# Do not remove the newline at the end of this cron script

# bash required for use of RANDOM below.
SHELL=/bin/bash
# PATH entries must be colon-separated (semicolons would leave cron with a
# broken search path and the update script's tools unresolvable).
PATH=/sbin:/bin:/usr/sbin:/usr/bin

# invoke check/update script with random delay up to 59 mins
0 * * * * root sleep $((RANDOM*3540/32768)) && /opt/xrpld/bin/update-xrpld.sh

64
package/shared/update-xrpld.sh Executable file
View File

@@ -0,0 +1,64 @@
#!/usr/bin/env bash
# auto-update script for xrpld daemon
#
# Checks the distro package manager for a newer xrpld package and, when one
# is available, installs it and restarts the service. Intended to be run
# hourly from cron (see update-xrpld-cron). Must run as root.

# Check for sudo/root permissions
if [[ $(id -u) -ne 0 ]] ; then
  echo "This update script must be run as root or sudo"
  exit 1
fi

LOCKDIR=/tmp/xrpld-update.lock
UPDATELOG=/var/log/xrpld/update.log

# Make sure the log directory exists before the first append below.
mkdir -p "$(dirname "$UPDATELOG")"

function cleanup {
  # If this directory isn't removed, future updates will fail.
  rmdir "$LOCKDIR"
}

# Use mkdir to check if process is already running. mkdir is atomic, as against file create.
if ! mkdir "$LOCKDIR" 2>/dev/null; then
  echo "$(date -u) lockdir exists - won't proceed." >> "$UPDATELOG"
  exit 1
fi
trap cleanup EXIT

source /etc/os-release
can_update=false

if [[ "$ID" == "ubuntu" || "$ID" == "debian" ]] ; then
  # Silent update
  apt-get update -qq
  # The next line is an "awk"ward way to check if the package needs to be updated.
  XRPLD=$(apt-get install -s --only-upgrade xrpld | awk '/^Inst/ { print $2 }')
  test "$XRPLD" == "xrpld" && can_update=true
  function apply_update {
    apt-get install xrpld -qq
  }
elif [[ "$ID" == "fedora" || "$ID" == "centos" || "$ID" == "rhel" || "$ID" == "scientific" ]] ; then
  RIPPLE_REPO=${RIPPLE_REPO-stable}
  yum --disablerepo=* --enablerepo=ripple-$RIPPLE_REPO clean expire-cache
  # yum check-update exits 100 when updates are available, 0 when there are
  # none, and 1 on error. Only 100 means an update exists; matching any
  # non-zero status would trigger updates on transient repo errors.
  yum check-update -q --enablerepo=ripple-$RIPPLE_REPO xrpld
  if [[ $? -eq 100 ]] ; then
    can_update=true
  fi
  function apply_update {
    yum update -y --enablerepo=ripple-$RIPPLE_REPO xrpld
  }
else
  echo "unrecognized distro!"
  exit 1
fi

# Do the actual update and restart the service after reloading systemctl daemon.
if [ "$can_update" = true ] ; then
  # Redirect all further output to the update log; abort on any failure.
  exec 3>&1 1>>"${UPDATELOG}" 2>&1
  set -e
  apply_update
  systemctl daemon-reload
  systemctl restart xrpld.service
  echo "$(date -u) xrpld daemon updated."
else
  echo "$(date -u) no updates available" >> "$UPDATELOG"
fi

View File

@@ -0,0 +1,15 @@
# Rotation policy for xrpld logs under /var/log/xrpld.
/var/log/xrpld/*.log {
    daily
    # Rotate on the daily run only once the log exceeds 200M.
    minsize 200M
    rotate 7
    nocreate
    missingok
    notifempty
    compress
    # Compress at low CPU and IO priority: the command actually run is
    # "/usr/bin/nice -n19 ionice -c3 gzip" (options are passed to nice).
    compresscmd /usr/bin/nice
    compressoptions -n19 ionice -c3 gzip
    compressext .gz
    postrotate
        # Invoke the daemon's "logrotate" admin command after rotation —
        # presumably makes it reopen its log file; confirm against xrpld docs.
        /opt/xrpld/bin/xrpld --conf /etc/opt/xrpld/xrpld.cfg logrotate
    endscript
}

View File

@@ -0,0 +1,15 @@
[Unit]
Description=XRP Ledger Daemon
# Order startup after the network is up; Wants= pulls the target in
# without making it a hard requirement.
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/opt/xrpld/bin/xrpld --net --silent --conf /etc/opt/xrpld/xrpld.cfg
Restart=on-failure
# Run as the unprivileged account declared in xrpld.sysusers.
User=xrpld
Group=xrpld
# The daemon keeps many ledger database files and peer sockets open.
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1 @@
# systemd-sysusers entry: create the unprivileged "xrpld" system account
# with home /var/lib/xrpld and no login shell.
u xrpld - "XRP Ledger daemon" /var/lib/xrpld /sbin/nologin

View File

@@ -0,0 +1,2 @@
# systemd-tmpfiles entries: create runtime data/log directories owned by
# the xrpld service account (mode 0750, no age-based cleanup).
# NOTE(review): the update script and logrotate config reference
# /var/log/xrpld, which is not created here — confirm intended log path.
d /var/opt/ripple/lib 0750 xrpld xrpld -
d /var/opt/ripple/log 0750 xrpld xrpld -

View File

@@ -0,0 +1,50 @@
#!/usr/bin/env sh
# Validate installed paths and compat symlinks for xrpld packages.
set -e
set -x

# Leave a marker file for the CI harness if anything below fails.
trap 'test $? -ne 0 && touch /tmp/test_failed' EXIT

# check <test-flag> <path>: run test(1) with the given flag, fail loudly.
check() { test $1 "$2" || { echo "FAIL: $1 $2"; exit 1; }; }

# check_resolves_to <link> <target>: fully resolve <link> and compare.
check_resolves_to() {
    resolved=$(readlink -f "$1")
    [ "$resolved" = "$2" ] || { echo "FAIL: $1 resolves to $resolved, expected $2"; exit 1; }
}

# var dirs (compat symlinks)
for link in /var/log/rippled /var/lib/rippled; do
    check -L "$link"
done

# compat directory symlinks — existence and resolved target
check -L /opt/ripple
check_resolves_to /opt/ripple /opt/xrpld
check -L /etc/opt/xrpld
check_resolves_to /etc/opt/xrpld /opt/xrpld/etc
check -L /etc/opt/ripple
check_resolves_to /etc/opt/ripple /opt/xrpld/etc

# config accessible via all expected paths
for cfg in \
    /opt/xrpld/etc/xrpld.cfg \
    /opt/xrpld/etc/rippled.cfg \
    /etc/opt/xrpld/xrpld.cfg \
    /etc/opt/xrpld/rippled.cfg \
    /etc/opt/ripple/xrpld.cfg \
    /etc/opt/ripple/rippled.cfg; do
    check -f "$cfg"
done

if systemctl is-system-running >/dev/null 2>&1; then
    # service file sanity check
    unit_text=$(systemctl cat xrpld)
    echo "$unit_text" | grep -q 'ExecStart=/opt/xrpld/bin/xrpld' || { echo "FAIL: ExecStart wrong"; echo "$unit_text"; exit 1; }
    echo "$unit_text" | grep -q 'User=xrpld' || { echo "FAIL: User not xrpld"; echo "$unit_text"; exit 1; }
fi

# binary accessible via all expected paths
for bin in \
    /opt/xrpld/bin/xrpld \
    /opt/xrpld/bin/rippled \
    /opt/ripple/bin/xrpld \
    /opt/ripple/bin/rippled \
    /usr/bin/xrpld \
    /usr/bin/rippled \
    /usr/local/bin/rippled; do
    "$bin" --version
done

76
package/test/smoketest.sh Executable file
View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Install a locally-built package and run basic verification.
#
# Usage: smoketest.sh local
# Expects packages in build/{dpkg,rpm}/packages/ or build/debbuild/ / build/rpmbuild/RPMS/
set -x

# Leave a marker file for the CI harness if anything below fails.
trap 'test $? -ne 0 && touch /tmp/test_failed' EXIT

install_from=$1

. /etc/os-release
case ${ID} in
  ubuntu|debian)
    pkgtype="dpkg"
    ;;
  fedora|centos|rhel|rocky|almalinux)
    pkgtype="rpm"
    ;;
  *)
    echo "unrecognized distro!"
    exit 1
    ;;
esac

if [ "${install_from}" != "local" ]; then
  echo "only 'local' install mode is supported"
  exit 1
fi

# Install the package
if [ "${pkgtype}" = "dpkg" ] ; then
  apt-get -y update
  # Find .deb files — check both possible output locations
  debs=$(find build/debbuild/ build/dpkg/packages/ -name '*.deb' ! -name '*dbgsym*' 2>/dev/null | head -5)
  if [ -z "$debs" ]; then
    echo "No .deb files found"
    exit 1
  fi
  # $debs is intentionally unquoted: dpkg takes one path per argument.
  dpkg --no-debsig -i $debs || apt-get -y install -f
elif [ "${pkgtype}" = "rpm" ] ; then
  # Find .rpm files — check both possible output locations
  rpms=$(find build/rpmbuild/RPMS/ build/rpm/packages/ -name '*.rpm' \
    ! -name '*debug*' ! -name '*devel*' ! -name '*.src.rpm' 2>/dev/null | head -5)
  if [ -z "$rpms" ]; then
    echo "No .rpm files found"
    exit 1
  fi
  # $rpms is intentionally unquoted: rpm takes one path per argument.
  rpm -i $rpms
fi

# Verify installed version
VERSION_OUTPUT=$(/opt/xrpld/bin/xrpld --version)
INSTALLED=$(echo "$VERSION_OUTPUT" | head -1 | awk '{print $NF}')
echo "Installed version: ${INSTALLED}"

# Run unit tests; scale parallelism to the runner in CI, fixed otherwise.
if [ -n "${CI:-}" ]; then
  unittest_jobs=$(nproc)
else
  unittest_jobs=16
fi
cd /tmp
/opt/xrpld/bin/xrpld --unittest --unittest-jobs ${unittest_jobs} > /tmp/unittest_results || true
cd -

# Parse the failure count from the summary line. The -n option must come
# before the file operand (POSIX tail; "tail FILE -n1" is a GNU extension).
num_failures=$(tail -n 1 /tmp/unittest_results | grep -oP '\d+(?= failures)')
if [ "${num_failures:-0}" -ne 0 ]; then
  echo "$num_failures unit test(s) failed:"
  grep 'failed:' /tmp/unittest_results
  exit 1
fi

# Compat path checks
"$(dirname "${BASH_SOURCE[0]}")/check_install_paths.sh"

View File

@@ -981,7 +981,7 @@ root(Number f, unsigned d)
auto ex = [e = e, di = di]() // Euclidean remainder of e/d
{
int k = (e >= 0 ? e : e - (di - 1)) / di;
int k2 = e - k * di;
int k2 = e - (k * di);
if (k2 == 0)
return 0;
return di - k2;
@@ -998,7 +998,7 @@ root(Number f, unsigned d)
}
// Quadratic least squares curve fit of f^(1/d) in the range [0, 1]
auto const D = ((6 * di + 11) * di + 6) * di + 1;
auto const D = (((6 * di + 11) * di + 6) * di) + 1;
auto const a0 = 3 * di * ((2 * di - 3) * di + 1);
auto const a1 = 24 * di * (2 * di - 1);
auto const a2 = -30 * (di - 1) * di;

View File

@@ -169,7 +169,7 @@ public:
XRPL_ASSERT(m_stopped == true, "xrpl::ResolverAsioImpl::start : stopped");
XRPL_ASSERT(m_stop_called == false, "xrpl::ResolverAsioImpl::start : not stopping");
if (m_stopped.exchange(false) == true)
if (m_stopped.exchange(false))
{
{
std::lock_guard lk{m_mut};
@@ -182,7 +182,7 @@ public:
void
stop_async() override
{
if (m_stop_called.exchange(true) == false)
if (!m_stop_called.exchange(true))
{
boost::asio::dispatch(
m_io_context,
@@ -229,7 +229,7 @@ public:
{
XRPL_ASSERT(m_stop_called == true, "xrpl::ResolverAsioImpl::do_stop : stopping");
if (m_stopped.exchange(true) == false)
if (!m_stopped.exchange(true))
{
m_work.clear();
m_resolver.cancel();
@@ -271,7 +271,7 @@ public:
m_strand, std::bind(&ResolverAsioImpl::do_work, this, CompletionCounter(this))));
}
HostAndPort
static HostAndPort
parseName(std::string const& str)
{
// first attempt to parse as an endpoint (IP addr + port).
@@ -319,7 +319,7 @@ public:
void
do_work(CompletionCounter)
{
if (m_stop_called == true)
if (m_stop_called)
return;
// We don't have any work to do at this time
@@ -367,7 +367,7 @@ public:
{
XRPL_ASSERT(!names.empty(), "xrpl::ResolverAsioImpl::do_resolve : names non-empty");
if (m_stop_called == false)
if (!m_stop_called)
{
m_work.emplace_back(names, handler);

View File

@@ -24,7 +24,7 @@ sqlBlobLiteral(Blob const& blob)
{
std::string j;
j.reserve(blob.size() * 2 + 3);
j.reserve((blob.size() * 2) + 3);
j.push_back('X');
j.push_back('\'');
boost::algorithm::hex(blob.begin(), blob.end(), std::back_inserter(j));

View File

@@ -107,7 +107,7 @@ encode(void* dest, void const* src, std::size_t len)
char const* in = static_cast<char const*>(src);
auto const tab = base64::get_alphabet();
for (auto n = len / 3; n--;)
for (auto n = len / 3; n != 0u; --n)
{
*out++ = tab[(in[0] & 0xfc) >> 2];
*out++ = tab[((in[0] & 0x03) << 4) + ((in[1] & 0xf0) >> 4)];
@@ -162,7 +162,7 @@ decode(void* dest, char const* src, std::size_t len)
auto const inverse = base64::get_inverse();
while (len-- && *in != '=')
while (((len--) != 0u) && *in != '=')
{
auto const v = inverse[*in];
if (v == -1)
@@ -181,7 +181,7 @@ decode(void* dest, char const* src, std::size_t len)
}
}
if (i)
if (i != 0)
{
c3[0] = (c4[0] << 2) + ((c4[1] & 0x30) >> 4);
c3[1] = ((c4[1] & 0xf) << 4) + ((c4[2] & 0x3c) >> 2);

View File

@@ -253,7 +253,7 @@ initAuthenticated(
// VFALCO Replace fopen() with RAII
FILE* f = fopen(chain_file.c_str(), "r");
if (!f)
if (f == nullptr)
{
LogicError(
"Problem opening SSL chain file" +

View File

@@ -352,7 +352,7 @@ public:
}
}
void
static void
log(std::vector<boost::asio::const_buffer> const& buffers)
{
(void)buffers;

View File

@@ -10,7 +10,7 @@ bool
is_private(AddressV6 const& addr)
{
return (
(addr.to_bytes()[0] & 0xfd) || // TODO fc00::/8 too ?
((addr.to_bytes()[0] & 0xfd) != 0) || // TODO fc00::/8 too ?
(addr.is_v4_mapped() &&
is_private(boost::asio::ip::make_address_v4(boost::asio::ip::v4_mapped, addr))));
}

View File

@@ -57,7 +57,7 @@ Endpoint::to_string() const
if (port() != 0 && address().is_v6())
s += '[';
s += address().to_string();
if (port())
if (port() != 0u)
{
if (address().is_v6())
s += ']';
@@ -111,7 +111,7 @@ operator>>(std::istream& is, Endpoint& endpoint)
// so we continue to honor that here by assuming we are at the end
// of the address portion if we hit a space (or the separator
// we were expecting to see)
if (isspace(static_cast<unsigned char>(i)) || (readTo && i == readTo))
if ((isspace(static_cast<unsigned char>(i)) != 0) || ((readTo != 0) && i == readTo))
break;
if ((i == '.') || (i >= '0' && i <= ':') || (i >= 'a' && i <= 'f') ||
@@ -121,13 +121,13 @@ operator>>(std::istream& is, Endpoint& endpoint)
// don't exceed a reasonable length...
if (addrStr.size() == INET6_ADDRSTRLEN ||
(readTo && readTo == ':' && addrStr.size() > 15))
((readTo != 0) && readTo == ':' && addrStr.size() > 15))
{
is.setstate(std::ios_base::failbit);
return is;
}
if (!readTo && (i == '.' || i == ':'))
if ((readTo == 0) && (i == '.' || i == ':'))
{
// if we see a dot first, must be IPv4
// otherwise must be non-bracketed IPv6
@@ -145,7 +145,7 @@ operator>>(std::istream& is, Endpoint& endpoint)
if (readTo == ']' && is.rdbuf()->in_avail() > 0)
{
is.get(i);
if (!(isspace(static_cast<unsigned char>(i)) || i == ':'))
if ((isspace(static_cast<unsigned char>(i)) == 0) && i != ':')
{
is.unget();
is.setstate(std::ios_base::failbit);

View File

@@ -84,8 +84,7 @@ JobQueue::addRefCountedJob(JobType type, std::string const& name, JobFunction co
JobType const type(job.getType());
XRPL_ASSERT(type != jtINVALID, "xrpl::JobQueue::addRefCountedJob : has valid job type");
XRPL_ASSERT(
m_jobSet.find(job) != m_jobSet.end(), "xrpl::JobQueue::addRefCountedJob : job found");
XRPL_ASSERT(m_jobSet.contains(job), "xrpl::JobQueue::addRefCountedJob : job found");
perfLog_.jobQueue(type);
JobTypeData& data(getJobTypeData(type));

View File

@@ -141,7 +141,7 @@ LoadMonitor::isOver()
update();
if (mLatencyEvents == 0)
return 0;
return false;
return isOverTarget(
mLatencyMSAvg / (mLatencyEvents * 4), mLatencyMSPeak / (mLatencyEvents * 4));

View File

@@ -47,7 +47,7 @@ Workers::setNumberOfThreads(int numberOfThreads)
if (m_numberOfThreads == numberOfThreads)
return;
if (perfLog_)
if (perfLog_ != nullptr)
perfLog_->resizeJobs(numberOfThreads);
if (numberOfThreads > m_numberOfThreads)

View File

@@ -210,8 +210,8 @@ RFC1751::extract(char const* s, int start, int length)
int const shiftR = 24 - (length + (start % 8));
cl = s[start / 8]; // get components
cc = (shiftR < 16) ? s[start / 8 + 1] : 0;
cr = (shiftR < 8) ? s[start / 8 + 2] : 0;
cc = (shiftR < 16) ? s[(start / 8) + 1] : 0;
cr = (shiftR < 8) ? s[(start / 8) + 2] : 0;
x = ((long)(cl << 8 | cc) << 8 | cr); // Put bits together
x = x >> shiftR; // Right justify number
@@ -265,13 +265,13 @@ RFC1751::insert(char* s, int x, int start, int length)
if (shift + length > 16)
{
s[start / 8] |= cl;
s[start / 8 + 1] |= cc;
s[start / 8 + 2] |= cr;
s[(start / 8) + 1] |= cc;
s[(start / 8) + 2] |= cr;
}
else if (shift + length > 8)
{
s[start / 8] |= cc;
s[start / 8 + 1] |= cr;
s[(start / 8) + 1] |= cr;
}
else
{
@@ -284,7 +284,7 @@ RFC1751::standard(std::string& strWord)
{
for (auto& letter : strWord)
{
if (islower(static_cast<unsigned char>(letter)))
if (islower(static_cast<unsigned char>(letter)) != 0)
{
letter = toupper(static_cast<unsigned char>(letter));
}
@@ -312,10 +312,10 @@ RFC1751::wsrch(std::string const& strWord, int iMin, int iMax)
while (iResult < 0 && iMin != iMax)
{
// Have a range to search.
int iMid = iMin + (iMax - iMin) / 2;
int iMid = iMin + ((iMax - iMin) / 2);
int iDir = strWord.compare(s_dictionary[iMid]);
if (!iDir)
if (iDir == 0)
{
iResult = iMid; // Found it.
}

View File

@@ -152,7 +152,7 @@ public:
#ifndef NDEBUG
// Make sure we haven't already seen this tag.
auto& tags = stack_.top().tags;
check(tags.find(tag) == tags.end(), "Already seen tag " + tag);
check(!tags.contains(tag), "Already seen tag " + tag);
tags.insert(tag);
#endif

View File

@@ -296,7 +296,7 @@ Reader::match(Location pattern, int patternLength)
int index = patternLength;
while (index--)
while ((index--) != 0)
{
if (current_[index] != pattern[index])
return false;
@@ -362,7 +362,7 @@ Reader::readNumber()
while (current_ != end_)
{
if (!std::isdigit(static_cast<unsigned char>(*current_)))
if (std::isdigit(static_cast<unsigned char>(*current_)) == 0)
{
auto ret =
std::find(std::begin(extended_tokens), std::end(extended_tokens), *current_);
@@ -913,7 +913,7 @@ Reader::getFormattedErrorMessages() const
formattedMessage += "* " + getLocationLineAndColumn(error.token_.start_) + "\n";
formattedMessage += " " + error.message_ + "\n";
if (error.extra_)
if (error.extra_ != nullptr)
formattedMessage += "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
}

View File

@@ -40,10 +40,10 @@ public:
// return 0;
if (length == unknown)
length = value ? (unsigned int)strlen(value) : 0;
length = (value != nullptr) ? (unsigned int)strlen(value) : 0;
char* newString = static_cast<char*>(malloc(length + 1));
if (value)
if (value != nullptr)
memcpy(newString, value, length);
newString[length] = 0;
return newString;
@@ -52,7 +52,7 @@ public:
void
releaseStringValue(char* value) override
{
if (value)
if (value != nullptr)
free(value);
}
};
@@ -108,14 +108,14 @@ Value::CZString::CZString(CZString const& other)
Value::CZString::~CZString()
{
if (cstr_ && index_ == duplicate)
if ((cstr_ != nullptr) && index_ == duplicate)
valueAllocator()->releaseMemberName(const_cast<char*>(cstr_));
}
bool
Value::CZString::operator<(CZString const& other) const
{
if (cstr_ && other.cstr_)
if ((cstr_ != nullptr) && (other.cstr_ != nullptr))
return strcmp(cstr_, other.cstr_) < 0;
return index_ < other.index_;
@@ -124,7 +124,7 @@ Value::CZString::operator<(CZString const& other) const
bool
Value::CZString::operator==(CZString const& other) const
{
if (cstr_ && other.cstr_)
if ((cstr_ != nullptr) && (other.cstr_ != nullptr))
return strcmp(cstr_, other.cstr_) == 0;
return index_ == other.index_;
@@ -251,7 +251,7 @@ Value::Value(Value const& other) : type_(other.type_)
break;
case stringValue:
if (other.value_.string_)
if (other.value_.string_ != nullptr)
{
value_.string_ = valueAllocator()->duplicateStringValue(other.value_.string_);
allocated_ = true;
@@ -294,7 +294,7 @@ Value::~Value()
case arrayValue:
case objectValue:
if (value_.map_)
if (value_.map_ != nullptr)
delete value_.map_;
break;
@@ -392,11 +392,11 @@ operator<(Value const& x, Value const& y)
return x.value_.real_ < y.value_.real_;
case booleanValue:
return x.value_.bool_ < y.value_.bool_;
return static_cast<int>(x.value_.bool_) < static_cast<int>(y.value_.bool_);
case stringValue:
return (x.value_.string_ == 0 && y.value_.string_) ||
(y.value_.string_ && x.value_.string_ &&
return (x.value_.string_ == 0 && (y.value_.string_ != nullptr)) ||
((y.value_.string_ != nullptr) && (x.value_.string_ != nullptr) &&
strcmp(x.value_.string_, y.value_.string_) < 0);
case arrayValue:
@@ -413,7 +413,7 @@ operator<(Value const& x, Value const& y)
// LCOV_EXCL_STOP
}
return 0; // unreachable
return false; // unreachable
}
bool
@@ -422,9 +422,9 @@ operator==(Value const& x, Value const& y)
if (x.type_ != y.type_)
{
if (x.type_ == intValue && y.type_ == uintValue)
return !integerCmp(x.value_.int_, y.value_.uint_);
return integerCmp(x.value_.int_, y.value_.uint_) == 0;
if (x.type_ == uintValue && y.type_ == intValue)
return !integerCmp(y.value_.int_, x.value_.uint_);
return integerCmp(y.value_.int_, x.value_.uint_) == 0;
return false;
}
@@ -447,8 +447,8 @@ operator==(Value const& x, Value const& y)
case stringValue:
return x.value_.string_ == y.value_.string_ ||
(y.value_.string_ && x.value_.string_ &&
!strcmp(x.value_.string_, y.value_.string_));
((y.value_.string_ != nullptr) && (x.value_.string_ != nullptr) &&
(strcmp(x.value_.string_, y.value_.string_) == 0));
case arrayValue:
case objectValue:
@@ -461,7 +461,7 @@ operator==(Value const& x, Value const& y)
// LCOV_EXCL_STOP
}
return 0; // unreachable
return false; // unreachable
}
char const*
@@ -480,7 +480,7 @@ Value::asString() const
return "";
case stringValue:
return value_.string_ ? value_.string_ : "";
return (value_.string_ != nullptr) ? value_.string_ : "";
case booleanValue:
return value_.bool_ ? "true" : "false";
@@ -525,7 +525,7 @@ Value::asInt() const
case realValue:
JSON_ASSERT_MESSAGE(
value_.real_ >= minInt && value_.real_ <= maxInt,
(value_.real_ >= minInt && value_.real_ <= maxInt),
"Real out of signed integer range");
return Int(value_.real_);
@@ -533,7 +533,7 @@ Value::asInt() const
return value_.bool_ ? 1 : 0;
case stringValue: {
char const* const str{value_.string_ ? value_.string_ : ""};
char const* const str{(value_.string_ != nullptr) ? value_.string_ : ""};
return beast::lexicalCastThrow<int>(str);
}
@@ -584,7 +584,7 @@ Value::asAbsUInt() const
return value_.bool_ ? 1 : 0;
case stringValue: {
char const* const str{value_.string_ ? value_.string_ : ""};
char const* const str{(value_.string_ != nullptr) ? value_.string_ : ""};
auto const temp = beast::lexicalCastThrow<std::int64_t>(str);
if (temp < 0)
{
@@ -626,14 +626,15 @@ Value::asUInt() const
case realValue:
JSON_ASSERT_MESSAGE(
value_.real_ >= 0 && value_.real_ <= maxUInt, "Real out of unsigned integer range");
(value_.real_ >= 0 && value_.real_ <= maxUInt),
"Real out of unsigned integer range");
return UInt(value_.real_);
case booleanValue:
return value_.bool_ ? 1 : 0;
case stringValue: {
char const* const str{value_.string_ ? value_.string_ : ""};
char const* const str{(value_.string_ != nullptr) ? value_.string_ : ""};
return beast::lexicalCastThrow<unsigned int>(str);
}
@@ -703,7 +704,7 @@ Value::asBool() const
return value_.bool_;
case stringValue:
return value_.string_ && value_.string_[0] != 0;
return (value_.string_ != nullptr) && value_.string_[0] != 0;
case arrayValue:
case objectValue:
@@ -745,13 +746,13 @@ Value::isConvertibleTo(ValueType other) const
other == realValue || other == stringValue || other == booleanValue;
case booleanValue:
return (other == nullValue && value_.bool_ == false) || other == intValue ||
return (other == nullValue && !value_.bool_) || other == intValue ||
other == uintValue || other == realValue || other == stringValue ||
other == booleanValue;
case stringValue:
return other == stringValue ||
(other == nullValue && (!value_.string_ || value_.string_[0] == 0));
(other == nullValue && ((value_.string_ == nullptr) || value_.string_[0] == 0));
case arrayValue:
return other == arrayValue || (other == nullValue && value_.map_->empty());
@@ -813,10 +814,10 @@ operator bool() const
if (isString())
{
auto s = asCString();
return s && s[0];
return (s != nullptr) && (s[0] != 0);
}
return !(isArray() || isObject()) || size();
return !(isArray() || isObject()) || (size() != 0u);
}
void
@@ -1139,7 +1140,7 @@ Value::begin() const
{
case arrayValue:
case objectValue:
if (value_.map_)
if (value_.map_ != nullptr)
return const_iterator(value_.map_->begin());
break;
@@ -1157,7 +1158,7 @@ Value::end() const
{
case arrayValue:
case objectValue:
if (value_.map_)
if (value_.map_ != nullptr)
return const_iterator(value_.map_->end());
break;
@@ -1175,7 +1176,7 @@ Value::begin()
{
case arrayValue:
case objectValue:
if (value_.map_)
if (value_.map_ != nullptr)
return iterator(value_.map_->begin());
break;
default:
@@ -1192,7 +1193,7 @@ Value::end()
{
case arrayValue:
case objectValue:
if (value_.map_)
if (value_.map_ != nullptr)
return iterator(value_.map_->end());
break;
default:

View File

@@ -89,7 +89,7 @@ ValueIteratorBase::key() const
{
Value::CZString const czString = (*current_).first;
if (czString.c_str())
if (czString.c_str() != nullptr)
{
if (czString.isStaticString())
return Value(StaticString(czString.c_str()));
@@ -105,7 +105,7 @@ ValueIteratorBase::index() const
{
Value::CZString const czString = (*current_).first;
if (!czString.c_str())
if (czString.c_str() == nullptr)
return czString.index();
return Value::UInt(-1);
@@ -115,7 +115,7 @@ char const*
ValueIteratorBase::memberName() const
{
char const* name = (*current_).first.c_str();
return name ? name : "";
return (name != nullptr) ? name : "";
}
// //////////////////////////////////////////////////////////////////

View File

@@ -23,7 +23,7 @@ isControlCharacter(char ch)
static bool
containsControlCharacter(char const* str)
{
while (*str)
while (*str != 0)
{
if (isControlCharacter(*(str++)))
return true;
@@ -106,7 +106,7 @@ valueToQuotedString(char const* value)
// We have to walk value and escape any special characters.
// Appending to std::string is not efficient, but this should be rare.
// (Note: forward slashes are *not* rare, but I am not escaping them.)
unsigned maxsize = strlen(value) * 2 + 3; // all-escaped+quotes+NULL
unsigned maxsize = (strlen(value) * 2) + 3; // all-escaped+quotes+NULL
std::string result;
result.reserve(maxsize); // to avoid lots of mallocs
result += "\"";
@@ -416,7 +416,7 @@ StyledWriter::isMultilineArray(Value const& value)
{
childValues_.reserve(size);
addChildValues_ = true;
int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
int lineLength = 4 + ((size - 1) * 2); // '[ ' + ', '*n + ' ]'
for (int index = 0; index < size; ++index)
{
@@ -651,7 +651,7 @@ StyledStreamWriter::isMultilineArray(Value const& value)
{
childValues_.reserve(size);
addChildValues_ = true;
int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
int lineLength = 4 + ((size - 1) * 2); // '[ ' + ', '*n + ' ]'
for (int index = 0; index < size; ++index)
{

View File

@@ -275,9 +275,7 @@ ApplyStateTable::exists(ReadView const& base, Keylet const& k) const
case Action::modify:
break;
}
if (!k.check(*sle))
return false;
return true;
return k.check(*sle);
}
auto

View File

@@ -36,7 +36,7 @@ findPreviousPage(ApplyView& view, Keylet const& directory, SLE::ref start)
auto node = start;
if (page)
if (page != 0u)
{
node = view.peek(keylet::page(directory, page));
if (!node)

View File

@@ -164,7 +164,7 @@ PaymentSandbox::balanceHook(
auto delta = amount.zeroed();
auto lastBal = amount;
auto minBal = amount;
for (auto curSB = this; curSB; curSB = curSB->ps_)
for (auto curSB = this; curSB != nullptr; curSB = curSB->ps_)
{
if (auto adj = curSB->tab_.adjustments(account, issuer, currency))
{
@@ -198,7 +198,7 @@ std::uint32_t
PaymentSandbox::ownerCountHook(AccountID const& account, std::uint32_t count) const
{
std::uint32_t result = count;
for (auto curSB = this; curSB; curSB = curSB->ps_)
for (auto curSB = this; curSB != nullptr; curSB = curSB->ps_)
{
if (auto adj = curSB->tab_.ownerCount(account))
result = std::max(result, *adj);

View File

@@ -78,7 +78,7 @@ deleteSLE(ApplyView& view, std::shared_ptr<SLE> const& sleCredential, beast::Jou
auto const issuer = sleCredential->getAccountID(sfIssuer);
auto const subject = sleCredential->getAccountID(sfSubject);
bool const accepted = sleCredential->getFlags() & lsfAccepted;
bool const accepted = (sleCredential->getFlags() & lsfAccepted) != 0u;
auto err = delSLE(issuer, sfIssuerNode, !accepted || (subject == issuer));
if (!isTesSuccess(err))
@@ -147,7 +147,7 @@ valid(STTx const& tx, ReadView const& view, AccountID const& src, beast::Journal
return tecBAD_CREDENTIALS;
}
if (!(sleCred->getFlags() & lsfAccepted))
if ((sleCred->getFlags() & lsfAccepted) == 0u)
{
JLOG(j.trace()) << "Credential isn't accepted. Cred: " << h;
return tecBAD_CREDENTIALS;
@@ -188,7 +188,7 @@ validDomain(ReadView const& view, uint256 domainID, AccountID const& subject)
foundExpired = true;
continue;
}
if (sleCredential->getFlags() & lsfAccepted)
if ((sleCredential->getFlags() & lsfAccepted) != 0u)
{
return tesSUCCESS;
}
@@ -309,7 +309,7 @@ verifyValidDomain(ApplyView& view, AccountID const& account, uint256 domainID, b
if (!sleCredential)
continue; // expired, i.e. deleted in credentials::removeExpired
if (sleCredential->getFlags() & lsfAccepted)
if ((sleCredential->getFlags() & lsfAccepted) != 0u)
return tesSUCCESS;
}
@@ -336,7 +336,7 @@ verifyDepositPreauth(
if (credentialsPresent && credentials::removeExpired(view, tx.getFieldV256(sfCredentialIDs), j))
return tecEXPIRED;
if (sleDst && (sleDst->getFlags() & lsfDepositAuth))
if (sleDst && ((sleDst->getFlags() & lsfDepositAuth) != 0u))
{
if (src != dst)
{

View File

@@ -59,6 +59,7 @@ public:
//--------------------------------------------------------------------------
void
// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
makeGet(std::string const& strPath, boost::asio::streambuf& sb, std::string const& strHost)
{
std::ostream osRequest(&sb);

View File

@@ -55,7 +55,7 @@ ManagerImp::make_Backend(
missing_backend();
auto factory{find(type)};
if (!factory)
if (factory == nullptr)
{
missing_backend();
}

View File

@@ -213,7 +213,7 @@ public:
rocksdb::DB* db = nullptr;
m_options.create_if_missing = createIfMissing;
rocksdb::Status status = rocksdb::DB::Open(m_options, m_name, &db);
if (!status.ok() || !db)
if (!status.ok() || (db == nullptr))
{
Throw<std::runtime_error>(
std::string("Unable to open/create RocksDB: ") + status.ToString());

View File

@@ -140,7 +140,7 @@ encodeSoftwareVersion(std::string_view versionStr)
if (x == 0)
x = parsePreRelease(id, "b", 0x40, 0, 63);
if (x & 0xC0)
if ((x & 0xC0) != 0)
{
c |= static_cast<std::uint64_t>(x) << 16;
break;

View File

@@ -205,9 +205,7 @@ make_error(error_code_i code, std::string const& message)
bool
contains_error(Json::Value const& json)
{
if (json.isObject() && json.isMember(jss::error))
return true;
return false;
return json.isObject() && json.isMember(jss::error);
}
int

View File

@@ -209,7 +209,7 @@ FeatureCollections::getRegisteredFeature(std::string const& name) const
XRPL_ASSERT(
readOnly.load(), "xrpl::FeatureCollections::getRegisteredFeature : startup completed");
Feature const* feature = getByName(name);
if (feature)
if (feature != nullptr)
return feature->feature;
return std::nullopt;
}
@@ -229,7 +229,7 @@ FeatureCollections::registerFeature(std::string const& name, Supported support,
support == Supported::yes || vote == VoteBehavior::DefaultNo,
"Invalid feature parameters. Must be supported to be up-voted.");
Feature const* i = getByName(name);
if (!i)
if (i == nullptr)
{
check(features.size() < detail::numFeatures, "More features defined than allocated.");
@@ -283,7 +283,7 @@ FeatureCollections::featureToBitsetIndex(uint256 const& f) const
readOnly.load(), "xrpl::FeatureCollections::featureToBitsetIndex : startup completed");
Feature const* feature = getByFeature(f);
if (!feature)
if (feature == nullptr)
LogicError("Invalid Feature ID");
return getIndex(*feature);
@@ -303,7 +303,7 @@ FeatureCollections::featureToName(uint256 const& f) const
{
XRPL_ASSERT(readOnly.load(), "xrpl::FeatureCollections::featureToName : startup completed");
Feature const* feature = getByFeature(f);
return feature ? feature->name : to_string(f);
return (feature != nullptr) ? feature->name : to_string(f);
}
FeatureCollections featureCollections;

View File

@@ -186,7 +186,7 @@ mulRatio(IOUAmount const& amt, std::uint32_t num, std::uint32_t den, bool roundU
{
using namespace boost::multiprecision;
if (!den)
if (den == 0u)
Throw<std::runtime_error>("division by zero");
// A vector with the value 10^index for indexes from 0 to 29

View File

@@ -172,7 +172,7 @@ SOTemplate const*
InnerObjectFormats::findSOTemplateBySField(SField const& sField) const
{
auto itemPtr = findByType(sField.getCode());
if (itemPtr)
if (itemPtr != nullptr)
return &(itemPtr->getSOTemplate());
return nullptr;

View File

@@ -24,7 +24,7 @@ canHaveNFTokenOfferID(
return false;
TxType const tt = serializedTx->getTxnType();
if (!(tt == ttNFTOKEN_MINT && serializedTx->isFieldPresent(sfAmount)) &&
if ((tt != ttNFTOKEN_MINT || !serializedTx->isFieldPresent(sfAmount)) &&
tt != ttNFTOKEN_CREATE_OFFER)
return false;

View File

@@ -176,13 +176,13 @@ Permission::isDelegable(std::uint32_t const& permissionValue, Rules const& rules
}
uint32_t
Permission::txToPermissionType(TxType const& type) const
Permission::txToPermissionType(TxType const& type)
{
return static_cast<uint32_t>(type) + 1;
}
TxType
Permission::permissionToTxType(uint32_t const& value) const
Permission::permissionToTxType(uint32_t const& value)
{
return static_cast<TxType>(value - 1);
}

View File

@@ -75,7 +75,7 @@ static std::string
sliceToHex(Slice const& slice)
{
std::string s;
if (slice[0] & 0x80)
if ((slice[0] & 0x80) != 0)
{
s.reserve(2 * (slice.size() + 2));
s = "0x00";

View File

@@ -84,7 +84,7 @@ bool
STAccount::isEquivalent(STBase const& t) const
{
auto const* const tPtr = dynamic_cast<STAccount const*>(&t);
return tPtr && (default_ == tPtr->default_) && (value_ == tPtr->value_);
return (tPtr != nullptr) && (default_ == tPtr->default_) && (value_ == tPtr->value_);
}
bool

View File

@@ -152,7 +152,7 @@ STAmount::STAmount(SerialIter& sit, SField const& name) : STBase(name)
value &= ~(1023ull << (64 - 10));
if (value)
if (value != 0u)
{
bool isNegative = (offset & 256) == 0;
offset = (offset & 255) - 97; // center the range
@@ -505,14 +505,11 @@ canAdd(STAmount const& a, STAmount const& b)
XRPAmount A = a.xrp();
XRPAmount B = b.xrp();
if ((B > XRPAmount{0} &&
return !(
(B > XRPAmount{0} &&
A > XRPAmount{std::numeric_limits<XRPAmount::value_type>::max()} - B) ||
(B < XRPAmount{0} &&
A < XRPAmount{std::numeric_limits<XRPAmount::value_type>::min()} - B))
{
return false;
}
return true;
A < XRPAmount{std::numeric_limits<XRPAmount::value_type>::min()} - B));
}
// IOU case (precision check)
@@ -530,15 +527,11 @@ canAdd(STAmount const& a, STAmount const& b)
{
MPTAmount A = a.mpt();
MPTAmount B = b.mpt();
if ((B > MPTAmount{0} &&
return !(
(B > MPTAmount{0} &&
A > MPTAmount{std::numeric_limits<MPTAmount::value_type>::max()} - B) ||
(B < MPTAmount{0} &&
A < MPTAmount{std::numeric_limits<MPTAmount::value_type>::min()} - B))
{
return false;
}
return true;
A < MPTAmount{std::numeric_limits<MPTAmount::value_type>::min()} - B));
}
// LCOV_EXCL_START
UNREACHABLE("STAmount::canAdd : unexpected STAmount type");
@@ -803,7 +796,7 @@ bool
STAmount::isEquivalent(STBase const& t) const
{
STAmount const* v = dynamic_cast<STAmount const*>(&t);
return v && (*v == *this);
return (v != nullptr) && (*v == *this);
}
bool

View File

@@ -53,7 +53,7 @@ bool
STBlob::isEquivalent(STBase const& t) const
{
STBlob const* v = dynamic_cast<STBlob const*>(&t);
return v && (value_ == v->value_);
return (v != nullptr) && (value_ == v->value_);
}
bool

View File

@@ -56,7 +56,7 @@ bool
STCurrency::isEquivalent(STBase const& t) const
{
STCurrency const* v = dynamic_cast<STCurrency const*>(&t);
return v && (*v == *this);
return (v != nullptr) && (*v == *this);
}
bool

View File

@@ -110,7 +110,7 @@ bool
STIssue::isEquivalent(STBase const& t) const
{
STIssue const* v = dynamic_cast<STIssue const*>(&t);
return v && (*v == *this);
return (v != nullptr) && (*v == *this);
}
bool

View File

@@ -136,7 +136,7 @@ STLedgerEntry::isThreadedType(Rules const& rules) const
// Exclude PrevTxnID/PrevTxnLgrSeq if the fixPreviousTxnID amendment is not
// enabled and the ledger object type is in the above set
bool const excludePrevTxnID = !rules.enabled(fixPreviousTxnID) &&
std::count(newPreviousTxnIDTypes.cbegin(), newPreviousTxnIDTypes.cend(), type_);
(std::count(newPreviousTxnIDTypes.cbegin(), newPreviousTxnIDTypes.cend(), type_) != 0);
return !excludePrevTxnID && getFieldIndex(sfPreviousTxnID) != -1;
}

View File

@@ -204,7 +204,7 @@ void
STObject::applyTemplateFromSField(SField const& sField)
{
SOTemplate const* elements = InnerObjectFormats::getInstance().findSOTemplateBySField(sField);
if (elements)
if (elements != nullptr)
applyTemplate(*elements); // May throw
}
@@ -276,7 +276,7 @@ STObject::hasMatchingEntry(STBase const& t) const
{
STBase const* o = peekAtPField(t.getFName());
if (!o)
if (o == nullptr)
return false;
return t == *o;
@@ -343,7 +343,7 @@ STObject::isEquivalent(STBase const& t) const
{
STObject const* v = dynamic_cast<STObject const*>(&t);
if (!v)
if (v == nullptr)
return false;
if (mType != nullptr && v->mType == mType)
@@ -480,7 +480,7 @@ STObject::setFlag(std::uint32_t f)
{
STUInt32* t = dynamic_cast<STUInt32*>(getPField(sfFlags, true));
if (!t)
if (t == nullptr)
return false;
t->setValue(t->value() | f);
@@ -492,7 +492,7 @@ STObject::clearFlag(std::uint32_t f)
{
STUInt32* t = dynamic_cast<STUInt32*>(getPField(sfFlags));
if (!t)
if (t == nullptr)
return false;
t->setValue(t->value() & ~f);
@@ -510,7 +510,7 @@ STObject::getFlags(void) const
{
STUInt32 const* t = dynamic_cast<STUInt32 const*>(peekAtPField(sfFlags));
if (!t)
if (t == nullptr)
return 0;
return t->value();
@@ -574,7 +574,7 @@ STObject::delField(int index)
SOEStyle
STObject::getStyle(SField const& field) const
{
return mType ? mType->style(field) : soeINVALID;
return (mType != nullptr) ? mType->style(field) : soeINVALID;
}
unsigned char
@@ -877,10 +877,7 @@ STObject::operator==(STObject const& obj) const
++fields;
}
if (fields != matches)
return false;
return true;
return fields == matches;
}
void
@@ -917,7 +914,8 @@ STObject::getSortedFields(STObject const& objToSort, WhichFields whichFields)
for (detail::STVar const& elem : objToSort.v_)
{
STBase const& base = elem.get();
if ((base.getSType() != STI_NOTPRESENT) && base.getFName().shouldInclude(whichFields))
if ((base.getSType() != STI_NOTPRESENT) &&
base.getFName().shouldInclude(static_cast<bool>(whichFields)))
{
sf.push_back(&base);
}

View File

@@ -1075,20 +1075,20 @@ parseArray(
// TODO: There doesn't seem to be a nice way to get just the
// first/only key in an object without copying all keys into
// a vector
std::string const objectName(json[i].getMemberNames()[0]);
std::string const memberName(json[i].getMemberNames()[0]);
;
auto const& nameField(SField::getField(objectName));
auto const& nameField(SField::getField(memberName));
if (nameField == sfInvalid)
{
error = unknown_field(json_name, objectName);
error = unknown_field(json_name, memberName);
return std::nullopt;
}
Json::Value const objectFields(json[i][objectName]);
Json::Value const objectFields(json[i][memberName]);
std::stringstream ss;
ss << json_name << "." << "[" << i << "]." << objectName;
ss << json_name << "." << "[" << i << "]." << memberName;
auto ret = parseObject(ss.str(), objectFields, nameField, depth + 1, error);
if (!ret)

View File

@@ -61,7 +61,7 @@ STPathSet::STPathSet(SerialIter& sit, SField const& name) : STBase(name)
if (iType == STPathElement::typeNone)
return;
}
else if (iType & ~STPathElement::typeAll)
else if ((iType & ~STPathElement::typeAll) != 0)
{
JLOG(debugLog().error()) << "Bad path element " << iType << " in pathset";
Throw<std::runtime_error>("bad path element");
@@ -76,13 +76,13 @@ STPathSet::STPathSet(SerialIter& sit, SField const& name) : STBase(name)
Currency currency;
AccountID issuer;
if (hasAccount)
if (hasAccount != 0)
account = sit.get160();
if (hasCurrency)
if (hasCurrency != 0)
currency = sit.get160();
if (hasIssuer)
if (hasIssuer != 0)
issuer = sit.get160();
path.emplace_back(account, currency, issuer, hasCurrency);
@@ -127,7 +127,7 @@ bool
STPathSet::isEquivalent(STBase const& t) const
{
STPathSet const* v = dynamic_cast<STPathSet const*>(&t);
return v && (value == v->value);
return (v != nullptr) && (value == v->value);
}
bool
@@ -160,13 +160,13 @@ STPath::getJson(JsonOptions) const
elem[jss::type] = iType;
if (iType & STPathElement::typeAccount)
if ((iType & STPathElement::typeAccount) != 0u)
elem[jss::account] = to_string(it.getAccountID());
if (iType & STPathElement::typeCurrency)
if ((iType & STPathElement::typeCurrency) != 0u)
elem[jss::currency] = to_string(it.getCurrency());
if (iType & STPathElement::typeIssuer)
if ((iType & STPathElement::typeIssuer) != 0u)
elem[jss::issuer] = to_string(it.getIssuerID());
ret.append(elem);
@@ -209,13 +209,13 @@ STPathSet::add(Serializer& s) const
s.add8(iType);
if (iType & STPathElement::typeAccount)
if ((iType & STPathElement::typeAccount) != 0)
s.addBitString(speElement.getAccountID());
if (iType & STPathElement::typeCurrency)
if ((iType & STPathElement::typeCurrency) != 0)
s.addBitString(speElement.getCurrency());
if (iType & STPathElement::typeIssuer)
if ((iType & STPathElement::typeIssuer) != 0)
s.addBitString(speElement.getIssuerID());
}

View File

@@ -574,7 +574,7 @@ STTx::getBatchTransactionIDs() const
{
XRPL_ASSERT(getTxnType() == ttBATCH, "STTx::getBatchTransactionIDs : not a batch transaction");
XRPL_ASSERT(
getFieldArray(sfRawTransactions).size() != 0,
!getFieldArray(sfRawTransactions).empty(),
"STTx::getBatchTransactionIDs : empty raw transactions");
// The list of inner ids is built once, then reused on subsequent calls.
@@ -618,7 +618,7 @@ isMemoOkay(STObject const& st, std::string& reason)
{
auto memoObj = dynamic_cast<STObject const*>(&memo);
if (!memoObj || (memoObj->getFName() != sfMemo))
if ((memoObj == nullptr) || (memoObj->getFName() != sfMemo))
{
reason = "A memo array may contain only Memo objects.";
return false;
@@ -669,7 +669,7 @@ isMemoOkay(STObject const& st, std::string& reason)
for (unsigned char c : *optData)
{
if (!allowedSymbols[c])
if (allowedSymbols[c] == 0)
{
reason =
"The MemoType and MemoFormat fields may only "
@@ -691,7 +691,7 @@ isAccountFieldOkay(STObject const& st)
for (int i = 0; i < st.getCount(); ++i)
{
auto t = dynamic_cast<STAccount const*>(st.peekAtPIndex(i));
if (t && t->isDefault())
if ((t != nullptr) && t->isDefault())
return false;
}

View File

@@ -107,7 +107,7 @@ STValidation::isValid() const noexcept
getSignerPublic(),
getSigningHash(),
makeSlice(getFieldVL(sfSignature)),
getFlags() & vfFullyCanonicalSig);
(getFlags() & vfFullyCanonicalSig) != 0u);
}
return valid_.value();

View File

@@ -60,7 +60,7 @@ STVar::operator=(STVar const& rhs)
if (&rhs != this)
{
destroy();
if (rhs.p_)
if (rhs.p_ != nullptr)
{
p_ = rhs.p_->copy(max_size, &d_);
}

View File

@@ -68,7 +68,7 @@ bool
STVector256::isEquivalent(STBase const& t) const
{
STVector256 const* v = dynamic_cast<STVector256 const*>(&t);
return v && (mValue == v->mValue);
return (v != nullptr) && (mValue == v->mValue);
}
Json::Value

View File

@@ -167,7 +167,7 @@ bool
STXChainBridge::isEquivalent(STBase const& t) const
{
STXChainBridge const* v = dynamic_cast<STXChainBridge const*>(&t);
return v && (*v == *this);
return (v != nullptr) && (*v == *this);
}
bool

View File

@@ -199,7 +199,7 @@ Serializer::addVL(void const* ptr, int len)
{
int ret = addEncoded(len);
if (len)
if (len != 0)
addRaw(ptr, len);
return ret;
@@ -298,7 +298,7 @@ Serializer::decodeVLLength(int b1, int b2)
if (b1 > 240)
Throw<std::overflow_error>("b1>240");
return 193 + (b1 - 193) * 256 + b2;
return 193 + ((b1 - 193) * 256) + b2;
}
int
@@ -310,7 +310,7 @@ Serializer::decodeVLLength(int b1, int b2, int b3)
if (b1 > 254)
Throw<std::overflow_error>("b1>254");
return 12481 + (b1 - 241) * 65536 + b2 * 256 + b3;
return 12481 + ((b1 - 241) * 65536) + (b2 * 256) + b3;
}
//------------------------------------------------------------------------------

View File

@@ -28,7 +28,7 @@ TxMeta::TxMeta(uint256 const& txid, std::uint32_t ledger, STObject const& obj)
auto affectedNodes = dynamic_cast<STArray const*>(obj.peekAtPField(sfAffectedNodes));
XRPL_ASSERT(affectedNodes, "xrpl::TxMeta::TxMeta(STObject) : type cast succeeded");
if (affectedNodes)
if (affectedNodes != nullptr)
nodes_ = *affectedNodes;
setAdditionalFields(obj);
@@ -96,7 +96,7 @@ TxMeta::getAffectedAccounts() const
{
auto const* inner = dynamic_cast<STObject const*>(&node.peekAtIndex(index));
XRPL_ASSERT(inner, "xrpl::getAffectedAccounts : STObject type cast succeeded");
if (inner)
if (inner != nullptr)
{
for (auto const& field : *inner)
{

View File

@@ -63,7 +63,7 @@ to_string(Currency const& currency)
bool
to_currency(Currency& currency, std::string const& code)
{
if (code.empty() || !code.compare(systemCurrencyCode()))
if (code.empty() || (code.compare(systemCurrencyCode()) == 0))
{
currency = beast::zero;
return true;

View File

@@ -263,7 +263,7 @@ decodeBase58(std::string const& s)
// Allocate enough space in big-endian base256 representation.
// log(58) / log(256), rounded up.
std::vector<unsigned char> b256(remain * 733 / 1000 + 1);
std::vector<unsigned char> b256((remain * 733 / 1000) + 1);
while (remain > 0)
{
auto carry = alphabetReverse[*psz];
@@ -308,7 +308,7 @@ encodeBase58Token(TokenType type, void const* token, std::size_t size)
// Lay the data out as
// <type><token><checksum>
buf[0] = safe_cast<std::underlying_type_t<TokenType>>(type);
if (size)
if (size != 0u)
std::memcpy(buf.data() + 1, token, size);
checksum(buf.data() + 1 + size, buf.data(), 1 + size);
@@ -383,7 +383,7 @@ b256_to_b58_be(std::span<std::uint8_t const> input, std::span<std::uint8_t> out)
{
break;
}
auto const src_i_end = input.size() - i * 8;
auto const src_i_end = input.size() - (i * 8);
if (src_i_end >= 8)
{
std::memcpy(&base_2_64_coeff_buf[num_coeff], &input[src_i_end - 8], 8);
@@ -450,7 +450,7 @@ b256_to_b58_be(std::span<std::uint8_t const> input, std::span<std::uint8_t> out)
{
to_skip = count_leading_zeros(b58_be_s);
skip_zeros = false;
if (out.size() < (i + 1) * 10 - to_skip)
if (out.size() < ((i + 1) * 10) - to_skip)
{
return Unexpected(TokenCodecErrc::outputTooSmall);
}
@@ -502,7 +502,7 @@ b58_to_b256_be(std::string_view input, std::span<std::uint8_t> out)
// log(2^(38*8),58^10)) ~= 5.18. So 6 coeff are enough
std::array<std::uint64_t, 6> b_58_10_coeff{};
auto [num_full_coeffs, partial_coeff_len] = xrpl::b58_fast::detail::div_rem(input.size(), 10);
auto const num_partial_coeffs = partial_coeff_len ? 1 : 0;
auto const num_partial_coeffs = (partial_coeff_len != 0u) ? 1 : 0;
auto const num_b_58_10_coeffs = num_full_coeffs + num_partial_coeffs;
XRPL_ASSERT(
num_b_58_10_coeffs <= b_58_10_coeff.size(),
@@ -521,7 +521,7 @@ b58_to_b256_be(std::string_view input, std::span<std::uint8_t> out)
{
for (int j = 0; j < num_full_coeffs; ++j)
{
unsigned char c = input[partial_coeff_len + j * 10 + i];
unsigned char c = input[partial_coeff_len + (j * 10) + i];
auto cur_val = ::xrpl::alphabetReverse[c];
if (cur_val < 0)
{
@@ -586,7 +586,7 @@ b58_to_b256_be(std::string_view input, std::span<std::uint8_t> out)
cur_out_i += 1;
}
}
if ((cur_out_i + 8 * (cur_result_size - 1)) > out.size())
if ((cur_out_i + (8 * (cur_result_size - 1))) > out.size())
{
return Unexpected(TokenCodecErrc::outputTooSmall);
}

View File

@@ -72,7 +72,7 @@ DatabaseCon::~DatabaseCon()
// checkpoint is currently in progress. Wait for it to end, otherwise
// creating a new DatabaseCon to the same database may fail due to the
// database being locked by our (now old) Checkpointer.
while (wk.use_count())
while (wk.use_count() != 0)
{
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
@@ -84,7 +84,7 @@ std::unique_ptr<std::vector<std::string> const> DatabaseCon::Setup::globalPragma
void
DatabaseCon::setupCheckpointing(JobQueue* q, Logs& l)
{
if (!q)
if (q == nullptr)
Throw<std::logic_error>("No JobQueue");
checkpointer_ = checkpointers.create(session_, *q, l);
}

View File

@@ -98,7 +98,7 @@ getConnection(soci::session& s)
if (auto b = dynamic_cast<soci::sqlite3_session_backend*>(be))
result = b->conn_;
if (!result)
if (result == nullptr)
Throw<std::logic_error>("Didn't get a database connection.");
return result;
@@ -107,7 +107,7 @@ getConnection(soci::session& s)
std::uint32_t
getKBUsedAll(soci::session& s)
{
if (!getConnection(s))
if (getConnection(s) == nullptr)
Throw<std::logic_error>("No connection found.");
return static_cast<size_t>(sqlite_api::sqlite3_memory_used() / kilobytes(1));
}
@@ -249,7 +249,7 @@ public:
{
auto [conn, keepAlive] = getConnection();
(void)keepAlive;
if (!conn)
if (conn == nullptr)
return;
int log = 0, ckpt = 0;

View File

@@ -23,7 +23,7 @@ Consumer::Consumer() : m_logic(nullptr), m_entry(nullptr)
Consumer::Consumer(Consumer const& other) : m_logic(other.m_logic), m_entry(nullptr)
{
if (m_logic && other.m_entry)
if ((m_logic != nullptr) && (other.m_entry != nullptr))
{
m_entry = other.m_entry;
m_logic->acquire(*m_entry);
@@ -32,7 +32,7 @@ Consumer::Consumer(Consumer const& other) : m_logic(other.m_logic), m_entry(null
Consumer::~Consumer()
{
if (m_logic && m_entry)
if ((m_logic != nullptr) && (m_entry != nullptr))
m_logic->release(*m_entry);
}
@@ -43,14 +43,14 @@ Consumer::operator=(Consumer const& other)
return *this;
// remove old ref
if (m_logic && m_entry)
if ((m_logic != nullptr) && (m_entry != nullptr))
m_logic->release(*m_entry);
m_logic = other.m_logic;
m_entry = other.m_entry;
// add new ref
if (m_logic && m_entry)
if ((m_logic != nullptr) && (m_entry != nullptr))
m_logic->acquire(*m_entry);
return *this;
@@ -68,7 +68,7 @@ Consumer::to_string() const
bool
Consumer::isUnlimited() const
{
if (m_entry)
if (m_entry != nullptr)
return m_entry->isUnlimited();
return false;
@@ -78,7 +78,7 @@ Disposition
Consumer::disposition() const
{
Disposition d = ok;
if (m_logic && m_entry)
if ((m_logic != nullptr) && (m_entry != nullptr))
d = m_logic->charge(*m_entry, Charge(0));
return d;
@@ -89,7 +89,7 @@ Consumer::charge(Charge const& what, std::string const& context)
{
Disposition d = ok;
if (m_logic && m_entry && !m_entry->isUnlimited())
if ((m_logic != nullptr) && (m_entry != nullptr) && !m_entry->isUnlimited())
d = m_logic->charge(*m_entry, what, context);
return d;

View File

@@ -31,7 +31,7 @@ initStateDB(soci::session& session, BasicConfig const& config, std::string const
count = *countO;
}
if (!count)
if (count == 0)
{
session << "INSERT INTO DbState VALUES (1, '', '', 0);";
}
@@ -45,7 +45,7 @@ initStateDB(soci::session& session, BasicConfig const& config, std::string const
count = *countO;
}
if (!count)
if (count == 0)
{
session << "INSERT INTO CanDelete VALUES (1, 0);";
}

View File

@@ -130,7 +130,7 @@ SHAMapLeafNode*
SHAMap::findKey(uint256 const& id) const
{
SHAMapLeafNode* leaf = walkTowardsKey(id);
if (leaf && leaf->peekItem()->key() != id)
if ((leaf != nullptr) && leaf->peekItem()->key() != id)
leaf = nullptr;
return leaf;
}
@@ -221,7 +221,7 @@ SHAMap::fetchNodeNT(SHAMapHash const& hash, SHAMapSyncFilter* filter) const
}
}
if (filter)
if (filter != nullptr)
node = checkFilter(hash, filter);
return node;
@@ -255,7 +255,7 @@ SHAMap::descendThrow(SHAMapInnerNode* parent, int branch) const
{
SHAMapTreeNode* ret = descend(parent, branch);
if (!ret && !parent->isEmptyBranch(branch))
if ((ret == nullptr) && !parent->isEmptyBranch(branch))
Throw<SHAMapMissingNode>(type_, parent->getChildHash(branch));
return ret;
@@ -276,7 +276,7 @@ SHAMapTreeNode*
SHAMap::descend(SHAMapInnerNode* parent, int branch) const
{
SHAMapTreeNode* ret = parent->getChildPointer(branch);
if (ret || !backed_)
if ((ret != nullptr) || !backed_)
return ret;
intr_ptr::SharedPtr<SHAMapTreeNode> node = fetchNodeNT(parent->getChildHash(branch));
@@ -328,7 +328,7 @@ SHAMap::descend(
SHAMapTreeNode* child = parent->getChildPointer(branch);
if (!child)
if (child == nullptr)
{
auto const& childHash = parent->getChildHash(branch);
intr_ptr::SharedPtr<SHAMapTreeNode> childNode = fetchNodeNT(childHash, filter);
@@ -354,7 +354,7 @@ SHAMap::descendAsync(
pending = false;
SHAMapTreeNode* ret = parent->getChildPointer(branch);
if (ret)
if (ret != nullptr)
return ret;
auto const& hash = parent->getChildHash(branch);
@@ -362,7 +362,7 @@ SHAMap::descendAsync(
auto ptr = cacheLookup(hash);
if (!ptr)
{
if (filter)
if (filter != nullptr)
ptr = checkFilter(hash, filter);
if (!ptr && backed_)
@@ -483,14 +483,14 @@ SHAMap::onlyBelow(SHAMapTreeNode* node) const
{
if (!inner->isEmptyBranch(i))
{
if (nextNode)
if (nextNode != nullptr)
return no_item;
nextNode = descendThrow(inner, i);
}
}
if (!nextNode)
if (nextNode == nullptr)
{
// LCOV_EXCL_START
UNREACHABLE("xrpl::SHAMap::onlyBelow : no next node");
@@ -514,7 +514,7 @@ SHAMap::peekFirstItem(SharedPtrNodeStack& stack) const
{
XRPL_ASSERT(stack.empty(), "xrpl::SHAMap::peekFirstItem : empty stack input");
SHAMapLeafNode* node = firstBelow(root_, stack);
if (!node)
if (node == nullptr)
{
while (!stack.empty())
stack.pop();
@@ -540,7 +540,7 @@ SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const
{
node = descendThrow(*inner, i);
auto leaf = firstBelow(node, stack, i);
if (!leaf)
if (leaf == nullptr)
Throw<SHAMapMissingNode>(type_, id);
XRPL_ASSERT(leaf->isLeaf(), "xrpl::SHAMap::peekNextItem : leaf is valid");
return leaf;
@@ -557,7 +557,7 @@ SHAMap::peekItem(uint256 const& id) const
{
SHAMapLeafNode* leaf = findKey(id);
if (!leaf)
if (leaf == nullptr)
return no_item;
return leaf->peekItem();
@@ -568,7 +568,7 @@ SHAMap::peekItem(uint256 const& id, SHAMapHash& hash) const
{
SHAMapLeafNode* leaf = findKey(id);
if (!leaf)
if (leaf == nullptr)
return no_item;
hash = leaf->getHash();
@@ -598,7 +598,7 @@ SHAMap::upper_bound(uint256 const& id) const
{
node = descendThrow(*inner, branch);
auto leaf = firstBelow(node, stack, branch);
if (!leaf)
if (leaf == nullptr)
Throw<SHAMapMissingNode>(type_, id);
return const_iterator(this, leaf->peekItem().get(), std::move(stack));
}
@@ -631,7 +631,7 @@ SHAMap::lower_bound(uint256 const& id) const
{
node = descendThrow(*inner, branch);
auto leaf = lastBelow(node, stack, branch);
if (!leaf)
if (leaf == nullptr)
Throw<SHAMapMissingNode>(type_, id);
return const_iterator(this, leaf->peekItem().get(), std::move(stack));
}
@@ -995,7 +995,7 @@ SHAMap::walkSubTree(bool doWrite, NodeObjectType t)
int pos = 0;
// We can't flush an inner node until we flush its children
while (1)
while (true)
{
while (pos < branchFactor)
{
@@ -1109,7 +1109,7 @@ SHAMap::dump(bool hash) const
if (!inner->isEmptyBranch(i))
{
auto child = inner->getChildPointer(i);
if (child)
if (child != nullptr)
{
XRPL_ASSERT(
child->getHash() == inner->getChildHash(i),

View File

@@ -136,7 +136,7 @@ SHAMap::compare(SHAMap const& otherMap, Delta& differences, int maxCount) const
auto [ourNode, otherNode] = nodeStack.top();
nodeStack.pop();
if (!ourNode || !otherNode)
if ((ourNode == nullptr) || (otherNode == nullptr))
{
// LCOV_EXCL_START
UNREACHABLE("xrpl::SHAMap::compare : missing a node");

View File

@@ -76,7 +76,7 @@ SHAMapNodeID::getChildNodeID(unsigned int m) const
Throw<std::logic_error>("Incorrect mask for " + to_string(*this));
SHAMapNodeID node{depth_ + 1, id_};
node.id_.begin()[depth_ / 2] |= (depth_ & 1) ? m : (m << 4);
node.id_.begin()[depth_ / 2] |= ((depth_ & 1) != 0u) ? m : (m << 4);
return node;
}
@@ -106,7 +106,7 @@ selectBranch(SHAMapNodeID const& id, uint256 const& hash)
auto const depth = id.getDepth();
auto branch = static_cast<unsigned int>(*(hash.begin() + (depth / 2)));
if (depth & 1)
if ((depth & 1) != 0u)
{
branch &= 0xf;
}

View File

@@ -93,13 +93,13 @@ SHAMap::visitDifferences(
if (root_->getHash().isZero())
return;
if (have && (root_->getHash() == have->root_->getHash()))
if ((have != nullptr) && (root_->getHash() == have->root_->getHash()))
return;
if (root_->isLeaf())
{
auto leaf = intr_ptr::static_pointer_cast<SHAMapLeafNode>(root_);
if (!have || !have->hasLeafNode(leaf->peekItem()->key(), leaf->getHash()))
if ((have == nullptr) || !have->hasLeafNode(leaf->peekItem()->key(), leaf->getHash()))
function(*root_);
return;
}
@@ -129,11 +129,11 @@ SHAMap::visitDifferences(
if (next->isInner())
{
if (!have || !have->hasInnerNode(childID, childHash))
if ((have == nullptr) || !have->hasInnerNode(childID, childHash))
stack.push({safe_downcast<SHAMapInnerNode*>(next), childID});
}
else if (
!have ||
(have == nullptr) ||
!have->hasLeafNode(
safe_downcast<SHAMapLeafNode*>(next)->peekItem()->key(), childHash))
{
@@ -192,7 +192,7 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se)
fullBelow = false;
++mn.deferred_;
}
else if (!d)
else if (d == nullptr)
{
// node is not in database
@@ -347,7 +347,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter)
// We have either emptied the stack or
// posted as many deferred reads as we can
if (mn.deferred_)
if (mn.deferred_ != 0)
gmn_ProcessDeferredReads(mn);
if (mn.max_ <= 0)
@@ -402,7 +402,7 @@ SHAMap::getNodeFat(
auto node = root_.get();
SHAMapNodeID nodeID;
while (node && node->isInner() && (nodeID.getDepth() < wanted.getDepth()))
while ((node != nullptr) && node->isInner() && (nodeID.getDepth() < wanted.getDepth()))
{
int branch = selectBranch(nodeID, wanted.getNodeID());
auto inner = safe_downcast<SHAMapInnerNode*>(node);
@@ -509,7 +509,7 @@ SHAMap::addRootNode(SHAMapHash const& hash, Slice const& rootNode, SHAMapSyncFil
if (root_->isLeaf())
clearSynching();
if (filter)
if (filter != nullptr)
{
Serializer s;
root_->serializeWithPrefix(s);
@@ -615,7 +615,7 @@ SHAMap::addKnownNode(SHAMapNodeID const& node, Slice const& rawNode, SHAMapSyncF
newNode = prevNode->canonicalizeChild(branch, std::move(newNode));
if (filter)
if (filter != nullptr)
{
Serializer s;
newNode->serializeWithPrefix(s);
@@ -643,7 +643,7 @@ SHAMap::deepCompare(SHAMap& other) const
auto const [node, otherNode] = stack.top();
stack.pop();
if (!node || !otherNode)
if ((node == nullptr) || (otherNode == nullptr))
{
JLOG(journal_.info()) << "unable to fetch node";
return false;
@@ -685,7 +685,7 @@ SHAMap::deepCompare(SHAMap& other) const
auto next = descend(node_inner, i);
auto otherNext = other.descend(other_inner, i);
if (!next || !otherNext)
if ((next == nullptr) || (otherNext == nullptr))
{
JLOG(journal_.warn()) << "unable to fetch inner node";
return false;

View File

@@ -40,7 +40,7 @@ ApplyContext::discard()
std::optional<TxMeta>
ApplyContext::apply(TER ter)
{
return view_->apply(base_, tx, ter, parentBatchId_, flags_ & tapDRY_RUN, journal);
return view_->apply(base_, tx, ter, parentBatchId_, (flags_ & tapDRY_RUN) != 0u, journal);
}
std::size_t

View File

@@ -65,7 +65,7 @@ preflight0(PreflightContext const& ctx, std::uint32_t flagMask)
return temINVALID;
}
if (ctx.tx.getFlags() & flagMask)
if ((ctx.tx.getFlags() & flagMask) != 0u)
{
JLOG(ctx.j.debug()) << ctx.tx.peekAtField(sfTransactionType).getFullText()
<< ": invalid flags.";
@@ -96,7 +96,7 @@ preflightCheckSigningKey(STObject const& sigObject, beast::Journal j)
std::optional<NotTEC>
preflightCheckSimulateKeys(ApplyFlags flags, STObject const& sigObject, beast::Journal j)
{
if (flags & tapDRY_RUN) // simulation
if ((flags & tapDRY_RUN) != 0u) // simulation
{
std::optional<Slice> const signature = sigObject[~sfTxnSignature];
if (signature && !signature->empty())
@@ -318,7 +318,7 @@ Transactor::minimumFee(
Fees const& fees,
ApplyFlags flags)
{
return scaleFeeLoad(baseFee, registry.getFeeTrack(), fees, flags & tapUNLIMITED);
return scaleFeeLoad(baseFee, registry.getFeeTrack(), fees, (flags & tapUNLIMITED) != 0u);
}
TER
@@ -329,7 +329,7 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee)
auto const feePaid = ctx.tx[sfFee].xrp();
if (ctx.flags & tapBATCH)
if ((ctx.flags & tapBATCH) != 0u)
{
if (feePaid == beast::zero)
return tesSUCCESS;
@@ -653,7 +653,7 @@ Transactor::checkSign(
return tesSUCCESS;
}
if ((flags & tapDRY_RUN) && pkSigner.empty() && !sigObject.isFieldPresent(sfSigners))
if (((flags & tapDRY_RUN) != 0u) && pkSigner.empty() && !sigObject.isFieldPresent(sfSigners))
{
// simulate: skip signature validation when neither SigningPubKey nor
// Signers are provided
@@ -886,7 +886,7 @@ Transactor::checkMultiSign(
// Master Key. Account may not have asfDisableMaster set.
std::uint32_t const signerAccountFlags = sleTxSignerRoot->getFieldU32(sfFlags);
if (signerAccountFlags & lsfDisableMaster)
if ((signerAccountFlags & lsfDisableMaster) != 0u)
{
JLOG(j.trace()) << "applyTransaction: Signer:Account lsfDisableMaster.";
return tefMASTER_DISABLED;
@@ -1119,7 +1119,7 @@ Transactor::operator()()
if (ctx_.size() > oversizeMetaDataCap)
result = tecOVERSIZE;
if (isTecClaim(result) && (view().flags() & tapFAIL_HARD))
if (isTecClaim(result) && ((view().flags() & tapFAIL_HARD) != 0u))
{
// If the tapFAIL_HARD flag is set, a tec result
// must not do anything
@@ -1264,7 +1264,7 @@ Transactor::operator()()
metadata = ctx_.apply(result);
}
if (ctx_.flags() & tapDRY_RUN)
if ((ctx_.flags() & tapDRY_RUN) != 0u)
{
applied = false;
}

View File

@@ -149,7 +149,7 @@ applyBatchTransactions(
beast::Journal j)
{
XRPL_ASSERT(
batchTxn.getTxnType() == ttBATCH && batchTxn.getFieldArray(sfRawTransactions).size() != 0,
batchTxn.getTxnType() == ttBATCH && !batchTxn.getFieldArray(sfRawTransactions).empty(),
"Batch transaction missing sfRawTransactions");
auto const parentBatchId = batchTxn.getTransactionID();
@@ -188,13 +188,13 @@ applyBatchTransactions(
if (!isTesSuccess(result.ter))
{
if (mode & tfAllOrNothing)
if ((mode & tfAllOrNothing) != 0u)
return false;
if (mode & tfUntilFailure)
if ((mode & tfUntilFailure) != 0u)
break;
}
else if (mode & tfOnlyOne)
else if ((mode & tfOnlyOne) != 0u)
{
break;
}

View File

@@ -28,7 +28,7 @@ ValidAMM::visitEntry(
}
// AMM pool changed
else if (
(type == ltRIPPLE_STATE && after->getFlags() & lsfAMMNode) ||
(type == ltRIPPLE_STATE && ((after->getFlags() & lsfAMMNode) != 0u)) ||
(type == ltACCOUNT_ROOT && after->isFieldPresent(sfAMMID)))
{
ammPoolChanged_ = true;

Some files were not shown because too many files have changed in this diff Show More