Compare commits

...

5 Commits

Author SHA1 Message Date
Michael Legleux
ab3382b116 rm patch 2026-02-11 00:51:41 -08:00
Michael Legleux
6153e2fa11 build image too 2026-02-11 00:47:34 -08:00
Michael Legleux
aab3305abe use currenty generate.py 2026-02-03 15:19:59 -08:00
Michael Legleux
66dbf460b4 feat: Build packages in GitHub 2026-02-03 15:14:52 -08:00
Copilot
7813683091 fix: Deletes expired NFToken offers from ledger (#5707)
This change introduces the `fixExpiredNFTokenOfferRemoval` amendment that allows expired offers to pass through `preclaim()` and be deleted in `doApply()`, following the same pattern used for expired credentials.
2026-02-03 16:37:24 +00:00
41 changed files with 1896 additions and 368 deletions

View File

@@ -17,194 +17,11 @@
"compiler_version": "12",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "ab4d1f0"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "21",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "8",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "9",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "ab4d1f0"
},
{
"distro_name": "rhel",
"distro_version": "10",
"compiler_name": "clang",
"compiler_version": "any",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "jammy",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "ab4d1f0"
},
{
"distro_name": "ubuntu",
"distro_version": "noble",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "ab4d1f0"
"compiler_version": "12"
}
],
"build_type": ["Debug", "Release"],

View File

@@ -1,11 +1,14 @@
# This workflow runs all workflows to check, build and test the project on
# various Linux flavors, as well as on MacOS and Windows, on every push to a
# This workflow runs all workflows to check, build, package and test the project on
# various Linux flavors, as well as on macOS and Windows, on every push to a
# user branch. However, it will not run if the pull request is a draft unless it
# has the 'DraftRunCI' label. For commits to PRs that target a release branch,
# it also uploads the libxrpl recipe to the Conan remote.
name: PR
on:
push:
branches:
- legleux/build
merge_group:
types:
- checks_requested
@@ -65,6 +68,9 @@ jobs:
.github/workflows/reusable-build.yml
.github/workflows/reusable-build-test-config.yml
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-build-pkg.yml
.github/workflows/reusable-pkg.yml
.github/workflows/reusable-package.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/reusable-upload-recipe.yml
@@ -73,6 +79,7 @@ jobs:
conan/**
external/**
include/**
pkgs/**
src/**
tests/**
CMakeLists.txt
@@ -97,68 +104,57 @@ jobs:
outputs:
go: ${{ steps.go.outputs.go == 'true' }}
check-levelization:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-levelization.yml
# check-levelization:
# needs: should-run
# if: ${{ needs.should-run.outputs.go == 'true' }}
# uses: ./.github/workflows/reusable-check-levelization.yml
check-rename:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-rename.yml
# build-test:
# needs: should-run
# if: ${{ needs.should-run.outputs.go == 'true' }}
# uses: ./.github/workflows/reusable-build-test.yml
# strategy:
# matrix:
# os: [linux, macos, windows]
# with:
# os: ${{ matrix.os }}
# secrets:
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
build-test:
build-package:
name: Build ${{ matrix.pkg_type }} ${{ matrix.arch }} packages
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-build-test.yml
uses: ./.github/workflows/reusable-build-pkg.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
os: [linux, macos, windows]
# pkg_type: [rpm]
pkg_type: [deb, rpm]
arch: [amd64]
# arch: [amd64, arm64]
with:
# Enable ccache only for events targeting the XRPLF repository, since
# other accounts will not have access to our remote cache storage.
ccache_enabled: ${{ github.repository_owner == 'XRPLF' }}
os: ${{ matrix.os }}
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
pkg_type: ${{ matrix.pkg_type }}
arch: ${{ matrix.arch }}
upload-recipe:
needs:
- should-run
- build-test
# Only run when committing to a PR that targets a release branch in the
# XRPLF repository.
if: ${{ github.repository_owner == 'XRPLF' && needs.should-run.outputs.go == 'true' && startsWith(github.ref, 'refs/heads/release') }}
uses: ./.github/workflows/reusable-upload-recipe.yml
secrets:
remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
notify-clio:
needs: upload-recipe
runs-on: ubuntu-latest
steps:
# Notify the Clio repository about the newly proposed release version, so
# it can be checked for compatibility before the release is actually made.
- name: Notify Clio
env:
GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
PR_URL: ${{ github.event.pull_request.html_url }}
run: |
gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
/repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
-F "client_payload[ref]=${{ needs.upload-recipe.outputs.recipe_ref }}" \
-F "client_payload[pr_url]=${PR_URL}"
# notify-clio:
# needs:
# - should-run
# - build-test
# if: ${{ needs.should-run.outputs.go == 'true' && contains(fromJSON('["release", "master"]'), github.ref_name) }}
# uses: ./.github/workflows/reusable-notify-clio.yml
# secrets:
# clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
# conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
# conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
passed:
if: failure() || cancelled()
needs:
- check-levelization
- check-rename
- build-test
- upload-recipe
- notify-clio
# - build-test
# - check-levelization
- build-package
runs-on: ubuntu-latest
steps:
- name: Fail

97
.github/workflows/on-trigger.new.yml vendored Normal file
View File

@@ -0,0 +1,97 @@
# This workflow runs all workflows to build and test the code on various Linux
# flavors, as well as on macOS and Windows, on a scheduled basis, on merge into
# the 'develop' or 'release*' branches, or when requested manually. Upon pushes
# to the develop branch it also uploads the libxrpl recipe to the Conan remote.
name: Trigger
on:
push:
branches:
- legleux/linux_packages
- develop
- release
- master
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/on-trigger.yml"
# Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**"
- ".github/actions/build-test/**"
- ".github/actions/generate-version/**"
- ".github/actions/setup-conan/**"
- ".github/scripts/strategy-matrix/**"
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-build-pkg.yml"
- ".github/workflows/reusable-pkg.yml"
- ".github/workflows/reusable-package.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".github/workflows/reusable-upload-recipe.yml"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
- "external/**"
- "include/**"
- "pkgs/**"
- "src/**"
- "tests/**"
- "CMakeLists.txt"
- "conanfile.py"
- "conan.lock"
# Run at 06:32 UTC on every day of the week from Monday through Friday. This
# will force all dependencies to be rebuilt, which is useful to verify that
# all dependencies can be built successfully. Only the dependencies that
# are actually missing from the remote will be uploaded.
schedule:
- cron: "32 6 * * 1-5"
# Run when manually triggered via the GitHub UI or API.
workflow_dispatch:
concurrency:
# When a PR is merged into the develop branch it will be assigned a unique
# group identifier, so execution will continue even if another PR is merged
# while it is still running. In all other cases the group identifier is shared
# per branch, so that any in-progress runs are cancelled when a new commit is
# pushed.
group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
# check-missing-commits:
# if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
# uses: ./.github/workflows/reusable-check-missing-commits.yml
# build-test:
# uses: ./.github/workflows/reusable-build-test.yml
# strategy:
# matrix:
# os: [linux, macos, windows]
# with:
# os: ${{ matrix.os }}
# strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
# secrets:
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
build-package:
name: Build ${{ matrix.pkg_type }} ${{ matrix.arch }} packages
uses: ./.github/workflows/reusable-build-pkg.yml
secrets: inherit
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
# pkg_type: [rpm]
pkg_type: [deb, rpm]
arch: [amd64]
# arch: [amd64, arm64]
with:
pkg_type: ${{ matrix.pkg_type }}
arch: ${{ matrix.arch }}

View File

@@ -9,6 +9,7 @@ on:
branches:
- "develop"
- "release*"
- "linux_packages_squashed"
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/on-trigger.yml"
@@ -22,6 +23,9 @@ on:
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-build-pkg.yml"
- ".github/workflows/reusable-pkg.yml"
- ".github/workflows/reusable-package.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".github/workflows/reusable-upload-recipe.yml"
@@ -30,6 +34,7 @@ on:
- "conan/**"
- "external/**"
- "include/**"
- "pkgs/**"
- "src/**"
- "tests/**"
- "CMakeLists.txt"
@@ -78,6 +83,21 @@ jobs:
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
build-package:
name: Build ${{ matrix.pkg_type }} ${{ matrix.arch }} packages
uses: ./.github/workflows/reusable-build-pkg.yml
secrets: inherit
strategy:
fail-fast: ${{ github.event_name == 'merge_group' }}
matrix:
# pkg_type: [rpm]
pkg_type: [deb, rpm]
arch: [amd64]
# arch: [amd64, arm64]
with:
pkg_type: ${{ matrix.pkg_type }}
arch: ${{ matrix.arch }}
upload-recipe:
needs: build-test
# Only run when pushing to the develop branch in the XRPLF repository.

34
.github/workflows/package-test.yml vendored Normal file
View File

@@ -0,0 +1,34 @@
# Reusable workflow that smoke-tests built rippled packages inside containers
# for each distribution the packages target. Receives the package type and
# architecture produced by the package-build workflow.
name: Test rippled
on:
  workflow_call:
    inputs:
      pkg_type:
        # Fixed: was `type: boolean` with description "Whether to run unit
        # tests" — callers pass the package type as a string ("deb"/"rpm").
        description: 'The type of package to test ("deb", "rpm").'
        required: true
        type: string
      arch:
        # Fixed: was described as a JSON runner string; it is the target
        # architecture of the packages under test (e.g. "amd64").
        description: "The architecture the packages were built for."
        required: true
        type: string
jobs:
  test:
    name: Test ${{ inputs.pkg_type }}-${{ inputs.arch }}
    strategy:
      fail-fast: false
      matrix:
        # One container per supported distribution family.
        include:
          - { pkg: rpm, distro: "rocky:9" }
          - { pkg: deb, distro: "ubuntu:jammy" }
          - { pkg: deb, distro: "debian:trixie" }
    runs-on: ubuntu-latest
    container: ${{ matrix.distro }}
    steps:
      # Placeholder until the real package install/test steps are wired up.
      - name: run unittests
        run: |
          ls -lh
      # - name: Download rippled artifact
      #   uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
      #   with:
      #     name: rippled-${{ inputs.config_name }}

148
.github/workflows/reusable-build-pkg.yml vendored Normal file
View File

@@ -0,0 +1,148 @@
on:
workflow_call:
inputs:
pkg_type:
required: false
type: string
arch:
required: false
type: string
# secrets:
# GPG_KEY_B64:
# description: "The gpg key to sign packages."
# required: true
# GPG_KEY_PASS_B64:
# description: "The gpg key passphrase."
# required: true
defaults:
run:
shell: bash
jobs:
build:
name: Build ${{ inputs.pkg_type }} ${{ inputs.arch }} package
runs-on: heavy${{ inputs.arch == 'arm64' && '-arm64' || '' }}
container: ghcr.io/xrplf/ci/${{ inputs.pkg_type == 'rpm' && 'rhel-9' || 'ubuntu-jammy' }}:gcc-12
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Build packages
run: |
./pkgs/build.sh
cat build_vars >> $GITHUB_STEP_SUMMARY
- uses: actions/upload-artifact@v4
with:
name: ${{ inputs.pkg_type }}-${{ inputs.arch }}
path: |
*.deb
*.ddeb
if-no-files-found: error
if: inputs.pkg_type == 'deb'
- uses: actions/upload-artifact@v4
with:
name: ${{ inputs.pkg_type }}-${{ inputs.arch }}
path: "*${{ inputs.arch }}.${{ inputs.pkg_type }}"
if-no-files-found: error
if: inputs.pkg_type == 'rpm'
test:
name: Test ${{ inputs.pkg_type }} ${{ inputs.arch }} package
needs: build
runs-on: heavy${{ inputs.arch == 'arm64' && '-arm64' || '' }}
container: ghcr.io/xrplf/ci/${{ inputs.pkg_type == 'rpm' && 'rhel-9' || 'ubuntu-jammy' }}:gcc-12
steps:
- uses: actions/download-artifact@v4
with:
name: ${{ inputs.pkg_type }}-${{ inputs.arch }}
- name: Running tests
run: echo "Running tests..."
sign:
name: Sign ${{ inputs.pkg_type }} ${{ inputs.arch }} package
needs: build
runs-on: ubuntu-latest
container: ghcr.io/astral-sh/uv:python3.13-bookworm-slim
steps:
- name: Install gpg & rpm
run: apt-get update && apt-get install -y gpg rpm
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- uses: actions/download-artifact@v4
with:
name: ${{ inputs.pkg_type }}-${{ inputs.arch }}
- name: Sign
env:
PYTHONUNBUFFERED: 1
GPG_KEY_B64: ${{ secrets.GPG_KEY_B64 }}
GPG_KEY_PASS_B64: ${{ secrets.GPG_KEY_PASS_B64 }}
run: |
if [ "${{ inputs.pkg_type }}" = "rpm" ]; then
for i in $(find . -maxdepth 1 -type f -name "rippled-[0-9]*.rpm"); do
echo "found $i"
./pkgs/sign_packages.py "$i"
done
elif [ "${{ inputs.pkg_type }}" = "deb" ]; then
for i in $(find . -maxdepth 1 -type f -name "rippled_*.deb"); do
echo "found $i"
./pkgs/sign_packages.py "$i"
done
fi
- uses: actions/upload-artifact@v4
with:
name: signed-rippled-${{ inputs.pkg_type }}-${{ inputs.arch }}
path: |
*.deb
*.ddeb
if-no-files-found: error
if: inputs.pkg_type == 'deb'
- uses: actions/upload-artifact@v4
with:
name: signed-rippled-${{ inputs.pkg_type }}-${{ inputs.arch }}
path: "*${{ inputs.arch }}.${{ inputs.pkg_type }}"
if-no-files-found: error
if: inputs.pkg_type == 'rpm'
docker:
name: Build Docker image
if: inputs.pkg_type == 'deb'
needs: build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- uses: actions/download-artifact@v4
with:
name: deb-${{ inputs.arch }}
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- uses: docker/metadata-action@v5
id: meta
with:
images: ghcr.io/${{ github.repository_owner }}/rippled
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha
- uses: docker/build-push-action@v6
with:
context: .
file: pkgs/docker/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

69
.github/workflows/reusable-package.yml vendored Normal file
View File

@@ -0,0 +1,69 @@
name: Package rippled
on:
workflow_call:
inputs:
build_type:
description: 'The build type to use ("Debug", "Release").'
required: false
type: string
default: 'Release'
cmake_args:
description: "Additional arguments to pass to CMake."
required: false
type: string
cmake_target:
description: "The CMake target to build."
required: false
type: string
runs_on:
description: Runner to run the job on as a JSON string
required: true
type: string
image:
description: "The image to run in (leave empty to run natively)"
required: false
type: string
default: ''
config_name:
description: "The name of the configuration."
required: false
type: string
defaults:
run:
shell: bash
jobs:
build:
name: Package ${{ inputs.config_name }}
runs-on: ${{ fromJSON(inputs.runs_on) }}
container: ${{ inputs.image != '' && inputs.image || null }}
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Build packages
run: |
export BUILD_TYPE=${{ inputs.build_type }}
export CMAKE_ARGS=${{ inputs.cmake_args }}
export CMAKE_TARGETS=${{ inputs.cmake_target }}
./pkgs/build.sh
{
echo "<table>"
while IFS='=' read -r k v; do
printf '<tr><td>%s</td><td align="right"><code>%s</code></td></tr>\n' "$k" "$v"
done < build_vars
echo "</table>"
} >> "$GITHUB_STEP_SUMMARY"
- uses: actions/upload-artifact@v4
with:
name: ${{ inputs.config_name }}
path: '**/*.{deb,rpm}'
if-no-files-found: error

41
.github/workflows/reusable-pkg.yml vendored Normal file
View File

@@ -0,0 +1,41 @@
name: Package
on:
workflow_call:
inputs:
build_dir:
description: "The directory where to build."
required: false
type: string
default: ".build"
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: false
type: string
default: linux
strategy_matrix_subset:
description: 'The strategy matrix to use for generating a subset of configurations.'
required: false
type: string
default: "package"
jobs:
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix_subset: ${{ inputs.strategy_matrix_subset }}
package:
needs:
- generate-matrix
uses: ./.github/workflows/reusable-package.yml
strategy:
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
with:
build_type: ${{ matrix.build_type }}
cmake_args: ${{ matrix.cmake_args }}
cmake_target: ${{ matrix.cmake_target }}
runs_on: ${{ toJSON(matrix.architecture.runner) }}
image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-5dd7158', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || '' }}
config_name: ${{ matrix.config_name }}

View File

@@ -13,6 +13,10 @@ on:
required: false
type: string
default: "minimal"
strategy_matrix_subset:
description: 'The strategy matrix to use for generating a subset of configurations.'
required: false
type: string
outputs:
matrix:
description: "The generated strategy matrix."
@@ -42,4 +46,5 @@ jobs:
env:
GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
GENERATE_SUBSET: ${{ inputs.strategy_matrix_subset != '' && format('--{0}', inputs.strategy_matrix_subset) || '' }}
run: ./generate.py ${{ env.GENERATE_SUBSET }} ${{ env.GENERATE_OPTION }} ${{ env.GENERATE_CONFIG }} >> "${GITHUB_OUTPUT}"

View File

@@ -16,6 +16,7 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(LendingProtocol, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)

203
pkgs/build.sh Executable file
View File

@@ -0,0 +1,203 @@
#!/usr/bin/env bash
# Build rippled OS packages (.deb or .rpm) for the distribution this script
# runs on, and write a `build_vars` file (checksums, version strings) that the
# CI workflow appends to its step summary.
set -o errexit
set -o xtrace
# Detect the package format from the host distribution's os-release data.
. /etc/os-release
case "$ID $ID_LIKE" in
*rhel*|*fedora*)
# dnf -y module install "nodejs:20/common"
PKG="rpm"
;;
*debian*|*ubuntu*)
# curl -fsSL https://deb.nodesource.com/setup_20.x -o nodesource_setup.sh
# chmod +x nodesource_setup.sh
# ./nodesource_setup.sh
# apt-get install -y nodejs
PKG="deb"
;;
esac
# build_dir="/root/build/${PKG}/packages"
# ./pkgs/build_rippled.${PKG}.sh
# echo "my build_vars" > build_vars
# exit 0
# if [ 1 -eq 0 ]; then
repo_dir=$PWD
# Auto-export everything assigned between `set -a` and `set +a` so the values
# reach child processes (rpmbuild / dpkg-buildpackage).
set -a
repo_name="rippled"
pkgs_dir="${repo_dir}/pkgs"
shared_files="${pkgs_dir}/shared"
pkg_files="${pkgs_dir}/packaging/${PKG}"
build_info_src="${repo_dir}/src/libxrpl/protocol/BuildInfo.cpp"
# Extract the first semver-shaped version string from BuildInfo.cpp.
xrpl_version=$(grep -E -i -o "\b(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-[0-9a-z\-]+(\.[0-9a-z\-]+)*)?(\+[0-9a-z\-]+(\.[0-9a-z\-]+)*)?\b" "${build_info_src}")
# Container checkouts are owned by a different uid; mark all dirs safe for git.
git config --global --add safe.directory '*'
branch=$(git rev-parse --abbrev-ref HEAD)
commit=$(git rev-parse HEAD)
short_commit=$(git rev-parse --short=7 HEAD)
date=$(git show -s --format=%cd --date=format-local:"%Y%m%d%H%M%S")
conan_remote_name="${CONAN_REMOTE_NAME:-xrplf}"
conan_remote_url="${CONAN_REMOTE_URL:-https://conan.ripplex.io}"
BUILD_TYPE=${BUILD_TYPE:-Release}
set +a
# Development builds get a date+commit suffix appended to the version.
if [ "${branch}" = 'develop' ]; then
# TODO: Can remove when CMake sets version
dev_version="${date}~${short_commit}"
xrpl_version="${xrpl_version}+${dev_version}"
fi
if [ "${PKG}" = 'rpm' ]; then
# Split "X.Y.Z-rcN" into the RPM Version and the pre/post-release tag.
IFS='-' read -r RIPPLED_RPM_VERSION RELEASE <<< "${xrpl_version}"
export RIPPLED_RPM_VERSION
RPM_RELEASE=${RPM_RELEASE-1}
# post-release version
if [ "hf" = "$(echo "$RELEASE" | cut -c -2)" ]; then
RPM_RELEASE="${RPM_RELEASE}.${RELEASE}"
# pre-release version (-b or -rc)
elif [[ $RELEASE ]]; then
RPM_RELEASE="0.${RPM_RELEASE}.${RELEASE}"
fi
export RPM_RELEASE
if [[ $RPM_PATCH ]]; then
RPM_PATCH=".${RPM_PATCH}"
export RPM_PATCH
fi
# Lay out a conventional rpmbuild tree under the per-format build dir.
build_dir="build/${PKG}/packages"
rm -rf ${build_dir}
mkdir -p ${build_dir}/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
git archive \
--remote "${repo_dir}" HEAD \
--prefix ${repo_name}/ \
--format tar.gz \
--output ${build_dir}/rpmbuild/SOURCES/rippled.tar.gz
ln --symbolic "${repo_dir}" ${build_dir}/rippled
cp -r "${pkgs_dir}/packaging/rpm/rippled.spec" ${build_dir}
pushd "${build_dir}" || exit
rpmbuild \
--define "_topdir ${PWD}/rpmbuild" \
--define "_smp_build_ncpus %(nproc --ignore=2 2>/dev/null || echo 1)" \
-ba rippled.spec
RPM_VERSION_RELEASE=$(rpm -qp --qf='%{NAME}-%{VERSION}-%{RELEASE}' ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm)
tar_file=$RPM_VERSION_RELEASE.tar.gz
# Record package metadata for the CI step summary and downstream jobs.
printf '%s\n' \
"rpm_md5sum=$(rpm -q --queryformat '%{SIGMD5}\n' -p ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm 2>/dev/null)" \
"rpm_sha256=$(sha256sum ./rpmbuild/RPMS/x86_64/rippled-[0-9]*.rpm | awk '{ print $1 }')" \
"rippled_version=${xrpl_version}" \
"rippled_git_hash=${commit}" \
"rpm_version=${RIPPLED_RPM_VERSION}" \
"rpm_file_name=${tar_file}" \
"rpm_version_release=${RPM_VERSION_RELEASE}" \
> build_vars
# Rename the files to match the debs
mv rpmbuild/RPMS/x86_64/* .
for f in *x86_64.rpm; do
new="${f/x86_64/amd64}"
mv "$f" "$new"
echo "Renamed $f -> $new"
done
rm -rf rpmbuild
rm -f rippled rippled.tar.gz rippled.spec
# Return to the original directory and clear the pushd stack.
pushd -0 && dirs -c
mv "${build_dir}/build_vars" .
elif [ "${PKG}" = 'deb' ]; then
# Debian versions cannot contain '-'; map dashes to tildes.
dpkg_version=$(echo "${xrpl_version}" | sed 's:-:~:g')
full_version="${dpkg_version}"
build_dir="build/${PKG}/packages"
rm -rf ${build_dir}
mkdir -p ${build_dir}
git archive \
--remote "${repo_dir}" HEAD \
--prefix ${repo_name}/ \
--format tar.gz \
--output "${build_dir}/${repo_name}_${dpkg_version}.orig.tar.gz"
pushd ${build_dir} || exit
tar -zxf "${repo_name}_${dpkg_version}.orig.tar.gz"
pushd ${repo_name} || exit
# Prepare the package metadata directory, `debian/`, within `rippled/`.
cp -r "${pkg_files}/debian" .
cp "${shared_files}/rippled.service" debian/
cp "${shared_files}/update-rippled.sh" .
cp "${shared_files}/update-rippled-cron" .
cp "${shared_files}/rippled-logrotate" .
if [ "${branch}" = 'develop' ]; then
# TODO: Can remove when CMake sets version
sed --in-place "s/versionString = \"\([^\"]*\)\"/versionString = \"${xrpl_version}\"/" "${build_info_src}"
fi
cat << CHANGELOG > ./debian/changelog
rippled (${xrpl_version}) unstable; urgency=low
* see RELEASENOTES
-- Ripple Labs Inc. <support@ripple.com> $(TZ=UTC date -R)
CHANGELOG
cat ./debian/changelog
# -b: binary only, -d: skip build-dep check, -us/-uc: unsigned.
dpkg-buildpackage -b -d -us -uc
popd || exit
rm -rf ${repo_name}
# for f in *.ddeb; do mv -- "$f" "${f%.ddeb}.deb"; done
popd || exit
# NOTE(review): this path uses xrpl_version, but dpkg names the .changes file
# with dpkg_version (dashes mapped to tildes). These differ for pre-release
# versions (e.g. "-rc1"), so the cp would fail there — verify.
cp ${build_dir}/${repo_name}_${xrpl_version}_amd64.changes .
# Pull the Checksums-Sha256 section out of the .changes file.
awk '/Checksums-Sha256:/{hit=1;next}/Files:/{hit=0}hit' ${repo_name}_${xrpl_version}_amd64.changes | sed -E 's!^[[:space:]]+!!' > shasums
# Look up the sha256 of a named file in the extracted checksum list.
sha() {
<shasums awk "/$1/ { print \$1 }"
}
printf '%s\n' \
"deb_sha256=$(sha "rippled_${full_version}_amd64.deb")" \
"dbg_sha256=$(sha "rippled-dbgsym_${full_version}_amd64")" \
"rippled_version=${xrpl_version}" \
"rippled_git_hash=${commit}" \
"dpkg_version=${dpkg_version}" \
"dpkg_full_version=${full_version}" \
> build_vars
pushd -0 && dirs -c
fi
# fi
# find . -name "*.${PKG}"
# mkdir -p $build_dir
# if [ "${PKG}" = 'rpm' ]; then
# mv /root/rpmbuild/RPMS/x86_64/* .
# for f in *x86_64.rpm; do
# new="${f/x86_64/amd64}"
# mv "$f" "$build_dir/$new"
# echo "Renamed $f -> $new"
# done
# # mv /root/rpmbuild/RPMS/x86_64/* $build_dir/
# else
# echo $PWD
# find / -name "rippled-3.0.0_amd64.deb"
# mv *.deb $build_dir
# fi
# printf '%s\n' \
# "rippled_version=3.0.0" \
# "rippled_git_hash=deadbeef" \
# > build_vars
# Surface the built packages at the repo root, where the workflow's
# upload-artifact globs expect them. Best-effort: ignore missing formats.
cp "${build_dir}/"*.deb . 2>/dev/null || true
cp "${build_dir}/"*.ddeb . 2>/dev/null || true
cp "${build_dir}/"*.rpm . 2>/dev/null || true

24
pkgs/docker/Dockerfile Normal file
View File

@@ -0,0 +1,24 @@
# Runtime image for rippled, built from the locally produced .deb package.
FROM ubuntu:jammy
# The workflow downloads the deb artifact into the build context first.
COPY rippled_*_amd64.deb /tmp/
# Install the package, then drop the deb and apt lists to keep the layer small.
RUN apt-get update && \
apt-get install -y --no-install-recommends /tmp/rippled_*_amd64.deb && \
rm -f /tmp/*.deb && \
rm -rf /var/lib/apt/lists/*
# Create an unprivileged service account and hand it the data, log, and
# install directories (heredoc RUN keeps this in one layer).
RUN <<EOF
useradd \
--system \
--no-create-home \
--shell /usr/sbin/nologin \
rippled
chown -R \
rippled:rippled \
/var/lib/rippled \
/var/log/rippled \
/opt/ripple
EOF
# 51235: peer protocol; 6006: admin/RPC port — assumed from the packaged
# default rippled.cfg; verify against the shipped config.
EXPOSE 51235 6006
# Run as the unprivileged user, not root.
USER rippled
ENTRYPOINT ["/opt/ripple/bin/rippled"]

View File

@@ -0,0 +1,3 @@
rippled daemon
-- Mike Ellery <mellery451@gmail.com> Tue, 04 Dec 2018 18:19:03 +0000

View File

@@ -0,0 +1,20 @@
Source: rippled
Section: net
Priority: optional
Maintainer: Ripple Labs Inc. <support@ripple.com>
Build-Depends: cmake,
debhelper (>= 13),
debhelper-compat (= 13)
# debhelper (>= 14),
# debhelper-compat (= 14),
# TODO: Let's go for 14!
Standards-Version: 4.6.0
Homepage: https://github.com/XRPLF/rippled.git
Rules-Requires-Root: no
Package: rippled
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Description: XRP Ledger server (rippled)
rippled is the core server of the XRP Ledger, providing a peer-to-peer
network node for validating and processing transactions.

View File

@@ -0,0 +1,86 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: rippled
Source: https://github.com/ripple/rippled
Files: *
Copyright: 2012-2019 Ripple Labs Inc.
License: __UNKNOWN__
The accompanying files under various copyrights.
Copyright (c) 2012, 2013, 2014 Ripple Labs Inc.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
The accompanying files incorporate work covered by the following copyright
and previous license notice:
Copyright (c) 2011 Arthur Britto, David Schwartz, Jed McCaleb,
Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant
Some code from Raw Material Software, Ltd., provided under the terms of the
ISC License. See the corresponding source files for more details.
Copyright (c) 2013 - Raw Material Software Ltd.
Please visit http://www.juce.com
Some code from ASIO examples:
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
Some code from Bitcoin:
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2011 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file license.txt or http://www.opensource.org/licenses/mit-license.php.
Some code from Tom Wu:
This software is covered under the following copyright:
/*
* Copyright (c) 2003-2005 Tom Wu
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF
* THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* In addition, the following condition applies:
*
* All redistributions must retain an intact copy of this copyright notice
* and disclaimer.
*/
Address all questions regarding this license to:
Tom Wu
tjw@cs.Stanford.EDU

View File

@@ -0,0 +1,3 @@
/var/log/rippled/
/var/lib/rippled/
/etc/systemd/system/rippled.service.d/

View File

@@ -0,0 +1,2 @@
README.md
LICENSE.md

View File

@@ -0,0 +1,3 @@
opt/ripple/include
opt/ripple/lib/*.a
opt/ripple/lib/cmake/*

View File

@@ -0,0 +1,2 @@
/opt/ripple/etc/rippled.cfg
/opt/ripple/etc/validators.txt

View File

@@ -0,0 +1,10 @@
etc/logrotate.d/rippled
opt/ripple/bin/rippled
opt/ripple/bin/update-rippled.sh
opt/ripple/bin/validator-keys
opt/ripple/bin/xrpld
opt/ripple/etc/rippled.cfg
opt/ripple/etc/update-rippled-cron
opt/ripple/etc/validators.txt
opt/ripple/include/*
opt/ripple/lib/*

View File

@@ -0,0 +1,4 @@
opt/ripple/etc/rippled.cfg etc/opt/ripple/rippled.cfg
opt/ripple/etc/validators.txt etc/opt/ripple/validators.txt
opt/ripple/bin/rippled usr/local/bin/rippled
opt/ripple/bin/rippled opt/ripple/bin/xrpld

View File

@@ -0,0 +1,39 @@
#!/bin/sh
set -e
USER_NAME=rippled
GROUP_NAME=rippled
case "$1" in
configure)
id -u $USER_NAME >/dev/null 2>&1 || \
useradd --system \
--home-dir /nonexistent \
--no-create-home \
--shell /usr/sbin/nologin \
--comment "system user for rippled" \
--user-group \
${USER_NAME}
chown -R $USER_NAME:$GROUP_NAME /var/log/rippled/
chown -R $USER_NAME:$GROUP_NAME /var/lib/rippled/
chown -R $USER_NAME:$GROUP_NAME /opt/ripple
chmod 755 /var/log/rippled/
chmod 755 /var/lib/rippled/
chmod 644 /opt/ripple/etc/update-rippled-cron
chmod 644 /etc/logrotate.d/rippled
chown -R root:$GROUP_NAME /opt/ripple/etc/update-rippled-cron
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0

View File

@@ -0,0 +1,17 @@
#!/bin/sh
set -e
case "$1" in
purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
;;
*)
echo "postrm called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0

View File

@@ -0,0 +1,20 @@
#!/bin/sh
set -e
case "$1" in
install|upgrade)
;;
abort-upgrade)
;;
*)
echo "preinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0

View File

@@ -0,0 +1,20 @@
#!/bin/sh
set -e
case "$1" in
remove|upgrade|deconfigure)
;;
failed-upgrade)
;;
*)
echo "prerm called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0

88
pkgs/packaging/deb/debian/rules Executable file
View File

@@ -0,0 +1,88 @@
#!/usr/bin/make -f
export DH_VERBOSE = 1
export DH_OPTIONS = -v
## TODO: Confirm these are still required.
# debuild sets some warnings that don't work well
# for our current build, so try to remove those flags here:
export CFLAGS:=$(subst -Wformat,,$(CFLAGS))
export CFLAGS:=$(subst -Werror=format-security,,$(CFLAGS))
export CXXFLAGS:=$(subst -Wformat,,$(CXXFLAGS))
export CXXFLAGS:=$(subst -Werror=format-security,,$(CXXFLAGS))
## TODO: Confirm these are still required.
export DEB_BUILD_MAINT_OPTIONS = hardening=+all
export DEB_BUILD_OPTIONS = nodwz
export RIPPLE_REMOTE = xrplf
export RIPPLE_REMOTE_URL = https://conan.ripplex.io
# ## CMake Configure args
# export DEB_CMAKE_GENERATOR = Ninja
export DEB_CMAKE_BUILD_TYPE = RelWithDebInfo
NPROC := $(shell nproc --ignore=2)
export DEB_BUILD_OPTIONS += parallel=$(NPROC)
CONAN_HOME := $(shell conan config home)
CONAN_PROFILE := $(shell conan profile path default)
CONAN_GCONF := $(CONAN_HOME)/global.conf
INSTALL_PREFIX := "/opt/ripple"
BUILD_DIR := obj-$(DEB_BUILD_GNU_TYPE)
.ONESHELL:
SHELL := /bin/bash
%:
dh $@ --buildsystem=cmake
override_dh_installsystemd:
dh_installsystemd --no-start
override_dh_auto_configure:
conan remote add --index 0 $(RIPPLE_REMOTE) $(RIPPLE_REMOTE_URL) --force
conan config install ./conan/profiles/default --target-folder $(CONAN_HOME)/profiles
echo "tools.build:jobs={{ os.cpu_count() - 2 }}" >> ${CONAN_HOME}/global.conf
echo "core.download:parallel={{ os.cpu_count() }}" >> $(CONAN_GCONF)
conan install . \
--settings:all build_type=$(DEB_CMAKE_BUILD_TYPE) \
--output-folder=$(BUILD_DIR) \
--options:host "&:xrpld=True" \
--options:host "&:tests=True" \
--build=missing
# Debian assumes an offline build process and sets CMake's FETCHCONTENT_FULLY_DISCONNECTED variable to ON
# To use as much as the default settings as possible we'll clone it where CMake's FetchContent expects it.
mkdir -p "$(BUILD_DIR)/_deps"
git clone https://github.com/ripple/validator-keys-tool.git "$(BUILD_DIR)/_deps/validator_keys-src"
dh_auto_configure --builddirectory=$(BUILD_DIR) -- \
-DCMAKE_BUILD_TYPE:STRING=$(DEB_CMAKE_BUILD_TYPE) \
-Dxrpld=ON -Dtests=ON -Dvalidator_keys=ON \
-DCMAKE_INSTALL_PREFIX:PATH=$(INSTALL_PREFIX) \
-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=$(BUILD_DIR)/build/generators/conan_toolchain.cmake
override_dh_auto_build:
dh_auto_build \
--builddirectory=$(BUILD_DIR) -- rippled validator-keys
# cmake \
# --build $(BUILD_DIR) \
# --target rippled \
# --target validator-keys \
# --parallel $(NPROC)
override_dh_auto_install:
cmake --install $(BUILD_DIR) --prefix debian/tmp/opt/ripple
# install -D $(BUILD_DIR)/_deps/validator_keys_src-build/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
install -D $(BUILD_DIR)/_deps/validator_keys_src-build/validator-keys debian/tmp/opt/ripple/bin/validator-keys
install -D update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh
install -D update-rippled-cron debian/tmp/opt/ripple/etc/update-rippled-cron
install -D rippled-logrotate debian/tmp/etc/logrotate.d/rippled
rm -rf debian/tmp/opt/ripple/lib64/cmake/date
override_dh_dwz:
@echo "Skipping DWZ due to huge debug info"

View File

@@ -0,0 +1,84 @@
#!/usr/bin/make -f
export DH_VERBOSE = 1
export DH_OPTIONS = -v
# debuild sets some warnings that don't work well
# for our current build, so try to remove those flags here:
export CFLAGS:=$(subst -Wformat,,$(CFLAGS))
export CFLAGS:=$(subst -Werror=format-security,,$(CFLAGS))
export CXXFLAGS:=$(subst -Wformat,,$(CXXFLAGS))
export CXXFLAGS:=$(subst -Werror=format-security,,$(CXXFLAGS))
export DEB_BUILD_MAINT_OPTIONS = hardening=+all
export DEB_BUILD_OPTIONS = nodwz
export RIPPLE_REMOTE="xrplf"
export RIPPLE_REMOTE_URL="https://conan.ripplex.io"
export CONAN_HOME := $(shell conan config home)
export CONAN_PROFILE := $(shell conan profile path default)
export DEB_CMAKE_GENERATOR = Ninja
export DEB_CMAKE_BUILD_TYPE = Release
export DEB_CMAKE_EXTRA_FLAGS = -Dvalidator_keys=ON -Dtests=ON -Dxrpld=ON -DCMAKE_TOOLCHAIN_FILE=build/generators/conan_toolchain.cmake
.ONESHELL:
SHELL := /bin/bash
NPROC := $(shell expr $(shell nproc) - 2)
BUILD_DIR := build.rippled
VKT_PATH := $(BUILD_DIR)/vkt
%:
dh $@ --buildsystem=cmake
override_dh_installsystemd:
dh_installsystemd --no-start
override_dh_auto_configure:
dpkg-buildflags --get CFLAGS
dpkg-buildflags --get LDFLAGS
dpkg-buildflags --status
conan remote add --index 0 $(RIPPLE_REMOTE) $(RIPPLE_REMOTE_URL) --force
sed -i "s/gnu17/20/" $(CONAN_PROFILE)
git clone https://github.com/ripple/validator-keys-tool.git $(VKT_PATH)
conan install . --options:a "&:xrpld=True" --options:a "&:tests=True" --build "missing"
dh_auto_configure --builddirectory=$(BUILD_DIR) -- \
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-Dvalidator_keys=ON \
-Dtests=ON \
-Dxrpld=ON \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake
# -DFETCHCONTENT_FULLY_DISCONNECTED=OFF \
# -DFETCHCONTENT_SOURCE_DIR_VALIDATOR_KEYS_SRC=$(VKT_PATH) \
# dh_auto_configure --builddirectory=$(BUILD_DIR)
override_dh_auto_build:
cmake --build . --target rippled --target validator-keys --parallel 30
#manually run:
FETCHCONTENT_BASE_DIR:PATH=/home/emel/dev/Ripple/rippled/rippled/github_linux_packages/build/dpkg/packages/rippled/build.rippled/_deps
FETCHCONTENT_FULLY_DISCONNECTED:BOOL=OFF
FETCHCONTENT_QUIET:BOOL=ON
FETCHCONTENT_SOURCE_DIR_VALIDATOR_KEYS_SRC:PATH=
FETCHCONTENT_UPDATES_DISCONNECTED:BOOL=OFF
FETCHCONTENT_UPDATES_DISCONNECTED_VALIDATOR_KEYS_SRC:BOOL=OFF
override_dh_auto_install:
cmake --install $(BUILD_DIR) --prefix debian/tmp/opt/ripple
install -D $(BUILD_DIR)/validator-keys/validator-keys debian/tmp/opt/ripple/bin/validator-keys
install -D bin/getRippledInfo debian/tmp/opt/ripple/bin/getRippledInfo
install -D update-rippled.sh debian/tmp/opt/ripple/bin/update-rippled.sh
install -D update-rippled-cron debian/tmp/opt/ripple/etc/update-rippled-cron
install -D rippled-logrotate debian/tmp/etc/logrotate.d/rippled
rm -rf debian/tmp/opt/ripple/lib64/cmake/date
override_dh_dwz:
@echo "Skipping DWZ due to huge debug info"

View File

@@ -0,0 +1 @@
3.0 (native)

View File

@@ -0,0 +1,2 @@
#abort-on-upstream-changes
#unapply-patches

View File

@@ -0,0 +1 @@
enable rippled.service

View File

@@ -0,0 +1,170 @@
%global pkg_name %{getenv:repo_name}
%global branch %{getenv:branch}
%global commit %{getenv:commit}
%global shortcommit %{getenv:shortcommit}
%global date %{getenv:commit_date}
%global conan_remote_name %{getenv:conan_remote_name}
%global conan_remote_url %{getenv:conan_remote_url}
%global shared_files %{getenv:shared_files}
%global pkg_files %{getenv:pkg_files}
%global build_type %{getenv:BUILD_TYPE}
%global _prefix /opt/ripple
%global srcdir %{_builddir}/rippled
%global blddir %{srcdir}/bld.rippled
%global xrpl_version %{getenv:xrpl_version}
%global ver_base %(v=%{xrpl_version}; echo ${v%%-*})
%global _has_dash %(v=%{xrpl_version}; [ "${v#*-}" != "$v" ] && echo 1 || echo 0)
%if 0%{?_has_dash}
%global ver_suffix %(v=%{xrpl_version}; printf %s "${v#*-}")
%endif
Name: %{pkg_name}
Version: %{ver_base}
Release: %{?ver_suffix:0.%{ver_suffix}}%{!?ver_suffix:1}%{?dist}
Summary: %{name} XRPL daemon
License: ISC
URL: https://github.com/XRPLF/rippled
Source0: rippled.tar.gz
%{warn:name=%{name}}
%{warn:version=%{version}}
%{warn:ver_base=%{ver_base}}
%{warn:ver_suffix=%{ver_suffix}}
%{warn:release=%{release}}
%{warn:FullReleaseVersion=%{name}-%{version}-%{release}.%{_arch}.rpm}
%description
%{name} with p2p server for the XRP Ledger.
%prep
%autosetup -p1 -n %{name}
# TODO: Remove when version set with CMake.
if [ %{branch} == 'develop' ]; then
sed --in-place "s/versionString = \"\([^\"]*\)\"/versionString = \"\1+%{ver_input}\"/" src/libxrpl/protocol/BuildInfo.cpp
fi
%build
conan remote add --index 0 %{conan_remote_name} %{conan_remote_url} --force
conan config install conan/profiles/default --target-folder $(conan config home)/profiles/
echo "tools.build:jobs={{ os.cpu_count() - 2 }}" >> ${CONAN_HOME}/global.conf
echo "core.download:parallel={{ os.cpu_count() }}" >> ${CONAN_HOME}/global.conf
conan install %{srcdir} \
--settings:all build_type=%{build_type} \
--output-folder %{srcdir}/conan_deps \
--options:host "&:xrpld=True" \
--options:host "&:tests=True" \
--build=missing
cmake \
-S %{srcdir} \
-B %{blddir} \
-Dxrpld=ON \
-Dvalidator_keys=ON \
-Dtests=ON \
-DCMAKE_BUILD_TYPE:STRING=%{build_type} \
-DCMAKE_INSTALL_PREFIX:PATH=%{_prefix} \
-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=%{srcdir}/conan_deps/build/generators/conan_toolchain.cmake
cmake \
--build %{blddir} \
--parallel %{_smp_build_ncpus} \
--target rippled \
--target validator-keys
%install
rm -rf %{buildroot}
DESTDIR=%{buildroot} cmake --install %{blddir}
install -Dm0755 %{shared_files}/update-rippled.sh %{buildroot}%{_bindir}/update-rippled.sh
ln -s rippled %{buildroot}%{_bindir}/xrpld
ln -s update-rippled.sh %{buildroot}%{_bindir}/update-xrpld.sh
# configs
install -Dm0644 %{srcdir}/cfg/rippled-example.cfg %{buildroot}%{_prefix}/etc/rippled.cfg
install -Dm0644 %{srcdir}/cfg/validators-example.txt %{buildroot}%{_prefix}/etc/validators.txt
mkdir -p %{buildroot}%{_sysconfdir}/opt/ripple
#/etc points to /opt
ln -s ../../../opt/ripple/rippled.cfg %{buildroot}%{_sysconfdir}/opt/ripple/xrpld.cfg
ln -s ../../../opt/ripple/etc/rippled.cfg %{buildroot}%{_sysconfdir}/opt/ripple/rippled.cfg
ln -s ../../../opt/ripple/etc/validators.txt %{buildroot}%{_sysconfdir}/opt/ripple/validators.txt
# systemd/sysusers/tmpfiles
install -Dm0644 %{shared_files}/rippled.service %{buildroot}%{_unitdir}/rippled.service
install -Dm0644 %{pkg_files}/rippled.sysusers %{buildroot}%{_sysusersdir}/rippled.conf
install -Dm0644 %{pkg_files}/rippled.tmpfiles %{buildroot}%{_tmpfilesdir}/rippled.conf
%files
%license LICENSE*
%doc README*
# Files/dirs the pkgs owns
%dir %{_prefix}
%dir %{_prefix}/bin
%dir %{_prefix}/etc
%if 0
%dir %{_sysconfdir}/opt # Add this if rpmlint cries.
%endif
%dir %{_sysconfdir}/opt/ripple
# Binaries and symlinks under our (non-standard) _prefix (/opt/ripple)
%{_bindir}/rippled
%{_bindir}/xrpld
%{_bindir}/update-rippled.sh
%{_bindir}/update-xrpld.sh
%{_bindir}/validator-keys
# We won't ship these but we'll create them.
%ghost /usr/local/bin/rippled
%ghost /usr/local/bin/xrpld
%config(noreplace) %{_prefix}/etc/rippled.cfg
%config(noreplace) %{_prefix}/etc/validators.txt
%config(noreplace) %{_sysconfdir}/opt/ripple/rippled.cfg
%config(noreplace) %{_sysconfdir}/opt/ripple/xrpld.cfg
%config(noreplace) %{_sysconfdir}/opt/ripple/validators.txt
# systemd and service user creation
%{_unitdir}/rippled.service
%{_sysusersdir}/rippled.conf
%{_tmpfilesdir}/rippled.conf
# Let tmpfiles create the db and log dirs
%ghost %dir /var/opt/ripple
%ghost %dir /var/opt/ripple/lib
%ghost %dir /var/opt/ripple/log
# TODO: Fix the CMake install() calls so we don't need to exclude these.
%exclude %{_prefix}/include/*
%exclude %{_prefix}/lib/*
%exclude %{_prefix}/lib/pkgconfig/*
%exclude /usr/lib/debug/**
%post
# Add a link to $PATH /usr/local/bin/rippled %{_bindir}/rippled (also non-standard)
mkdir -p /usr/local/bin
for i in rippled xrpld
do
if [ ! -e /usr/local/bin/${i} ]; then
ln -s %{_bindir}/${i} /usr/local/bin/${i}
elif [ -L /usr/local/bin/${i} ] && \
[ "$(readlink -f /usr/local/bin/${i})" != "%{_bindir}/${i}" ]; then
ln -sfn %{_bindir}/${i} /usr/local/bin/${i}
fi
done
%preun
# remove the link only if it points to us (on erase, $1 == 0)
for i in rippled xrpld
do
if [ "$1" -eq 0 ] && [ -L /usr/local/bin/${i} ] && \
[ "$(readlink -f /usr/local/bin/${i})" = "%{_bindir}/${i}" ]; then
rm -f /usr/local/bin/${i}
fi
done

View File

@@ -0,0 +1,2 @@
u rippled - "System user for rippled service"
g rippled - -

View File

@@ -0,0 +1,2 @@
d /var/opt/ripple/lib 0750 rippled rippled -
d /var/opt/ripple/log 0750 rippled adm -

Binary file not shown.

View File

@@ -0,0 +1,15 @@
/var/log/rippled/*.log {
daily
minsize 200M
rotate 7
nocreate
missingok
notifempty
compress
compresscmd /usr/bin/nice
compressoptions -n19 ionice -c3 gzip
compressext .gz
postrotate
/opt/ripple/bin/rippled --conf /opt/ripple/etc/rippled.cfg logrotate
endscript
}

View File

@@ -0,0 +1,15 @@
[Unit]
Description=Ripple Daemon
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/opt/ripple/bin/rippled --net --silent --conf /etc/opt/ripple/rippled.cfg
Restart=on-failure
User=rippled
Group=rippled
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,10 @@
# For automatic updates, symlink this file to /etc/cron.d/
# Do not remove the newline at the end of this cron script
# bash required for use of RANDOM below.
SHELL=/bin/bash
# PATH entries must be colon-separated; the original used semicolons,
# which left only /sbin on the effective PATH.
PATH=/sbin:/bin:/usr/sbin:/usr/bin
# invoke check/update script with random delay up to 59 mins
0 * * * * root sleep $((RANDOM*3540/32768)) && /opt/ripple/bin/update-rippled.sh

65
pkgs/shared/update-rippled.sh Executable file
View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# auto-update script for rippled daemon
# Detects the distro, checks whether a newer `rippled` package is
# available, and if so installs it and restarts the service. Intended to
# be driven hourly from the packaged cron file. Logs to $UPDATELOG.
# Check for sudo/root permissions
if [[ $(id -u) -ne 0 ]] ; then
echo "This update script must be run as root or sudo"
exit 1
fi
LOCKDIR=/tmp/rippleupdate.lock
UPDATELOG=/var/log/rippled/update.log
function cleanup {
# If this directory isn't removed, future updates will fail.
rmdir $LOCKDIR
}
# Use mkdir to check if process is already running. mkdir is atomic, as against file create.
if ! mkdir $LOCKDIR 2>/dev/null; then
echo $(date -u) "lockdir exists - won't proceed." >> $UPDATELOG
exit 1
fi
trap cleanup EXIT
# /etc/os-release provides $ID (distro name) used for the branch below.
source /etc/os-release
can_update=false
if [[ "$ID" == "ubuntu" || "$ID" == "debian" ]] ; then
# Silent update
apt-get update -qq
# The next line is an "awk"ward way to check if the package needs to be updated.
# `apt-get install -s` simulates; an "Inst rippled ..." line means an
# upgrade is pending.
RIPPLE=$(apt-get install -s --only-upgrade rippled | awk '/^Inst/ { print $2 }')
test "$RIPPLE" == "rippled" && can_update=true
function apply_update {
apt-get install rippled -qq
}
elif [[ "$ID" == "fedora" || "$ID" == "centos" || "$ID" == "rhel" || "$ID" == "scientific" ]] ; then
# RIPPLE_REPO selects the yum repo channel; defaults to "stable".
RIPPLE_REPO=${RIPPLE_REPO-stable}
yum --disablerepo=* --enablerepo=ripple-$RIPPLE_REPO clean expire-cache
# `yum check-update` exits non-zero when updates are available, hence
# the `||` to flip that into can_update=true.
yum check-update -q --enablerepo=ripple-$RIPPLE_REPO rippled || can_update=true
function apply_update {
yum update -y --enablerepo=ripple-$RIPPLE_REPO rippled
}
else
echo "unrecognized distro!"
exit 1
fi
# Do the actual update and restart the service after reloading systemctl daemon.
if [ "$can_update" = true ] ; then
# Redirect all further output to the update log (fd 3 keeps the
# original stdout available).
exec 3>&1 1>>${UPDATELOG} 2>&1
set -e
apply_update
systemctl daemon-reload
systemctl restart rippled.service
echo $(date -u) "rippled daemon updated."
else
echo $(date -u) "no updates available" >> $UPDATELOG
fi

194
pkgs/sign_packages.py Executable file
View File

@@ -0,0 +1,194 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.13"
# dependencies = [
# "python-gnupg",
# ]
# ///
import argparse
import base64
import gnupg
import os
import re
import shutil
import subprocess
import sys
import tempfile
from dataclasses import dataclass
from pathlib import Path
@dataclass(slots=True)
class SignCfg:
gnupghome: Path
fingerprint: str
passphrase: str
def set_tty():
    """Export GPG_TTY so a gpg pinentry can locate the controlling terminal.

    Best-effort: in CI/cron there is no TTY, and the ``tty`` binary may be
    absent entirely. The original only caught CalledProcessError, so a
    missing ``tty`` executable raised FileNotFoundError; catch OSError too.
    """
    try:
        tty = subprocess.check_output(
            ["tty"], text=True, stderr=subprocess.DEVNULL
        ).strip()
        os.environ["GPG_TTY"] = tty
    except (subprocess.CalledProcessError, OSError):
        # No controlling TTY (or no `tty` binary). Loopback/batch signing
        # does not need one, so just report and continue.
        print("No TTY detected. Skipping setting GPG_TTY.")
def make_cfg(passphrase: str, armored_private_key: str) -> SignCfg:
    """Import an ASCII-armored private key into a throwaway GNUPGHOME.

    Returns a SignCfg carrying the temp GNUPGHOME, the imported key's
    fingerprint, and the passphrase. The caller owns the GNUPGHOME and
    must remove it (main() does so in a finally block).

    Raises ValueError if gpg imported no key (the original raised a bare
    IndexError from ``fingerprints[0]`` and leaked the temp directory).
    """
    ghome = Path(tempfile.mkdtemp())
    ghome.chmod(0o700)  # gpg refuses group/world-accessible homedirs
    gpg = gnupg.GPG(gnupghome=str(ghome))
    imp = gpg.import_keys(armored_private_key)
    if not imp.fingerprints:
        shutil.rmtree(ghome, ignore_errors=True)
        raise ValueError(f"GPG key import failed: {imp.results}")
    return SignCfg(gnupghome=ghome, fingerprint=imp.fingerprints[0], passphrase=passphrase)
def import_pubkey_into_rpmdb(gnupghome: Path, fingerprint: str, rpmdb: Path):
    """Export the public half of `fingerprint` from `gnupghome` and import
    it into an rpm database at `rpmdb` so `rpm -K` can verify signatures.

    Creates `rpmdb` if missing. The original wrote pubkey.asc into the
    directory BEFORE creating it, which fails when the path does not
    exist yet; the mkdir now comes first.
    """
    env = {**os.environ, "GNUPGHOME": str(gnupghome)}
    cp = subprocess.run(
        ["gpg", "--batch", "--yes", "--armor", "--export", fingerprint],
        env=env, text=True, capture_output=True, check=True,
    )
    # Directory must exist before we can write the exported key into it.
    rpmdb.mkdir(parents=True, exist_ok=True)
    pub = rpmdb / "pubkey.asc"
    pub.write_text(cp.stdout)
    subprocess.run(["rpm", "--dbpath", str(rpmdb), "--import", str(pub)], check=True)
def sign_rpm(pkg: Path, cfg: SignCfg) -> subprocess.CompletedProcess:
    """Sign an .rpm in place via ``rpm --addsign``.

    The passphrase is handed to gpg through a mode-0600 temp file using
    loopback pinentry. The file is deleted as soon as rpm returns — the
    original never removed it, leaving the signing passphrase on disk.
    Returns the CompletedProcess (check=False; caller inspects returncode).
    """
    fd, pfile = tempfile.mkstemp(text=True)
    os.write(fd, cfg.passphrase.rstrip("\r\n").encode()); os.close(fd); os.chmod(pfile, 0o600)
    rpm_sign_cmd = [
        "rpm",
        "--define", "%__gpg /usr/bin/gpg",
        "--define", "_signature gpg",
        "--define", f"_gpg_name {cfg.fingerprint}",
        "--define", f"_gpg_path {cfg.gnupghome}",
        "--define", f"_gpg_passfile {pfile}",
        # rpm's password pre-check can't do loopback pinentry; bypass it.
        "--define", "__gpg_check_password_cmd /bin/true",
        "--define",
        "__gpg_sign_cmd %{__gpg} --batch --no-tty --no-armor "
        "--digest-algo sha512 --pinentry-mode loopback "
        "--passphrase-file %{_gpg_passfile} "
        "-u '%{_gpg_name}' --sign --detach-sign "
        "--output %{__signature_filename} %{__plaintext_filename}",
        "--addsign", str(pkg),
    ]
    try:
        return subprocess.run(
            rpm_sign_cmd,
            text=True,
            check=False,
            capture_output=True,
        )
    finally:
        # subprocess.run blocks until rpm exits, so gpg has already read
        # the passphrase file; never leave the secret behind.
        os.unlink(pfile)
def sign_deb(pkg: Path, cfg: SignCfg) -> subprocess.CompletedProcess:
    """Write an armored detached signature next to the .deb (``<pkg>.asc``).

    Returns the gpg CompletedProcess; the caller checks returncode.
    """
    out_sig = pkg.with_suffix(pkg.suffix + ".asc")
    gpg_env = dict(os.environ)
    gpg_env["GNUPGHOME"] = str(cfg.gnupghome)
    # NOTE(review): the passphrase is passed on argv and is briefly visible
    # in the process table; consider --passphrase-file if that matters here.
    gpg_cmd = [
        "gpg",
        "--batch", "--yes", "--armor",
        "--pinentry-mode", "loopback",
        "--local-user", cfg.fingerprint,
        "--passphrase", cfg.passphrase,
        "--output", str(out_sig),
        "--detach-sign", str(pkg),
    ]
    return subprocess.run(gpg_cmd, env=gpg_env, check=False, capture_output=True, text=True)
def sign_package(pkg: Path, cfg: SignCfg) -> subprocess.CompletedProcess:
if pkg.suffix == ".rpm":
return sign_rpm(pkg, cfg)
if pkg.suffix == ".deb":
return sign_deb(pkg, cfg)
raise ValueError(f"unsupported package type: {pkg}")
def verify_signature(pkg: Path, *, gnupghome: Path, expected_fp: str):
    """Route to the rpm or deb verifier based on the (lowercased) extension."""
    print(f"Verifying {pkg.resolve()}")
    ext = pkg.suffix.lower()
    if ext == ".rpm":
        return verify_rpm_signature(pkg, gnupghome=gnupghome, expected_fp=expected_fp)
    if ext == ".deb":
        return verify_deb_signature(pkg, gnupghome=gnupghome, expected_fp=expected_fp)
    raise ValueError(f"unsupported package type: {pkg}")
def verify_deb_signature(pkg: Path, gnupghome: Path, expected_fp: str) -> None:
    """Verify ``<pkg>.asc`` against `pkg` and confirm the signer.

    gpg's machine-readable VALIDSIG status line carries the signer's
    fingerprint; it must match `expected_fp` (case-insensitive).

    Exits the process on failure. Bug fixed: when the signature was valid
    but made by the WRONG key, gpg's returncode is 0, and the original
    called ``sys.exit(result.returncode)`` — exiting successfully despite
    the mismatch. We now exit 1 in that case.
    """
    pkg = Path(pkg)
    sig = pkg.with_suffix(pkg.suffix + ".asc")
    env = {**os.environ, "GNUPGHOME": str(gnupghome)}
    VALIDSIG_RE = re.compile(r"\[GNUPG:\]\s+VALIDSIG\s+([0-9A-Fa-f]{40})")
    # --status-fd 1: emit [GNUPG:] status lines on stdout for parsing.
    verify_cmd = ["gpg", "--batch", "--status-fd", "1", "--verify", str(sig), str(pkg)]
    result = subprocess.run(verify_cmd, env=env, text=True, capture_output=True)
    if result.returncode != 0:
        print(result.stderr or result.stdout)
        sys.exit(result.returncode)
    m = VALIDSIG_RE.search(result.stdout)
    if not m or m.group(1).upper() != expected_fp.upper():
        print(f"Signature invalid or wrong signer. Expected {expected_fp}")
        # gpg exited 0 here; exiting with its code would mask the failure.
        sys.exit(1)
    print("********* deb signature verification *********")
    print(f"✅ Signature verified for {pkg.name} ({m.group(1)})")
def verify_rpm_signature(pkg: Path, *, gnupghome: Path, expected_fp: str):
    """Verify an rpm's signature against the key `expected_fp`.

    rpm can only verify against keys present in an rpm database, so the
    expected public key is exported from `gnupghome` and imported into a
    throwaway rpmdb, which is removed afterwards. Only the expected key is
    in that db, so a signature from any other key fails verification.

    Exits the process with rpm's returncode if ``rpm -Kv`` fails; returns
    True on success.
    """
    env = {**os.environ, "GNUPGHOME": str(gnupghome)}
    export_cmd = ["gpg", "--batch", "--yes", "--armor", "--export", expected_fp]
    cp = subprocess.run(export_cmd, env=env, text=True, capture_output=True, check=True)
    rpmdb = Path(tempfile.mkdtemp())
    try:
        pub = rpmdb / "pubkey.asc"
        pub.write_text(cp.stdout)
        # rpm needs the rpmdb for verification
        subprocess.run(["rpm", "--dbpath", str(rpmdb), "--import", str(pub)], check=True)
        verify_cmd = ["rpm", "--dbpath", str(rpmdb), "-Kv", str(pkg)]
        result = subprocess.run(verify_cmd, text=True, capture_output=True)
        if result.returncode != 0:
            print(result.stdout or result.stderr)
            sys.exit(result.returncode)
        print("********* rpm signature verification *********")
        print(result.stdout)
        print(f"✅ Signature verified for {pkg.name}")
        return True
    finally:
        # Best-effort cleanup of the temporary rpmdb; a cleanup failure
        # must never mask a successful (or failed) verification result.
        try:
            for p in rpmdb.iterdir(): p.unlink()
            rpmdb.rmdir()
        except Exception:
            pass
def main():
    """Sign, then verify, the package named on the command line.

    Requires GPG_KEY_B64 and GPG_KEY_PASS_B64 in the environment
    (base64-encoded armored private key and passphrase). The throwaway
    GNUPGHOME created by make_cfg is removed even on failure.

    Fixed: the original wrote ``raise sys.exit(res.returncode)`` —
    sys.exit() already raises SystemExit, so the ``raise`` was dead code.
    """
    set_tty()
    gpg_key = base64.b64decode(os.environ["GPG_KEY_B64"]).decode("utf-8").strip()
    gpg_passphrase = base64.b64decode(os.environ["GPG_KEY_PASS_B64"]).decode("utf-8").strip()
    parser = argparse.ArgumentParser()
    parser.add_argument("package")
    args = parser.parse_args()
    cfg = make_cfg(passphrase=gpg_passphrase, armored_private_key=gpg_key)
    try:
        pkg = Path(args.package)
        res = sign_package(pkg, cfg)
        if res.returncode:
            print(res.stderr.strip() or res.stdout.strip())
            sys.exit(res.returncode)
        # Verifiers exit the process themselves on any mismatch.
        verify_signature(pkg, gnupghome=cfg.gnupghome, expected_fp=cfg.fingerprint)
    finally:
        # Remove the private key material no matter how signing went.
        shutil.rmtree(cfg.gnupghome, ignore_errors=True)
    sys.exit(0)
if __name__ == "__main__":
main()

View File

@@ -876,42 +876,48 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.fund(XRP(1000), alice, buyer, gw);
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
uint256 const nftAlice0ID = token::getNextID(env, alice, 0, tfTransferable);
env(token::mint(alice, 0u), txflags(tfTransferable));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 1);
uint8_t aliceCount = 1;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const nftXrpOnlyID = token::getNextID(env, alice, 0, tfOnlyXRP | tfTransferable);
env(token::mint(alice, 0), txflags(tfOnlyXRP | tfTransferable));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 1);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 nftNoXferID = token::getNextID(env, alice, 0);
env(token::mint(alice, 0));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 1);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// alice creates sell offers for her nfts.
uint256 const plainOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftAlice0ID, XRP(10)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 2);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const audOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftAlice0ID, gwAUD(30)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 3);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const xrpOnlyOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftXrpOnlyID, XRP(20)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 4);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const noXferOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftNoXferID, XRP(30)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 5);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// alice creates a sell offer that will expire soon.
uint256 const aliceExpOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
@@ -919,7 +925,17 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
txflags(tfSellNFToken),
token::expiration(lastClose(env) + 5));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 6);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// buyer creates a Buy offer that will expire soon.
uint256 const buyerExpOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, XRP(40)),
token::owner(alice),
token::expiration(lastClose(env) + 5));
env.close();
uint8_t buyerCount = 1;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
//----------------------------------------------------------------------
// preflight
@@ -927,12 +943,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// Set a negative fee.
env(token::acceptSellOffer(buyer, noXferOfferIndex), fee(STAmount(10ull, true)), ter(temBAD_FEE));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Set an invalid flag.
env(token::acceptSellOffer(buyer, noXferOfferIndex), txflags(0x00008000), ter(temINVALID_FLAG));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Supply nether an sfNFTokenBuyOffer nor an sfNFTokenSellOffer field.
{
@@ -940,7 +956,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
jv.removeMember(sfNFTokenSellOffer.jsonName);
env(jv, ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// A buy offer may not contain a sfNFTokenBrokerFee field.
@@ -949,7 +965,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
jv[sfNFTokenBrokerFee.jsonName] = STAmount(500000).getJson(JsonOptions::none);
env(jv, ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// A sell offer may not contain a sfNFTokenBrokerFee field.
@@ -958,7 +974,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
jv[sfNFTokenBrokerFee.jsonName] = STAmount(500000).getJson(JsonOptions::none);
env(jv, ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// A brokered offer may not contain a negative or zero brokerFee.
@@ -966,7 +982,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
token::brokerFee(gwAUD(0)),
ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
//----------------------------------------------------------------------
// preclaim
@@ -974,33 +990,48 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// The buy offer must be non-zero.
env(token::acceptBuyOffer(buyer, beast::zero), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The buy offer must be present in the ledger.
uint256 const missingOfferIndex = keylet::nftoffer(alice, 1).key;
env(token::acceptBuyOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The buy offer must not have expired.
env(token::acceptBuyOffer(buyer, aliceExpOfferIndex), ter(tecEXPIRED));
// NOTE: this is only a preclaim check with the
// fixExpiredNFTokenOfferRemoval amendment disabled.
env(token::acceptBuyOffer(alice, buyerExpOfferIndex), ter(tecEXPIRED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
if (features[fixExpiredNFTokenOfferRemoval])
{
buyerCount--;
}
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The sell offer must be non-zero.
env(token::acceptSellOffer(buyer, beast::zero), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The sell offer must be present in the ledger.
env(token::acceptSellOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The sell offer must not have expired.
// NOTE: this is only a preclaim check with the
// fixExpiredNFTokenOfferRemoval amendment disabled.
env(token::acceptSellOffer(buyer, aliceExpOfferIndex), ter(tecEXPIRED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
// Alice's count is decremented by one when the expired offer is
// removed.
if (features[fixExpiredNFTokenOfferRemoval])
{
aliceCount--;
}
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
//----------------------------------------------------------------------
// preclaim brokered
@@ -1012,8 +1043,13 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(pay(gw, buyer, gwAUD(30)));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
aliceCount++;
buyerCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// We're about to exercise offer brokering, so we need
// corresponding buy and sell offers.
@@ -1022,35 +1058,38 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(29)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// gw attempts to broker offers that are not for the same token.
env(token::brokerOffers(gw, buyerOfferIndex, xrpOnlyOfferIndex), ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// gw attempts to broker offers that are not for the same currency.
env(token::brokerOffers(gw, buyerOfferIndex, plainOfferIndex), ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// In a brokered offer, the buyer must offer greater than or
// equal to the selling price.
env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex), ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 1);
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
{
// buyer creates a buy offer for one of alice's nfts.
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(31)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Broker sets their fee in a denomination other than the one
// used by the offers
@@ -1058,14 +1097,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
token::brokerFee(XRP(40)),
ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Broker fee way too big.
env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex),
token::brokerFee(gwAUD(31)),
ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Broker fee is smaller, but still too big once the offer
// seller's minimum is taken into account.
@@ -1073,12 +1112,13 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
token::brokerFee(gwAUD(1.5)),
ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 1);
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
//----------------------------------------------------------------------
// preclaim buy
@@ -1087,17 +1127,18 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(30)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Don't accept a buy offer if the sell flag is set.
env(token::acceptBuyOffer(buyer, plainOfferIndex), ter(tecNFTOKEN_OFFER_TYPE_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// An account can't accept its own offer.
env(token::acceptBuyOffer(buyer, buyerOfferIndex), ter(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An offer acceptor must have enough funds to pay for the offer.
env(pay(buyer, gw, gwAUD(30)));
@@ -1105,7 +1146,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(env.balance(buyer, gwAUD) == gwAUD(0));
env(token::acceptBuyOffer(alice, buyerOfferIndex), ter(tecINSUFFICIENT_FUNDS));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// alice gives her NFT to gw, so alice no longer owns nftAlice0.
{
@@ -1114,7 +1155,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(gw, offerIndex));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
}
env(pay(gw, buyer, gwAUD(30)));
env.close();
@@ -1122,12 +1163,13 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// alice can't accept a buy offer for an NFT she no longer owns.
env(token::acceptBuyOffer(alice, buyerOfferIndex), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 1);
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
//----------------------------------------------------------------------
// preclaim sell
@@ -1136,23 +1178,24 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftXrpOnlyID, XRP(30)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Don't accept a sell offer without the sell flag set.
env(token::acceptSellOffer(alice, buyerOfferIndex), ter(tecNFTOKEN_OFFER_TYPE_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// An account can't accept its own offer.
env(token::acceptSellOffer(alice, plainOfferIndex), ter(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The seller must currently be in possession of the token they
// are selling. alice gave nftAlice0ID to gw.
env(token::acceptSellOffer(buyer, plainOfferIndex), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// gw gives nftAlice0ID back to alice. That allows us to check
// buyer attempting to accept one of alice's offers with
@@ -1163,14 +1206,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(alice, offerIndex));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
}
env(pay(buyer, gw, gwAUD(30)));
env.close();
BEAST_EXPECT(env.balance(buyer, gwAUD) == gwAUD(0));
env(token::acceptSellOffer(buyer, audOfferIndex), ter(tecINSUFFICIENT_FUNDS));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
//----------------------------------------------------------------------
@@ -2769,6 +2812,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const nftokenID1 = token::getNextID(env, issuer, 0, tfTransferable);
env(token::mint(minter, 0), token::issuer(issuer), txflags(tfTransferable));
env.close();
uint8_t issuerCount, minterCount, buyerCount;
// Test how adding an Expiration field to an offer affects permissions
// for cancelling offers.
@@ -2792,9 +2836,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const offerBuyerToMinter = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter), token::expiration(expiration));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
issuerCount = 1;
minterCount = 3;
buyerCount = 1;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Test who gets to cancel the offers. Anyone outside of the
// offer-owner/destination pair should not be able to cancel
@@ -2806,32 +2853,36 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::cancelOffer(buyer, {offerIssuerToMinter}), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The offer creator can cancel their own unexpired offer.
env(token::cancelOffer(minter, {offerMinterToAnyone}));
minterCount--;
// The destination of a sell offer can cancel the NFT owner's
// unexpired offer.
env(token::cancelOffer(issuer, {offerMinterToIssuer}));
minterCount--;
// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can cancel expired offers.
env(token::cancelOffer(issuer, {offerBuyerToMinter}));
buyerCount--;
env(token::cancelOffer(buyer, {offerIssuerToMinter}));
issuerCount--;
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that:
// 1. An unexpired sell offer with an expiration can be accepted.
@@ -2844,44 +2895,70 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(minter, nftokenID0, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
uint256 const offer1 = keylet::nftoffer(minter, env.seq(minter)).key;
env(token::createOffer(minter, nftokenID1, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can accept an unexpired sell offer.
env(token::acceptSellOffer(buyer, offer0));
minterCount--;
buyerCount++;
// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// No one can accept an expired sell offer.
env(token::acceptSellOffer(buyer, offer1), ter(tecEXPIRED));
env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));
// With fixExpiredNFTokenOfferRemoval amendment, the first accept
// attempt deletes the expired offer. Without the amendment,
// the offer remains and we can try to accept it again.
if (features[fixExpiredNFTokenOfferRemoval])
{
// After amendment: offer was deleted by first accept attempt
minterCount--;
env(token::acceptSellOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
}
else
{
// Before amendment: offer still exists, second accept also
// fails
env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));
}
env.close();
// The expired sell offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
// Check if the expired sell offer behavior matches amendment status
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can cancel the expired sell offer.
env(token::cancelOffer(issuer, {offer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
if (!features[fixExpiredNFTokenOfferRemoval])
{
// Before amendment: expired offer still exists and needs to be
// cancelled
env(token::cancelOffer(issuer, {offer1}));
env.close();
minterCount--;
}
// Ensure that owner counts are correct with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -2889,10 +2966,11 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(buyer, nftokenID0, XRP(0)), txflags(tfSellNFToken), token::destination(minter));
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
buyerCount--;
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that:
// 1. An unexpired buy offer with an expiration can be accepted.
@@ -2903,14 +2981,16 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const offer0 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter), token::expiration(expiration));
buyerCount++;
uint256 const offer1 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID1, drops(1)), token::owner(minter), token::expiration(expiration));
buyerCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An unexpired buy offer can be accepted.
env(token::acceptBuyOffer(minter, offer0));
@@ -2919,26 +2999,48 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An expired buy offer cannot be accepted.
env(token::acceptBuyOffer(minter, offer1), ter(tecEXPIRED));
env(token::acceptBuyOffer(issuer, offer1), ter(tecEXPIRED));
// With fixExpiredNFTokenOfferRemoval amendment, the first accept
// attempt deletes the expired offer. Without the amendment,
// the offer remains and we can try to accept it again.
if (features[fixExpiredNFTokenOfferRemoval])
{
// After amendment: offer was deleted by first accept attempt
buyerCount--;
env(token::acceptBuyOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
}
else
{
// Before amendment: offer still exists, second accept also
// fails
env(token::acceptBuyOffer(issuer, offer1), ter(tecEXPIRED));
}
env.close();
// The expired buy offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Check if the expired buy offer behavior matches amendment status
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can cancel the expired buy offer.
env(token::cancelOffer(issuer, {offer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
if (!features[fixExpiredNFTokenOfferRemoval])
{
// Before amendment: expired offer still exists and can be
// cancelled
env(token::cancelOffer(issuer, {offer1}));
env.close();
buyerCount--;
}
// Ensure that owner counts are the same with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -2947,9 +3049,10 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
buyerCount--;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that in brokered mode:
// 1. An unexpired sell offer with an expiration can be accepted.
@@ -2962,50 +3065,74 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(minter, nftokenID0, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
uint256 const sellOffer1 = keylet::nftoffer(minter, env.seq(minter)).key;
env(token::createOffer(minter, nftokenID1, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
uint256 const buyOffer0 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter));
buyerCount++;
uint256 const buyOffer1 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID1, drops(1)), token::owner(minter));
buyerCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An unexpired offer can be brokered.
env(token::brokerOffers(issuer, buyOffer0, sellOffer0));
minterCount--;
// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// If the sell offer is expired it cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();
// The expired sell offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
if (features[fixExpiredNFTokenOfferRemoval])
{
// With amendment: expired offers are deleted
minterCount--;
}
// Anyone can cancel the expired sell offer.
env(token::cancelOffer(buyer, {buyOffer1, sellOffer1}));
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
if (features[fixExpiredNFTokenOfferRemoval])
{
// The buy offer was deleted, so no need to cancel it
// The sell offer still exists, so we can cancel it
env(token::cancelOffer(buyer, {buyOffer1}));
buyerCount--;
}
else
{
// Anyone can cancel the expired offers
env(token::cancelOffer(buyer, {buyOffer1, sellOffer1}));
minterCount--;
buyerCount--;
}
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
// Ensure that owner counts are the same with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -3014,9 +3141,10 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
buyerCount--;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that in brokered mode:
// 1. An unexpired buy offer with an expiration can be accepted.
@@ -3054,17 +3182,28 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// If the buy offer is expired it cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();
// The expired buy offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired buy offer.
env(token::cancelOffer(minter, {buyOffer1, sellOffer1}));
if (features[fixExpiredNFTokenOfferRemoval])
{
// After amendment: expired offers were deleted during broker
// attempt
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
// The buy offer was deleted, so no need to cancel it
// The sell offer still exists, so we can cancel it
env(token::cancelOffer(minter, {sellOffer1}));
}
else
{
// Before amendment: expired offers still exist in ledger
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired offers
env(token::cancelOffer(minter, {buyOffer1, sellOffer1}));
}
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
@@ -3122,17 +3261,19 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// If the offers are expired they cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();
// The expired offers are still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired offers.
env(token::cancelOffer(issuer, {buyOffer1, sellOffer1}));
if (!features[fixExpiredNFTokenOfferRemoval])
{
// Before amendment: expired offers still exist in ledger
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired offers
env(token::cancelOffer(issuer, {buyOffer1, sellOffer1}));
}
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
@@ -6736,7 +6877,9 @@ public:
void
run() override
{
testWithFeats(allFeatures - fixNFTokenReserve - featureNFTokenMintOffer - featureDynamicNFT);
testWithFeats(
allFeatures - fixNFTokenReserve - featureNFTokenMintOffer - featureDynamicNFT -
fixExpiredNFTokenOfferRemoval);
}
};
@@ -6767,6 +6910,15 @@ class NFTokenWOModify_test : public NFTokenBaseUtil_test
}
};
// Runs the full NFToken base test suite with the
// fixExpiredNFTokenOfferRemoval amendment switched off, so the
// pre-amendment code paths (expired offers rejected in preclaim and
// left on the ledger) stay covered.
class NFTokenWOExpiredOfferRemoval_test : public NFTokenBaseUtil_test
{
    void
    run() override
    {
        // Start from every feature, then drop only the amendment
        // under test.
        auto const featuresWithoutRemoval =
            allFeatures - fixExpiredNFTokenOfferRemoval;
        testWithFeats(featuresWithoutRemoval);
    }
};
class NFTokenAllFeatures_test : public NFTokenBaseUtil_test
{
void

View File

@@ -53,7 +53,17 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx)
return {nullptr, tecOBJECT_NOT_FOUND};
if (hasExpired(ctx.view, (*offerSLE)[~sfExpiration]))
return {nullptr, tecEXPIRED};
{
// Before fixExpiredNFTokenOfferRemoval amendment, expired
// offers caused tecEXPIRED in preclaim, leaving them on ledger
// forever. After the amendment, we allow expired offers to
// reach doApply() where they get deleted and tecEXPIRED is
// returned.
if (!ctx.view.rules().enabled(fixExpiredNFTokenOfferRemoval))
return {nullptr, tecEXPIRED};
// Amendment enabled: return the expired offer to be handled in
// doApply
}
if ((*offerSLE)[sfAmount].negative())
return {nullptr, temBAD_OFFER};
@@ -299,7 +309,7 @@ NFTokenAcceptOffer::pay(AccountID const& from, AccountID const& to, STAmount con
{
// This should never happen, but it's easy and quick to check.
if (amount < beast::zero)
return tecINTERNAL;
return tecINTERNAL; // LCOV_EXCL_LINE
auto const result = accountSend(view(), from, to, amount, j_);
@@ -410,6 +420,39 @@ NFTokenAcceptOffer::doApply()
auto bo = loadToken(ctx_.tx[~sfNFTokenBuyOffer]);
auto so = loadToken(ctx_.tx[~sfNFTokenSellOffer]);
// With fixExpiredNFTokenOfferRemoval amendment, check for expired offers
// and delete them, returning tecEXPIRED. This ensures expired offers
// are properly cleaned up from the ledger.
if (view().rules().enabled(fixExpiredNFTokenOfferRemoval))
{
bool foundExpired = false;
auto const deleteOfferIfExpired = [this, &foundExpired](std::shared_ptr<SLE> const& offer) -> TER {
if (offer && hasExpired(view(), (*offer)[~sfExpiration]))
{
JLOG(j_.trace()) << "Offer is expired, deleting: " << offer->key();
if (!nft::deleteTokenOffer(view(), offer))
{
// LCOV_EXCL_START
JLOG(j_.fatal()) << "Unable to delete expired offer '" << offer->key() << "': ignoring";
return tecINTERNAL;
// LCOV_EXCL_STOP
}
JLOG(j_.trace()) << "Deleted offer " << offer->key();
foundExpired = true;
}
return tesSUCCESS;
};
if (auto const r = deleteOfferIfExpired(bo); !isTesSuccess(r))
return r;
if (auto const r = deleteOfferIfExpired(so); !isTesSuccess(r))
return r;
if (foundExpired)
return tecEXPIRED;
}
if (bo && !nft::deleteTokenOffer(view(), bo))
{
// LCOV_EXCL_START