Compare commits

..

9 Commits

Author SHA1 Message Date
Denis Angell
dc8a689a20 feature-subscriptions 2025-09-10 11:03:22 +02:00
Ayaz Salikhov
e5f7a8442d ci: Change when upload-conan-deps workflow is run (#5782)
- Don't run upload-conan-deps in PRs, unless the PR changes the workflow file.
- Change cron schedule for uploading Conan dependencies to run after work hours for most devs.
2025-09-09 16:21:12 -04:00
Ayaz Salikhov
e67e0395df ci: Limit number of parallel jobs in "upload-conan-deps" (#5781)
- This should prevent Artifactory from being overloaded by too many requests at a time.
- Uses "max-parallel" to limit the build job to 10 simultaneous instances.
- Only run the minimal matrix on PRs.
2025-09-09 19:47:06 +00:00
Ed Hennis
148f669a25 chore: "passed" fails if any previous jobs fail or are cancelled (#5776)
For the purposes of being able to merge a PR, GitHub Actions jobs count as passed if they ran and passed, or were skipped.

With this change, if any of the jobs that "passed" depends on fail or are cancelled, then "passed" will fail. If they all succeed or are skipped, then "passed" is skipped, which does not prevent a merge.

This saves spinning up a runner in the usual case where things work, and will simplify our branch protection rules, so that only "passed" will need to be checked.
2025-09-09 18:07:04 +00:00
yinyiqian1
f1eaa6a264 enable fixAMMClawbackRounding (#5750) 2025-09-09 15:57:28 +00:00
Ayaz Salikhov
da4c8c9550 ci: Only run build-test/notify-clio if should-run indicates to (#5777)
- Fixes an issue introduced by #5762 which removed the transitive `should-run` check from these two jobs.
2025-09-09 11:25:41 -04:00
Wo Jake
bcde2790a4 Update old links & descriptions in README.md (#4701) 2025-09-08 18:03:20 +00:00
Ayaz Salikhov
9ebeb413e4 feat: Implement separate upload workflow (#5762)
* feat: Implement separate upload workflow
* Use cleanup-workspace
* Name some workflows reusable
* Add dependencies
2025-09-08 15:15:59 +00:00
Bronek Kozicki
6d40b882a4 Switch on-trigger to minimal build (#5773) 2025-09-08 13:54:50 +00:00
38 changed files with 5015 additions and 367 deletions

View File

@@ -1,7 +1,5 @@
# This action installs and optionally uploads Conan dependencies to a remote
# repository. The dependencies will only be uploaded if the credentials are
# provided.
name: Build Conan dependencies
description: "Install Conan dependencies, optionally forcing a rebuild of all dependencies."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
@@ -12,28 +10,10 @@ inputs:
build_type:
description: 'The build type to use ("Debug", "Release").'
required: true
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
conan_remote_username:
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
default: ""
conan_remote_password:
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
default: ""
force_build:
description: 'Force building of all dependencies ("true", "false").'
required: false
default: "false"
force_upload:
description: 'Force uploading of all dependencies ("true", "false").'
required: false
default: "false"
runs:
using: composite
@@ -51,12 +31,3 @@ runs:
--options:host '&:xrpld=True' \
--settings:all build_type=${{ inputs.build_type }} \
--format=json ..
- name: Upload Conan dependencies
if: ${{ inputs.conan_remote_username != '' && inputs.conan_remote_password != '' }}
shell: bash
working-directory: ${{ inputs.build_dir }}
run: |
echo "Logging into Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote login ${{ inputs.conan_remote_name }} "${{ inputs.conan_remote_username }}" --password "${{ inputs.conan_remote_password }}"
echo 'Uploading dependencies.'
conan upload '*' --confirm --check ${{ inputs.force_upload == 'true' && '--force' || '' }} --remote=${{ inputs.conan_remote_name }}

View File

@@ -1,6 +1,7 @@
# This action build and tests the binary. The Conan dependencies must have
# already been installed (see the build-deps action).
name: Build and Test
description: "Build and test the binary."
# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.

43
.github/actions/setup-conan/action.yml vendored Normal file
View File

@@ -0,0 +1,43 @@
name: Setup Conan
description: "Set up Conan configuration, profile, and remote."
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: false
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: false
default: https://conan.ripplex.io
runs:
using: composite
steps:
- name: Set up Conan configuration
shell: bash
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ runner.os == 'Linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
shell: bash
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
run: |
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list

43
.github/scripts/strategy-matrix/generate.py vendored Normal file → Executable file
View File

@@ -2,7 +2,17 @@
import argparse
import itertools
import json
import re
from pathlib import Path
from dataclasses import dataclass
THIS_DIR = Path(__file__).parent.resolve()
@dataclass
class Config:
architecture: list[dict]
os: list[dict]
build_type: list[str]
cmake_args: list[str]
'''
Generate a strategy matrix for GitHub Actions CI.
@@ -18,9 +28,9 @@ We will further set additional CMake arguments as follows:
- Certain Debian Bookworm configurations will change the reference fee, enable
codecov, and enable voidstar in PRs.
'''
def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict], build_type: list[str], cmake_args: list[str]) -> dict:
def generate_strategy_matrix(all: bool, config: Config) -> list:
configurations = []
for architecture, os, build_type, cmake_args in itertools.product(architecture, os, build_type, cmake_args):
for architecture, os, build_type, cmake_args in itertools.product(config.architecture, config.os, config.build_type, config.cmake_args):
# The default CMake target is 'all' for Linux and MacOS and 'install'
# for Windows, but it can get overridden for certain configurations.
cmake_target = 'install' if os["distro_name"] == 'windows' else 'all'
@@ -158,21 +168,30 @@ def generate_strategy_matrix(all: bool, architecture: list[dict], os: list[dict]
'architecture': architecture,
})
return {'include': configurations}
return configurations
def read_config(file: Path) -> Config:
config = json.loads(file.read_text())
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
return Config(**config)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--all', help='Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).', action="store_true")
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=True, type=str)
parser.add_argument('-c', '--config', help='Path to the JSON file containing the strategy matrix configurations.', required=False, type=Path)
args = parser.parse_args()
# Load the JSON configuration file.
config = None
with open(args.config, 'r') as f:
config = json.load(f)
if config['architecture'] is None or config['os'] is None or config['build_type'] is None or config['cmake_args'] is None:
raise Exception('Invalid configuration file.')
matrix = []
if args.config is None or args.config == '':
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "linux.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "macos.json"))
matrix += generate_strategy_matrix(args.all, read_config(THIS_DIR / "windows.json"))
else:
matrix += generate_strategy_matrix(args.all, read_config(args.config))
# Generate the strategy matrix.
print(f'matrix={json.dumps(generate_strategy_matrix(args.all, config['architecture'], config['os'], config['build_type'], config['cmake_args']))}')
print(f'matrix={json.dumps({"include": matrix})}')

View File

@@ -13,14 +13,6 @@ on:
required: false
type: string
default: ".build"
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
type: string
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
type: string
dependencies_force_build:
description: "Force building of all dependencies."
required: false
@@ -45,12 +37,6 @@ on:
codecov_token:
description: "The Codecov token to use for uploading coverage reports."
required: false
conan_remote_username:
description: "The username for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
conan_remote_password:
description: "The password for logging into the Conan remote. If not provided, the dependencies will not be uploaded."
required: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.os }}
@@ -63,20 +49,10 @@ defaults:
jobs:
# Generate the strategy matrix to be used by the following job.
generate-matrix:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
run: python generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} --config=${{ inputs.os }}.json >> "${GITHUB_OUTPUT}"
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
os: ${{ inputs.os }}
strategy_matrix: ${{ inputs.strategy_matrix }}
# Build and test the binary.
build-test:
@@ -148,40 +124,16 @@ jobs:
echo 'Checking nproc version.'
nproc --version
- name: Set up Conan configuration
run: |
echo 'Installing configuration.'
cat conan/global.conf ${{ inputs.os == 'linux' && '>>' || '>' }} $(conan config home)/global.conf
echo 'Conan configuration:'
conan config show '*'
- name: Set up Conan profile
run: |
echo 'Installing profile.'
conan config install conan/profiles/default -tf $(conan config home)/profiles/
echo 'Conan profile:'
conan profile show
- name: Set up Conan remote
shell: bash
run: |
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: ${{ inputs.build_dir }}
build_type: ${{ matrix.build_type }}
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
conan_remote_username: ${{ secrets.conan_remote_username }}
conan_remote_password: ${{ secrets.conan_remote_password }}
force_build: ${{ inputs.dependencies_force_build }}
force_upload: ${{ inputs.dependencies_force_upload }}
- name: Build and test binary
uses: ./.github/actions/build-test
with:

View File

@@ -9,12 +9,14 @@ on:
inputs:
conan_remote_name:
description: "The name of the Conan remote to use."
required: true
required: false
type: string
default: xrplf
conan_remote_url:
description: "The URL of the Conan endpoint to use."
required: true
required: false
type: string
default: https://conan.ripplex.io
secrets:
clio_notify_token:
description: "The GitHub token to notify Clio about new versions."
@@ -54,12 +56,13 @@ jobs:
id: conan_ref
run: |
echo "conan_ref=${{ steps.generate.outputs.version }}@${{ steps.generate.outputs.user }}/${{ steps.generate.outputs.channel }}" >> "${GITHUB_OUTPUT}"
- name: Add Conan remote
run: |
echo "Adding Conan remote '${{ inputs.conan_remote_name }}' at ${{ inputs.conan_remote_url }}."
conan remote add --index 0 --force ${{ inputs.conan_remote_name }} ${{ inputs.conan_remote_url }}
echo 'Listing Conan remotes.'
conan remote list
- name: Set up Conan
uses: ./.github/actions/setup-conan
with:
conan_remote_name: ${{ inputs.conan_remote_name }}
conan_remote_url: ${{ inputs.conan_remote_url }}
- name: Log into Conan remote
run: conan remote login ${{ inputs.conan_remote_name }} "${{ secrets.conan_remote_username }}" --password "${{ secrets.conan_remote_password }}"
- name: Upload package

View File

@@ -23,10 +23,6 @@ defaults:
run:
shell: bash
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
# This job determines whether the rest of the workflow should run. It runs
# when the PR is not a draft (which should also cover merge-group) or
@@ -105,51 +101,36 @@ jobs:
if: needs.should-run.outputs.go == 'true'
uses: ./.github/workflows/check-levelization.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows.
generate-outputs:
build-test:
needs: should-run
if: needs.should-run.outputs.go == 'true'
runs-on: ubuntu-latest
steps:
- name: No-op
run: true
outputs:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
build-test:
needs: generate-outputs
uses: ./.github/workflows/build-test.yml
strategy:
matrix:
os: [linux, macos, windows]
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
os: ${{ matrix.os }}
secrets:
codecov_token: ${{ secrets.CODECOV_TOKEN }}
notify-clio:
needs:
- generate-outputs
- should-run
- build-test
if: needs.should-run.outputs.go == 'true'
uses: ./.github/workflows/notify-clio.yml
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
secrets:
clio_notify_token: ${{ secrets.CLIO_NOTIFY_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
passed:
if: failure() || cancelled()
needs:
- build-test
- check-format
- check-levelization
runs-on: ubuntu-latest
steps:
- name: No-op
run: true
- name: Fail
run: false

View File

@@ -66,54 +66,18 @@ defaults:
run:
shell: bash
env:
CONAN_REMOTE_NAME: xrplf
CONAN_REMOTE_URL: https://conan.ripplex.io
jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/check-missing-commits.yml
# This job works around the limitation that GitHub Actions does not support
# using environment variables as inputs for reusable workflows. It also sets
# outputs that depend on the event that triggered the workflow.
generate-outputs:
runs-on: ubuntu-latest
steps:
- name: Check inputs and set outputs
id: generate
run: |
if [[ '${{ github.event_name }}' == 'push' ]]; then
echo 'dependencies_force_build=false' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
elif [[ '${{ github.event_name }}' == 'schedule' ]]; then
echo 'dependencies_force_build=true' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=false' >> "${GITHUB_OUTPUT}"
else
echo 'dependencies_force_build=${{ inputs.dependencies_force_build }}' >> "${GITHUB_OUTPUT}"
echo 'dependencies_force_upload=${{ inputs.dependencies_force_upload }}' >> "${GITHUB_OUTPUT}"
fi
outputs:
conan_remote_name: ${{ env.CONAN_REMOTE_NAME }}
conan_remote_url: ${{ env.CONAN_REMOTE_URL }}
dependencies_force_build: ${{ steps.generate.outputs.dependencies_force_build }}
dependencies_force_upload: ${{ steps.generate.outputs.dependencies_force_upload }}
build-test:
needs: generate-outputs
uses: ./.github/workflows/build-test.yml
strategy:
matrix:
os: [linux, macos, windows]
with:
conan_remote_name: ${{ needs.generate-outputs.outputs.conan_remote_name }}
conan_remote_url: ${{ needs.generate-outputs.outputs.conan_remote_url }}
dependencies_force_build: ${{ needs.generate-outputs.outputs.dependencies_force_build == 'true' }}
dependencies_force_upload: ${{ needs.generate-outputs.outputs.dependencies_force_upload == 'true' }}
os: ${{ matrix.os }}
strategy_matrix: "all"
strategy_matrix: "minimal"
secrets:
codecov_token: ${{ secrets.CODECOV_TOKEN }}
conan_remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
conan_remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}

View File

@@ -0,0 +1,38 @@
name: Generate strategy matrix
on:
workflow_call:
inputs:
os:
description: 'The operating system to use for the build ("linux", "macos", "windows").'
required: false
type: string
strategy_matrix:
# TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
required: false
type: string
default: "minimal"
outputs:
matrix:
description: "The generated strategy matrix."
value: ${{ jobs.generate-matrix.outputs.matrix }}
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: 3.13
- name: Generate strategy matrix
working-directory: .github/scripts/strategy-matrix
id: generate
run: ./generate.py ${{ inputs.strategy_matrix == 'all' && '--all' || '' }} ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }} >> "${GITHUB_OUTPUT}"

83
.github/workflows/upload-conan-deps.yml vendored Normal file
View File

@@ -0,0 +1,83 @@
name: Upload Conan Dependencies
on:
schedule:
- cron: "0 3 * * 2-6"
workflow_dispatch:
inputs:
force_source_build:
description: "Force source build of all dependencies"
required: false
default: false
type: boolean
force_upload:
description: "Force upload of all dependencies"
required: false
default: false
type: boolean
pull_request:
branches: [develop]
paths:
# This allows testing changes to the upload workflow in a PR
- .github/workflows/upload-conan-deps.yml
push:
branches: [develop]
paths:
- .github/workflows/upload-conan-deps.yml
- .github/workflows/reusable-strategy-matrix.yml
- .github/actions/build-deps/action.yml
- ".github/scripts/strategy-matrix/**"
- conanfile.py
- conan.lock
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
generate-matrix:
uses: ./.github/workflows/reusable-strategy-matrix.yml
with:
strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}
run-upload-conan-deps:
needs:
- generate-matrix
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.architecture.runner }}
container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version) || null }}
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@3f044c7478548e3c32ff68980eeb36ece02b364e
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@638e0dc11ea230f91bd26622fb542116bb5254d5
with:
disable_ccache: false
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_dir: .build
build_type: ${{ matrix.build_type }}
force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
- name: Login to Conan
if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request'
run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ${{ inputs.conan_remote_name }} ${{ secrets.CONAN_USERNAME }}
- name: Upload Conan packages
if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule'
run: conan upload "*" -r=${{ inputs.conan_remote_name }} --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}

View File

@@ -6,7 +6,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe
## XRP
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
## rippled
@@ -23,19 +23,19 @@ If you are interested in running an **API Server** (including a **Full History S
- **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds.
- **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs.
- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction fees.
- **[Responsible Software Governance][]:** A team of full-time developers at Ripple & other organizations maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests.
- **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances.
- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[Modern Features][]:** Features like Escrow, Checks, and Payment Channels support financial applications atop of the XRP Ledger. This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP.
[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm
[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply
[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance
[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
[Censorship-Resistant Transaction Processing]: https://xrpl.org/transaction-censorship-detection.html#transaction-censorship-detection
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/consensus-research.html#consensus-research
[Finite XRP Supply]: https://xrpl.org/what-is-xrp.html
[Responsible Software Governance]: https://xrpl.org/contribute-code.html#contribute-code-to-the-xrp-ledger
[Secure, Adaptable Cryptography]: https://xrpl.org/cryptographic-keys.html#cryptographic-keys
[Modern Features]: https://xrpl.org/use-specialized-payment-types.html
[On-Ledger Decentralized Exchange]: https://xrpl.org/decentralized-exchange.html#decentralized-exchange
## Source Code

View File

@@ -349,6 +349,19 @@ permissionedDomain(AccountID const& account, std::uint32_t seq) noexcept;
Keylet
permissionedDomain(uint256 const& domainID) noexcept;
Keylet
subscription(
AccountID const& account,
AccountID const& dest,
std::uint32_t const& seq) noexcept;
inline Keylet
subscription(uint256 const& key) noexcept
{
return {ltSUBSCRIPTION, key};
}
} // namespace keylet
// Everything below is deprecated and should be removed in favor of keylets:

View File

@@ -62,7 +62,6 @@ enum LedgerEntryType : std::uint16_t
#undef LEDGER_ENTRY
#pragma pop_macro("LEDGER_ENTRY")
//---------------------------------------------------------------------------
/** A special type, matching any ledger entry type.

View File

@@ -32,9 +32,10 @@
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FEATURE(Subscription, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)

View File

@@ -504,5 +504,25 @@ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({
// no PermissionedDomainID ever (use MPTIssuance.sfDomainID)
}))
/** A ledger object representing a subscription.
\sa keylet::mptoken
*/
LEDGER_ENTRY(ltSUBSCRIPTION, 0x0085, Subscription, subscription, ({
{sfPreviousTxnID, soeREQUIRED},
{sfPreviousTxnLgrSeq, soeREQUIRED},
{sfSequence, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},
{sfAccount, soeREQUIRED},
{sfDestination, soeREQUIRED},
{sfDestinationTag, soeOPTIONAL},
{sfAmount, soeREQUIRED},
{sfBalance, soeREQUIRED},
{sfFrequency, soeREQUIRED},
{sfNextClaimTime, soeREQUIRED},
{sfExpiration, soeOPTIONAL},
{sfDestinationNode, soeREQUIRED},
}))
#undef EXPAND
#undef LEDGER_ENTRY_DUPLICATE

View File

@@ -114,6 +114,9 @@ TYPED_SFIELD(sfVoteWeight, UINT32, 48)
TYPED_SFIELD(sfFirstNFTokenSequence, UINT32, 50)
TYPED_SFIELD(sfOracleDocumentID, UINT32, 51)
TYPED_SFIELD(sfPermissionValue, UINT32, 52)
TYPED_SFIELD(sfFrequency, UINT32, 53)
TYPED_SFIELD(sfStartTime, UINT32, 54)
TYPED_SFIELD(sfNextClaimTime, UINT32, 55)
// 64-bit integers (common)
TYPED_SFIELD(sfIndexNext, UINT64, 1)
@@ -197,6 +200,7 @@ TYPED_SFIELD(sfHookSetTxnID, UINT256, 33)
TYPED_SFIELD(sfDomainID, UINT256, 34)
TYPED_SFIELD(sfVaultID, UINT256, 35)
TYPED_SFIELD(sfParentBatchID, UINT256, 36)
TYPED_SFIELD(sfSubscriptionID, UINT256, 37)
// number (common)
TYPED_SFIELD(sfNumber, NUMBER, 1)

View File

@@ -526,6 +526,28 @@ TRANSACTION(ttBATCH, 71, Batch, Delegation::notDelegatable, ({
{sfBatchSigners, soeOPTIONAL},
}))
/** This transaction type batches together transactions. */
TRANSACTION(ttSUBSCRIPTION_SET, 72, SubscriptionSet, Delegation::delegatable, ({
{sfDestination, soeOPTIONAL},
{sfAmount, soeREQUIRED, soeMPTSupported},
{sfFrequency, soeOPTIONAL},
{sfStartTime, soeOPTIONAL},
{sfExpiration, soeOPTIONAL},
{sfDestinationTag, soeOPTIONAL},
{sfSubscriptionID, soeOPTIONAL},
}))
/** This transaction type batches together transactions. */
TRANSACTION(ttSUBSCRIPTION_CANCEL, 73, SubscriptionCancel, Delegation::delegatable, ({
{sfSubscriptionID, soeREQUIRED},
}))
/** This transaction type batches together transactions. */
TRANSACTION(ttSUBSCRIPTION_CLAIM, 74, SubscriptionClaim, Delegation::delegatable, ({
{sfAmount, soeREQUIRED, soeMPTSupported},
{sfSubscriptionID, soeREQUIRED},
}))
/** This system-generated transaction type is used to update the status of the various amendments.
For details, see: https://xrpl.org/amendments.html

View File

@@ -99,6 +99,7 @@ JSS(Signer); // field.
JSS(Signers); // field.
JSS(SigningPubKey); // field.
JSS(Subject); // in: Credential transactions
JSS(SubscriptionID); // in: Subscription transactions
JSS(TakerGets); // field.
JSS(TakerPays); // field.
JSS(TradingFee); // in/out: AMM trading fee
@@ -283,6 +284,7 @@ JSS(fee_mult_max); // in: TransactionSign
JSS(fee_ref); // out: NetworkOPs, DEPRECATED
JSS(fetch_pack); // out: NetworkOPs
JSS(FIELDS); // out: RPC server_definitions
JSS(Frequency); // in: Subscription transactions
// matches definitions.json format
JSS(first); // out: rpc/Version
JSS(finished);

View File

@@ -96,6 +96,7 @@ enum class LedgerNameSpace : std::uint16_t {
PERMISSIONED_DOMAIN = 'm',
DELEGATE = 'E',
VAULT = 'V',
SUBSCRIPTION = 'U',
// No longer used or supported. Left here to reserve the space
// to avoid accidental reuse.
@@ -580,6 +581,17 @@ permissionedDomain(uint256 const& domainID) noexcept
return {ltPERMISSIONED_DOMAIN, domainID};
}
Keylet
subscription(
AccountID const& account,
AccountID const& dest,
std::uint32_t const& seq) noexcept
{
return {
ltSUBSCRIPTION,
indexHash(LedgerNameSpace::SUBSCRIPTION, account, dest, seq)};
}
} // namespace keylet
} // namespace ripple

File diff suppressed because it is too large Load Diff

View File

@@ -175,78 +175,12 @@ public:
BEAST_EXPECT(*lv == -1);
}
void
// Verify that stopping the JobQueue while a coroutine is suspended at a
// yield point resumes the coroutine and reports shouldStop() == true.
stopJobQueueWhenCoroutineSuspended()
{
    using namespace std::chrono_literals;
    using namespace jtx;
    testcase("Stop JobQueue when a coroutine is suspended");
    // Multi-threaded job queue so the coroutine runs on a worker thread
    // concurrently with this test body.
    Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
        cfg->FORCE_MULTI_THREAD = true;
        return cfg;
    }));
    bool started = false;
    bool finished = false;
    std::optional<bool> shouldStop;
    std::condition_variable cv;
    std::mutex m;
    // Held across the waits below; released inside cv.wait_for.
    std::unique_lock<std::mutex> lk(m);
    auto coro = env.app().getJobQueue().postCoro(
        jtCLIENT, "Coroutine-Test", [&](auto const& c) {
            started = true;
            cv.notify_all();
            // Suspend; the queue's stop() should resume us.
            c->yield();
            finished = true;
            shouldStop = c->shouldStop();
            cv.notify_all();
        });
    // Wait (bounded) for the coroutine to reach its yield point.
    cv.wait_for(lk, 5s, [&]() { return started; });
    env.app().getJobQueue().stop();
    // stop() must have resumed the suspended coroutine to completion.
    cv.wait_for(lk, 5s, [&]() { return finished; });
    BEAST_EXPECT(finished);
    BEAST_EXPECT(shouldStop.has_value() && *shouldStop == true);
}
void
// Verify that constructing a Coro and destroying it without ever posting
// or resuming it does not crash or deadlock.
coroutineGetsDestroyedBeforeExecuting()
{
    using namespace std::chrono_literals;
    using namespace jtx;
    testcase("Coroutine gets destroyed before executing");
    Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
        cfg->FORCE_MULTI_THREAD = true;
        return cfg;
    }));
    {
        // Created directly (not via postCoro), so it is never scheduled;
        // it goes out of scope at the end of this block unexecuted.
        auto coro = std::make_shared<JobQueue::Coro>(
            Coro_create_t{},
            env.app().getJobQueue(),
            JobType::jtCLIENT,
            "test",
            [](auto coro) {
            });
    }
    // Reaching this point without a crash is the test.
    pass();
}
void
// Test suite entry point: runs every Coro test case in order.
run() override
{
    correct_order();
    incorrect_order();
    thread_specific_storage();
    stopJobQueueWhenCoroutineSuspended();
    coroutineGetsDestroyedBeforeExecuting();
}
};

View File

@@ -67,6 +67,7 @@
#include <test/jtx/sendmax.h>
#include <test/jtx/seq.h>
#include <test/jtx/sig.h>
#include <test/jtx/subscription.h>
#include <test/jtx/tag.h>
#include <test/jtx/tags.h>
#include <test/jtx/ter.h>

View File

@@ -0,0 +1,107 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx/subscription.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/jss.h>
namespace ripple {
namespace test {
namespace jtx {
/** Subscription operations. */
namespace subscription {
void
start_time::operator()(Env& env, JTx& jt) const
{
jt.jv[sfStartTime.jsonName] = value_.time_since_epoch().count();
}
/** Build a SubscriptionSet transaction that creates a new subscription from
    `account` to `destination`, paying `amount` every `frequency` seconds,
    optionally expiring at `expiration`. */
Json::Value
create(
    jtx::Account const& account,
    jtx::Account const& destination,
    STAmount const& amount,
    NetClock::duration const& frequency,
    std::optional<NetClock::time_point> const& expiration)
{
    Json::Value tx;
    tx[jss::TransactionType] = jss::SubscriptionSet;
    tx[jss::Account] = to_string(account.id());
    tx[jss::Destination] = to_string(destination.id());
    tx[jss::Amount] = amount.getJson(JsonOptions::none);
    tx[jss::Frequency] = frequency.count();
    tx[jss::Flags] = tfFullyCanonicalSig;
    if (expiration.has_value())
        tx[sfExpiration.jsonName] = expiration->time_since_epoch().count();
    return tx;
}
/** Build a SubscriptionSet transaction that modifies the existing
    subscription identified by `subscriptionId`. */
Json::Value
update(
    jtx::Account const& account,
    uint256 const& subscriptionId,
    STAmount const& amount,
    std::optional<NetClock::time_point> const& expiration)
{
    Json::Value tx;
    tx[jss::TransactionType] = jss::SubscriptionSet;
    tx[jss::Account] = to_string(account.id());
    tx[jss::SubscriptionID] = to_string(subscriptionId);
    tx[jss::Amount] = amount.getJson(JsonOptions::none);
    tx[jss::Flags] = tfFullyCanonicalSig;
    if (expiration.has_value())
        tx[sfExpiration.jsonName] = expiration->time_since_epoch().count();
    return tx;
}
/** Build a SubscriptionCancel transaction for `subscriptionId`. */
Json::Value
cancel(jtx::Account const& account, uint256 const& subscriptionId)
{
    Json::Value tx;
    tx[jss::TransactionType] = jss::SubscriptionCancel;
    tx[jss::Account] = to_string(account.id());
    tx[jss::SubscriptionID] = to_string(subscriptionId);
    tx[jss::Flags] = tfFullyCanonicalSig;
    return tx;
}
/** Build a SubscriptionClaim transaction claiming `amount` from the
    subscription identified by `subscriptionId`. */
Json::Value
claim(
    jtx::Account const& account,
    uint256 const& subscriptionId,
    STAmount const& amount)
{
    Json::Value tx;
    tx[jss::TransactionType] = jss::SubscriptionClaim;
    tx[jss::Account] = to_string(account.id());
    tx[jss::SubscriptionID] = to_string(subscriptionId);
    tx[jss::Amount] = amount.getJson(JsonOptions::none);
    tx[jss::Flags] = tfFullyCanonicalSig;
    return tx;
}
} // namespace subscription
} // namespace jtx
} // namespace test
} // namespace ripple

View File

@@ -0,0 +1,79 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_TEST_JTX_SUBSCRIPTION_H_INCLUDED
#define RIPPLE_TEST_JTX_SUBSCRIPTION_H_INCLUDED
#include <test/jtx/Account.h>
#include <test/jtx/Env.h>
namespace ripple {
namespace test {
namespace jtx {
/** Subscription operations. */
namespace subscription {
Json::Value
create(
jtx::Account const& account,
jtx::Account const& destination,
STAmount const& amount,
NetClock::duration const& frequency,
std::optional<NetClock::time_point> const& expiration = std::nullopt);
Json::Value
update(
jtx::Account const& account,
uint256 const& subscriptionId,
STAmount const& amount,
std::optional<NetClock::time_point> const& expiration = std::nullopt);
Json::Value
cancel(jtx::Account const& account, uint256 const& subscriptionId);
Json::Value
claim(
jtx::Account const& account,
uint256 const& subscriptionId,
STAmount const& amount);
/** Set the "StartTime" field on a JTx.
    Usable as an Env funclet: stamps the given NetClock time (seconds since
    the Ripple epoch) onto the transaction JSON being built. */
class start_time
{
private:
    // The start time to stamp on the transaction.
    NetClock::time_point value_;
public:
    explicit start_time(NetClock::time_point const& value) : value_(value)
    {
    }
    // Invoked by Env while assembling the JTx; writes sfStartTime.
    void
    operator()(Env&, JTx& jtx) const;
};
} // namespace subscription
} // namespace jtx
} // namespace test
} // namespace ripple
#endif

View File

@@ -0,0 +1,284 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_MISC_SUBSCRIPTIONHELPERS_H_INCLUDED
#define RIPPLE_APP_MISC_SUBSCRIPTIONHELPERS_H_INCLUDED
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/paths/Flow.h>
#include <xrpld/app/tx/detail/MPTokenAuthorize.h>
#include <xrpld/ledger/ApplyView.h>
#include <xrpld/ledger/ReadView.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/STAccount.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFlags.h>
namespace ripple {
/** Read-only check of whether `account` could transfer `amount` to `dest`.
    Specialized below for trust-line IOUs (Issue) and MPTs (MPTIssue);
    returns tesSUCCESS or the specific tec code describing the failure.
    NOTE(review): `static` on a function template in a header gives each
    translation unit its own copy — confirm that is intended vs `inline`. */
template <ValidIssueType T>
static TER
canTransferTokenHelper(
    ReadView const& view,
    AccountID const& account,
    AccountID const& dest,
    STAmount const& amount,
    beast::Journal const& j);
/** IOU specialization: validates issuer existence, trust-line presence and
    orientation, authorization, (deep) freeze state, and spendable balance,
    all without modifying the view. */
template <>
TER
canTransferTokenHelper<Issue>(
    ReadView const& view,
    AccountID const& account,
    AccountID const& dest,
    STAmount const& amount,
    beast::Journal const& j)
{
    AccountID issuer = amount.getIssuer();
    // The issuer can always send its own IOU; skip all further checks.
    if (issuer == account)
    {
        JLOG(j.trace())
            << "canTransferTokenHelper: Issuer is the same as the account.";
        return tesSUCCESS;
    }
    // If the issuer does not exist, return tecNO_ISSUER
    auto const sleIssuer = view.read(keylet::account(issuer));
    if (!sleIssuer)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Issuer does not exist.";
        return tecNO_ISSUER;
    }
    // If the account does not have a trustline to the issuer, return tecNO_LINE
    auto const sleRippleState =
        view.read(keylet::line(account, issuer, amount.getCurrency()));
    if (!sleRippleState)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Trust line does not exist.";
        return tecNO_LINE;
    }
    STAmount const balance = (*sleRippleState)[sfBalance];
    // The trust-line balance is stored from the low account's perspective;
    // these two checks reject a line whose sign contradicts the address
    // ordering (i.e. an internally inconsistent line).
    // If balance is positive, issuer must have higher address than account
    if (balance > beast::zero && issuer < account)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Invalid trust line state.";
        return tecNO_PERMISSION;
    }
    // If balance is negative, issuer must have lower address than account
    if (balance < beast::zero && issuer > account)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Invalid trust line state.";
        return tecNO_PERMISSION;
    }
    // If the issuer has requireAuth set, check if the account is authorized
    if (auto const ter = requireAuth(view, amount.issue(), account);
        ter != tesSUCCESS)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Account is not authorized";
        return ter;
    }
    // If the issuer has requireAuth set, check if the destination is authorized
    if (auto const ter = requireAuth(view, amount.issue(), dest);
        ter != tesSUCCESS)
    {
        JLOG(j.trace())
            << "canTransferTokenHelper: Destination is not authorized.";
        return ter;
    }
    // If the issuer has frozen the account, return tecFROZEN
    if (isFrozen(view, account, amount.issue()) ||
        isDeepFrozen(
            view, account, amount.issue().currency, amount.issue().account))
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Account is frozen.";
        return tecFROZEN;
    }
    // If the issuer has frozen the destination, return tecFROZEN
    if (isFrozen(view, dest, amount.issue()) ||
        isDeepFrozen(
            view, dest, amount.issue().currency, amount.issue().account))
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Destination is frozen.";
        return tecFROZEN;
    }
    // Freeze was already rejected above, so it is ignored when computing
    // the spendable balance here.
    STAmount const spendableAmount = accountHolds(
        view, account, amount.getCurrency(), issuer, fhIGNORE_FREEZE, j);
    // If the balance is less than or equal to 0, return
    // tecINSUFFICIENT_FUNDS
    if (spendableAmount <= beast::zero)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Spendable amount is less "
                           "than or equal to 0.";
        return tecINSUFFICIENT_FUNDS;
    }
    // If the spendable amount is less than the amount, return
    // tecINSUFFICIENT_FUNDS
    if (spendableAmount < amount)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Spendable amount is less "
                           "than the amount.";
        return tecINSUFFICIENT_FUNDS;
    }
    // If the amount is not addable to the balance, return tecPRECISION_LOSS
    // NOTE(review): canAdd is applied to (spendableAmount, amount) — confirm
    // these are the intended operands for the precision-loss guard.
    if (!canAdd(spendableAmount, amount))
        return tecPRECISION_LOSS;
    return tesSUCCESS;
}
/** MPT specialization: validates the issuance and the holder's MPToken,
    authorization (weak), lock state, transferability, and spendable
    balance, all without modifying the view. */
template <>
TER
canTransferTokenHelper<MPTIssue>(
    ReadView const& view,
    AccountID const& account,
    AccountID const& dest,
    STAmount const& amount,
    beast::Journal const& j)
{
    AccountID issuer = amount.getIssuer();
    // The issuer can always send its own MPT; skip all further checks.
    if (issuer == account)
    {
        JLOG(j.trace())
            << "canTransferTokenHelper: Issuer is the same as the account.";
        return tesSUCCESS;
    }
    // If the mpt does not exist, return tecOBJECT_NOT_FOUND
    auto const issuanceKey =
        keylet::mptIssuance(amount.get<MPTIssue>().getMptID());
    auto const sleIssuance = view.read(issuanceKey);
    if (!sleIssuance)
    {
        JLOG(j.trace())
            << "canTransferTokenHelper: MPT issuance does not exist.";
        return tecOBJECT_NOT_FOUND;
    }
    // If the issuer is not the same as the issuer of the mpt, return
    // tecNO_PERMISSION
    if (sleIssuance->getAccountID(sfIssuer) != issuer)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Issuer is not the same as "
                           "the issuer of the MPT.";
        return tecNO_PERMISSION;
    }
    // If the account does not have the mpt, return tecOBJECT_NOT_FOUND
    if (!view.exists(keylet::mptoken(issuanceKey.key, account)))
    {
        JLOG(j.trace())
            << "canTransferTokenHelper: Account does not have the MPT.";
        return tecOBJECT_NOT_FOUND;
    }
    // If the issuer has requireAuth set, check if the account is
    // authorized
    auto const& mptIssue = amount.get<MPTIssue>();
    if (auto const ter =
            requireAuth(view, mptIssue, account, AuthType::WeakAuth);
        ter != tesSUCCESS)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Account is not authorized.";
        return ter;
    }
    // If the issuer has requireAuth set, check if the destination is
    // authorized
    if (auto const ter = requireAuth(view, mptIssue, dest, AuthType::WeakAuth);
        ter != tesSUCCESS)
    {
        JLOG(j.trace())
            << "canTransferTokenHelper: Destination is not authorized.";
        return ter;
    }
    // If the issuer has locked the account, return tecLOCKED
    if (isFrozen(view, account, mptIssue))
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Account is locked.";
        return tecLOCKED;
    }
    // If the issuer has locked the destination, return tecLOCKED
    if (isFrozen(view, dest, mptIssue))
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Destination is locked.";
        return tecLOCKED;
    }
    // If the mpt cannot be transferred, return tecNO_AUTH
    if (auto const ter = canTransfer(view, mptIssue, account, dest);
        ter != tesSUCCESS)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: MPT cannot be transferred.";
        return ter;
    }
    // Freeze/auth were already checked above, so both are ignored when
    // computing the spendable balance here.
    STAmount const spendableAmount = accountHolds(
        view,
        account,
        amount.get<MPTIssue>(),
        fhIGNORE_FREEZE,
        ahIGNORE_AUTH,
        j);
    // If the balance is less than or equal to 0, return
    // tecINSUFFICIENT_FUNDS
    if (spendableAmount <= beast::zero)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Spendable amount is less "
                           "than or equal to 0.";
        return tecINSUFFICIENT_FUNDS;
    }
    // If the spendable amount is less than the amount, return
    // tecINSUFFICIENT_FUNDS
    if (spendableAmount < amount)
    {
        JLOG(j.trace()) << "canTransferTokenHelper: Spendable amount is less "
                           "than the amount.";
        return tecINSUFFICIENT_FUNDS;
    }
    // If the amount is not addable to the balance, return tecPRECISION_LOSS
    if (!canAdd(spendableAmount, amount))
        return tecPRECISION_LOSS;
    return tesSUCCESS;
}
} // namespace ripple
#endif

View File

@@ -543,6 +543,7 @@ LedgerEntryTypesMatch::visitEntry(
case ltCREDENTIAL:
case ltPERMISSIONED_DOMAIN:
case ltVAULT:
case ltSUBSCRIPTION:
break;
default:
invalidTypeAdded_ = true;
@@ -1511,6 +1512,9 @@ ValidMPTIssuance::finalize(
if (tx.getTxnType() == ttESCROW_FINISH)
return true;
if (tx.getTxnType() == ttSUBSCRIPTION_CLAIM)
return true;
if ((tx.getTxnType() == ttVAULT_CLAWBACK ||
tx.getTxnType() == ttVAULT_WITHDRAW) &&
mptokensDeleted_ == 1 && mptokensCreated_ == 0 &&

View File

@@ -0,0 +1,106 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/paths/Flow.h>
#include <xrpld/app/tx/detail/SubscriptionCancel.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/STAccount.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFlags.h>
namespace ripple {
/** Stateless validation for SubscriptionCancel: the amendment must be
    enabled, common fields must be well-formed (preflight1), and no
    unsupported flags may be set; preflight2 verifies the signature. */
NotTEC
SubscriptionCancel::preflight(PreflightContext const& ctx)
{
    if (!ctx.rules.enabled(featureSubscription))
        return temDISABLED;
    if (auto const ret = preflight1(ctx); !isTesSuccess(ret))
        return ret;
    if (ctx.tx.getFlags() & tfUniversalMask)
        return temINVALID_FLAG;
    return preflight2(ctx);
}
/** Read-only checks for SubscriptionCancel.

    Verifies that the referenced subscription exists and that the
    transaction submitter is a party to it. Without the party check, any
    third account could cancel an arbitrary subscription (cf. CheckCancel,
    which restricts cancellation to the check's source or destination).

    @return tesSUCCESS, tecNO_ENTRY, or tecNO_PERMISSION.
*/
TER
SubscriptionCancel::preclaim(PreclaimContext const& ctx)
{
    auto const sleSub = ctx.view.read(
        keylet::subscription(ctx.tx.getFieldH256(sfSubscriptionID)));
    if (!sleSub)
    {
        JLOG(ctx.j.debug())
            << "SubscriptionCancel: Subscription does not exist.";
        return tecNO_ENTRY;
    }
    // Only the subscription's source or its destination may cancel it.
    AccountID const txAccount = ctx.tx[sfAccount];
    if (txAccount != sleSub->getAccountID(sfAccount) &&
        txAccount != sleSub->getAccountID(sfDestination))
    {
        JLOG(ctx.j.debug())
            << "SubscriptionCancel: Account is not a party to the "
               "subscription.";
        return tecNO_PERMISSION;
    }
    return tesSUCCESS;
}
/** Remove the subscription from the ledger.

    Unlinks the entry from both the source's and the destination's owner
    directories, releases the source's owner reserve, and erases the entry.

    @return tesSUCCESS; tecINTERNAL if the entry vanished after preclaim;
            tefBAD_LEDGER on directory or account inconsistencies.
*/
TER
SubscriptionCancel::doApply()
{
    Sandbox sb(&ctx_.view());
    auto const sleSub =
        sb.peek(keylet::subscription(ctx_.tx.getFieldH256(sfSubscriptionID)));
    if (!sleSub)
    {
        // preclaim confirmed existence; disappearance here is internal error.
        JLOG(ctx_.journal.debug())
            << "SubscriptionCancel: Subscription does not exist.";
        return tecINTERNAL;
    }
    AccountID const account{sleSub->getAccountID(sfAccount)};
    AccountID const dstAcct{sleSub->getAccountID(sfDestination)};
    auto viewJ = ctx_.app.journal("View");
    // Unlink from the source's owner directory.
    std::uint64_t const ownerPage{(*sleSub)[sfOwnerNode]};
    if (!sb.dirRemove(
            keylet::ownerDir(account), ownerPage, sleSub->key(), true))
    {
        JLOG(j_.fatal()) << "Unable to delete subscription from source.";
        return tefBAD_LEDGER;
    }
    // Unlink from the destination's owner directory.
    std::uint64_t const destPage{(*sleSub)[sfDestinationNode]};
    if (!sb.dirRemove(keylet::ownerDir(dstAcct), destPage, sleSub->key(), true))
    {
        JLOG(j_.fatal()) << "Unable to delete subscription from destination.";
        return tefBAD_LEDGER;
    }
    // Guard against passing a null SLE to adjustOwnerCount: an account that
    // still owns ledger objects must itself exist.
    auto const sleSrc = sb.peek(keylet::account(account));
    if (!sleSrc)
    {
        JLOG(j_.fatal()) << "Subscription owner account missing.";
        return tefBAD_LEDGER;
    }
    sb.erase(sleSub);
    adjustOwnerCount(sb, sleSrc, -1, viewJ);
    sb.apply(ctx_.rawView());
    return tesSUCCESS;
}
} // namespace ripple

View File

@@ -0,0 +1,48 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_TX_SUBSCRIPTIONCANCEL_H_INCLUDED
#define RIPPLE_TX_SUBSCRIPTIONCANCEL_H_INCLUDED
#include <xrpld/app/tx/detail/Transactor.h>
namespace ripple {
/** Transactor for ttSUBSCRIPTION_CANCEL: deletes an existing Subscription
    ledger entry identified by sfSubscriptionID. */
class SubscriptionCancel : public Transactor
{
public:
    // Standard (fee-only) transaction consequences.
    static constexpr ConsequencesFactoryType ConsequencesFactory{Normal};
    explicit SubscriptionCancel(ApplyContext& ctx) : Transactor(ctx)
    {
    }
    // Stateless checks (amendment, flags, signature).
    static NotTEC
    preflight(PreflightContext const& ctx);
    // Read-only ledger checks (subscription existence).
    static TER
    preclaim(PreclaimContext const& ctx);
    // State changes: unlink from owner directories and erase the entry.
    TER
    doApply() override;
};
} // namespace ripple
#endif // RIPPLE_TX_SUBSCRIPTIONCANCEL_H_INCLUDED

View File

@@ -0,0 +1,426 @@
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/misc/SubscriptionHelpers.h>
#include <xrpld/app/paths/Flow.h>
#include <xrpld/app/tx/detail/MPTokenAuthorize.h>
#include <xrpld/app/tx/detail/SubscriptionClaim.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/STAccount.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFlags.h>
namespace ripple {
/** Stateless validation for SubscriptionClaim: the amendment must be
    enabled, common fields must be well-formed (preflight1), and no
    unsupported flags may be set; preflight2 verifies the signature. */
NotTEC
SubscriptionClaim::preflight(PreflightContext const& ctx)
{
    if (!ctx.rules.enabled(featureSubscription))
        return temDISABLED;
    if (auto const ret = preflight1(ctx); !isTesSuccess(ret))
        return ret;
    if (ctx.tx.getFlags() & tfUniversalMask)
        return temINVALID_FLAG;
    return preflight2(ctx);
}
/** Read-only checks for SubscriptionClaim.

    Verifies the subscription exists, the submitter is its destination,
    both parties still exist, the claimed amount matches the subscription's
    asset and fits within the current period's effective balance, the
    source could actually fund the transfer, and the claim time has been
    reached. Mirrors the period-rollover logic applied in doApply. */
TER
SubscriptionClaim::preclaim(PreclaimContext const& ctx)
{
    auto const sleSub = ctx.view.read(
        keylet::subscription(ctx.tx.getFieldH256(sfSubscriptionID)));
    if (!sleSub)
    {
        JLOG(ctx.j.trace())
            << "SubscriptionClaim: Subscription does not exist.";
        return tecNO_ENTRY;
    }
    // Only claim a subscription with this account as the destination.
    AccountID const dest = sleSub->getAccountID(sfDestination);
    if (ctx.tx[sfAccount] != dest)
    {
        JLOG(ctx.j.trace()) << "SubscriptionClaim: Cashing a subscription with "
                               "wrong Destination.";
        return tecNO_PERMISSION;
    }
    // SubscriptionSet should never create a self-subscription; treat one as
    // an internal invariant violation rather than a user error.
    AccountID const account = sleSub->getAccountID(sfAccount);
    if (account == dest)
    {
        JLOG(ctx.j.trace()) << "SubscriptionClaim: Malformed transaction: "
                               "Cashing subscription to self.";
        return tecINTERNAL;
    }
    {
        auto const sleSrc = ctx.view.read(keylet::account(account));
        auto const sleDst = ctx.view.read(keylet::account(dest));
        if (!sleSrc || !sleDst)
        {
            JLOG(ctx.j.trace())
                << "SubscriptionClaim: source or destination not in ledger";
            return tecNO_ENTRY;
        }
    }
    {
        STAmount const amount = ctx.tx.getFieldAmount(sfAmount);
        STAmount const sleAmount = sleSub->getFieldAmount(sfAmount);
        // The claim must be denominated in the subscription's asset.
        if (amount.asset() != sleAmount.asset())
        {
            JLOG(ctx.j.trace()) << "SubscriptionClaim: Subscription claim does "
                                   "not match subscription currency.";
            return tecWRONG_ASSET;
        }
        // NOTE(review): returning a tem (malformed) code from preclaim is
        // unusual — malformed results normally come from preflight; confirm
        // this is intended rather than a tec code.
        if (amount > sleAmount)
        {
            JLOG(ctx.j.trace()) << "SubscriptionClaim: Claim amount exceeds "
                                   "subscription amount.";
            return temBAD_AMOUNT;
        }
        // Time/period context
        std::uint32_t const currentTime =
            ctx.view.info().parentCloseTime.time_since_epoch().count();
        std::uint32_t const nextClaimTime =
            sleSub->getFieldU32(sfNextClaimTime);
        std::uint32_t const frequency = sleSub->getFieldU32(sfFrequency);
        // Determine effective available balance:
        // - If we have crossed into a later period AND the previous period had
        //   a partial balance remaining (carryover not allowed), then the
        //   effective period rolls forward once and its balance resets to
        //   sleAmount.
        // - Otherwise we operate on the period at nextClaimTime with its
        //   stored balance.
        STAmount balance = sleSub->getFieldAmount(sfBalance);
        bool const arrears = currentTime >= nextClaimTime + frequency;
        if (arrears && balance != sleAmount)
        {
            // We will effectively operate on (nextClaimTime + frequency) with
            // a full balance.
            balance = sleAmount;
        }
        if (amount > balance)
        {
            JLOG(ctx.j.trace())
                << "SubscriptionClaim: Claim amount exceeds remaining "
                   "balance for this period.";
            return tecINSUFFICIENT_FUNDS;
        }
        if (isXRP(amount))
        {
            // XRP: the source must have sufficient liquid (non-reserved) XRP.
            if (xrpLiquid(ctx.view, account, 0, ctx.j) < amount)
                return tecINSUFFICIENT_FUNDS;
        }
        else
        {
            // IOU/MPT: dispatch to the asset-specific read-only validator.
            if (auto const ret = std::visit(
                    [&]<typename T>(T const&) {
                        return canTransferTokenHelper<T>(
                            ctx.view, account, dest, amount, ctx.j);
                    },
                    amount.asset().value());
                !isTesSuccess(ret))
                return ret;
        }
    }
    // Must be at or past the start of the effective period.
    if (!hasExpired(ctx.view, sleSub->getFieldU32(sfNextClaimTime)))
    {
        JLOG(ctx.j.trace()) << "SubscriptionClaim: The subscription has not "
                               "reached the next claim time.";
        return tecTOO_SOON;
    }
    return tesSUCCESS;
}
/** Move `amount` from `sender` to `receiver` in an apply view, creating the
    receiving trust line / MPToken first when `createAsset` is set and the
    receiver can cover the added reserve (checked against `xrpBalance`).
    Specialized below for Issue and MPTIssue. */
template <ValidIssueType T>
static TER
doTransferTokenHelper(
    ApplyView& view,
    std::shared_ptr<SLE> const& sleDest,
    STAmount const& xrpBalance,
    STAmount const& amount,
    AccountID const& issuer,
    AccountID const& sender,
    AccountID const& receiver,
    bool createAsset,
    beast::Journal journal);
/** IOU specialization: optionally auto-creates the receiver's trust line
    (zero balance, zero limit) before delivering via accountSend. */
template <>
TER
doTransferTokenHelper<Issue>(
    ApplyView& view,
    std::shared_ptr<SLE> const& sleDest,
    STAmount const& xrpBalance,
    STAmount const& amount,
    AccountID const& issuer,
    AccountID const& sender,
    AccountID const& receiver,
    bool createAsset,
    beast::Journal journal)
{
    Keylet const trustLineKey = keylet::line(receiver, amount.issue());
    // Trust lines store the low account first; remember which side the
    // receiver is on for trustCreate.
    bool const recvLow = issuer > receiver;
    // Review Note: We could remove this and just say to use batch to auth the
    // token first
    if (!view.exists(trustLineKey) && createAsset && issuer != receiver)
    {
        // Can the account cover the trust line's reserve?
        if (std::uint32_t const ownerCount = {sleDest->at(sfOwnerCount)};
            xrpBalance < view.fees().accountReserve(ownerCount + 1))
        {
            JLOG(journal.trace())
                << "doTransferTokenHelper: Trust line does not exist. "
                   "Insufficent reserve to create line.";
            return tecNO_LINE_INSUF_RESERVE;
        }
        Currency const currency = amount.getCurrency();
        // Zero-valued starting balance; noAccount() marks it as neutral.
        STAmount initialBalance(amount.issue());
        initialBalance.setIssuer(noAccount());
        // clang-format off
        if (TER const ter = trustCreate(
                view,                            // payment sandbox
                recvLow,                         // is dest low?
                issuer,                          // source
                receiver,                        // destination
                trustLineKey.key,                // ledger index
                sleDest,                         // Account to add to
                false,                           // authorize account
                (sleDest->getFlags() & lsfDefaultRipple) == 0,
                false,                           // freeze trust line
                false,                           // deep freeze trust line
                initialBalance,                  // zero initial balance
                Issue(currency, receiver),       // limit of zero
                0,                               // quality in
                0,                               // quality out
                journal);                        // journal
            !isTesSuccess(ter))
        {
            JLOG(journal.trace()) << "doTransferTokenHelper: Failed to create trust line: " << transToken(ter);
            return ter;
        }
        // clang-format on
        view.update(sleDest);
    }
    // No line and no way (or permission) to create one: fail.
    if (!view.exists(trustLineKey) && issuer != receiver)
        return tecNO_LINE;
    // Deliver; transfer fees apply (WaiveTransferFee::No).
    auto const ter = accountSend(
        view, sender, receiver, amount, journal, WaiveTransferFee::No);
    if (ter != tesSUCCESS)
    {
        JLOG(journal.trace()) << "doTransferTokenHelper: Failed to send token: "
                              << transToken(ter);
        return ter;  // LCOV_EXCL_LINE
    }
    return tesSUCCESS;
}
/** MPT specialization: optionally auto-creates the receiver's MPToken
    (adjusting the owner count) before delivering via accountSend. */
template <>
TER
doTransferTokenHelper<MPTIssue>(
    ApplyView& view,
    std::shared_ptr<SLE> const& sleDest,
    STAmount const& xrpBalance,
    STAmount const& amount,
    AccountID const& issuer,
    AccountID const& sender,
    AccountID const& receiver,
    bool createAsset,
    beast::Journal journal)
{
    auto const mptID = amount.get<MPTIssue>().getMptID();
    auto const issuanceKey = keylet::mptIssuance(mptID);
    if (!view.exists(keylet::mptoken(issuanceKey.key, receiver)) && createAsset)
    {
        // Can the account cover the MPToken's reserve?
        if (std::uint32_t const ownerCount = {sleDest->at(sfOwnerCount)};
            xrpBalance < view.fees().accountReserve(ownerCount + 1))
        {
            JLOG(journal.trace())
                << "doTransferTokenHelper: MPT does not exist. "
                   "Insufficent reserve to create MPT.";
            return tecINSUFFICIENT_RESERVE;
        }
        if (auto const ter =
                MPTokenAuthorize::createMPToken(view, mptID, receiver, 0);
            !isTesSuccess(ter))
        {
            JLOG(journal.trace())
                << "doTransferTokenHelper: Failed to create MPT: "
                << transToken(ter);
            return ter;
        }
        // Update owner count.
        adjustOwnerCount(view, sleDest, 1, journal);
    }
    // Still no MPToken (not created above): the receiver cannot hold it.
    if (!view.exists(keylet::mptoken(issuanceKey.key, receiver)))
    {
        JLOG(journal.trace()) << "doTransferTokenHelper: MPT does not exist.";
        return tecNO_PERMISSION;
    }
    // Deliver; transfer fees apply (WaiveTransferFee::No).
    auto const ter = accountSend(
        view, sender, receiver, amount, journal, WaiveTransferFee::No);
    if (ter != tesSUCCESS)
    {
        JLOG(journal.trace())
            << "doTransferTokenHelper: Failed to send MPT: " << transToken(ter);
        return ter;  // LCOV_EXCL_LINE
    }
    return tesSUCCESS;
}
/** Deliver the claimed amount and advance the subscription's period state.

    Applies the same rollover-forfeit rule validated in preclaim, performs
    the XRP or token transfer, updates the per-period balance and
    NextClaimTime, and — if the subscription has passed its Expiration —
    deletes it.

    Fix: deletion on expiration previously called psb.erase() directly,
    leaving the entry linked in BOTH owner directories and the owner's
    reserve charged. It now unlinks from both directories and releases the
    reserve first, mirroring SubscriptionCancel::doApply.

    @return tesSUCCESS; tecINTERNAL on invariant violations; a transfer
            failure code; or tefBAD_LEDGER on directory inconsistencies.
*/
TER
SubscriptionClaim::doApply()
{
    PaymentSandbox psb(&ctx_.view());
    auto viewJ = ctx_.app.journal("View");
    auto sleSub =
        psb.peek(keylet::subscription(ctx_.tx.getFieldH256(sfSubscriptionID)));
    if (!sleSub)
    {
        JLOG(j_.trace()) << "SubscriptionClaim: Subscription does not exist.";
        return tecINTERNAL;
    }
    AccountID const account = sleSub->getAccountID(sfAccount);
    if (!psb.exists(keylet::account(account)))
    {
        JLOG(j_.trace()) << "SubscriptionClaim: Account does not exist.";
        return tecINTERNAL;
    }
    AccountID const dest = sleSub->getAccountID(sfDestination);
    if (!psb.exists(keylet::account(dest)))
    {
        JLOG(j_.trace()) << "SubscriptionClaim: Account does not exist.";
        return tecINTERNAL;
    }
    // Only the destination may claim (also enforced in preclaim).
    if (dest != ctx_.tx.getAccountID(sfAccount))
    {
        JLOG(j_.trace()) << "SubscriptionClaim: Account is not the "
                            "destination of the subscription.";
        return tecNO_PERMISSION;
    }
    STAmount const sleAmount = sleSub->getFieldAmount(sfAmount);
    STAmount const deliverAmount = ctx_.tx.getFieldAmount(sfAmount);
    // Pull current period info
    std::uint32_t const currentTime =
        psb.info().parentCloseTime.time_since_epoch().count();
    std::uint32_t nextClaimTime = sleSub->getFieldU32(sfNextClaimTime);
    std::uint32_t const frequency = sleSub->getFieldU32(sfFrequency);
    STAmount availableBalance = sleSub->getFieldAmount(sfBalance);
    bool const arrears = currentTime >= nextClaimTime + frequency;
    // If we crossed into a later period and the previous period was partially
    // used, forfeit the leftover and roll forward exactly one period; reset
    // the balance.
    if (arrears && availableBalance != sleAmount)
    {
        nextClaimTime += frequency;
        availableBalance = sleAmount;
        // Reflect the rollover immediately in the SLE so subsequent logic is
        // consistent.
        sleSub->setFieldU32(sfNextClaimTime, nextClaimTime);
        sleSub->setFieldAmount(sfBalance, availableBalance);
    }
    // Enforce available balance for the effective period (preclaim already
    // checked; failure here is an invariant violation).
    if (deliverAmount > availableBalance)
    {
        JLOG(j_.trace()) << "SubscriptionClaim: Claim amount exceeds remaining "
                         << "balance for this period.";
        return tecINTERNAL;
    }
    // Perform the transfer
    if (isXRP(deliverAmount))
    {
        if (TER const ter{
                transferXRP(psb, account, dest, deliverAmount, viewJ)};
            ter != tesSUCCESS)
        {
            return ter;
        }
    }
    else
    {
        if (auto const ret = std::visit(
                [&]<typename T>(T const&) {
                    return doTransferTokenHelper<T>(
                        psb,
                        psb.peek(keylet::account(dest)),
                        mPriorBalance,
                        deliverAmount,
                        deliverAmount.getIssuer(),
                        account,
                        dest,
                        true,  // create asset
                        viewJ);
                },
                deliverAmount.asset().value());
            !isTesSuccess(ret))
            return ret;
    }
    // Update balance and period pointer
    STAmount const newBalance = availableBalance - deliverAmount;
    if (newBalance == sleAmount.zeroed())
    {
        // Full period claimed: advance exactly one period and reset next
        // period balance.
        nextClaimTime += frequency;
        sleSub->setFieldU32(sfNextClaimTime, nextClaimTime);
        sleSub->setFieldAmount(sfBalance, sleAmount);
    }
    else
    {
        // Partial claim within the same effective period. Do not advance
        // nextClaimTime; if we had a rollover-forfeit above, we already
        // moved nextClaimTime forward exactly once.
        sleSub->setFieldAmount(sfBalance, newBalance);
    }
    psb.update(sleSub);
    if (sleSub->isFieldPresent(sfExpiration) &&
        currentTime >= sleSub->getFieldU32(sfExpiration))
    {
        // The subscription expired: delete it the same way
        // SubscriptionCancel does — unlink from both owner directories and
        // release the owner's reserve BEFORE erasing. Erasing an entry that
        // is still linked corrupts the owner directories.
        std::uint64_t const ownerPage{(*sleSub)[sfOwnerNode]};
        if (!psb.dirRemove(
                keylet::ownerDir(account), ownerPage, sleSub->key(), true))
        {
            JLOG(j_.fatal()) << "Unable to delete subscription from source.";
            return tefBAD_LEDGER;
        }
        std::uint64_t const destPage{(*sleSub)[sfDestinationNode]};
        if (!psb.dirRemove(
                keylet::ownerDir(dest), destPage, sleSub->key(), true))
        {
            JLOG(j_.fatal())
                << "Unable to delete subscription from destination.";
            return tefBAD_LEDGER;
        }
        auto const sleSrc = psb.peek(keylet::account(account));
        if (!sleSrc)
        {
            JLOG(j_.fatal()) << "Subscription owner account missing.";
            return tefBAD_LEDGER;
        }
        psb.erase(sleSub);
        adjustOwnerCount(psb, sleSrc, -1, viewJ);
    }
    psb.apply(ctx_.rawView());
    return tesSUCCESS;
}
} // namespace ripple

View File

@@ -0,0 +1,48 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_TX_SUBSCRIPTIONCLAIM_H_INCLUDED
#define RIPPLE_TX_SUBSCRIPTIONCLAIM_H_INCLUDED
#include <xrpld/app/tx/detail/Transactor.h>
namespace ripple {
class SubscriptionClaim : public Transactor
{
public:
static constexpr ConsequencesFactoryType ConsequencesFactory{Normal};
explicit SubscriptionClaim(ApplyContext& ctx) : Transactor(ctx)
{
}
static NotTEC
preflight(PreflightContext const& ctx);
static TER
preclaim(PreclaimContext const& ctx);
TER
doApply() override;
};
} // namespace ripple
#endif // RIPPLE_TX_SUBSCRIPTIONCLAIM_H_INCLUDED

View File

@@ -0,0 +1,337 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/misc/SubscriptionHelpers.h>
#include <xrpld/app/paths/Flow.h>
#include <xrpld/app/tx/detail/SubscriptionSet.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/STAccount.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFlags.h>
namespace ripple {
// Per-asset-type validation of the transaction's Amount field, dispatched on
// the concrete issue type (Issue or MPTIssue) via explicit specializations.
template <ValidIssueType T>
static NotTEC
setPreflightHelper(PreflightContext const& ctx);

// IOU (trust-line) amounts: must be non-native, strictly positive, and must
// not use the reserved "bad currency" code.
template <>
NotTEC
setPreflightHelper<Issue>(PreflightContext const& ctx)
{
    STAmount const amount = ctx.tx[sfAmount];
    if (amount.native())
        return temBAD_AMOUNT;
    if (amount <= beast::zero)
        return temBAD_AMOUNT;
    if (amount.getCurrency() == badCurrency())
        return temBAD_CURRENCY;
    return tesSUCCESS;
}
// MPT amounts: only valid once the MPTokensV1 amendment is enabled; must be
// non-native, strictly positive, and within the protocol's MPT maximum.
template <>
NotTEC
setPreflightHelper<MPTIssue>(PreflightContext const& ctx)
{
    if (!ctx.rules.enabled(featureMPTokensV1))
        return temDISABLED;

    auto const amount = ctx.tx[sfAmount];
    if (amount.native())
        return temBAD_AMOUNT;
    if (amount <= beast::zero)
        return temBAD_AMOUNT;
    if (amount.mpt() > MPTAmount{maxMPTokenAmount})
        return temBAD_AMOUNT;
    return tesSUCCESS;
}
/** Stateless validation for SubscriptionSet.

    The transaction has two modes, distinguished by sfSubscriptionID:
    - update (ID present): only sfAmount may accompany the ID; the
      create-only fields (Destination, Frequency, StartTime) are rejected.
    - create (ID absent): Destination, Amount and Frequency are required,
      and the destination must differ from the sender.
    In both modes the Amount is validated per asset type (XRP inline,
    Issue/MPT via setPreflightHelper).
*/
NotTEC
SubscriptionSet::preflight(PreflightContext const& ctx)
{
    // The whole transaction type is gated on the Subscription amendment.
    if (!ctx.rules.enabled(featureSubscription))
        return temDISABLED;
    // No transaction-specific flags are defined.
    if (ctx.tx.getFlags() & tfUniversalMask)
        return temINVALID_FLAG;
    if (auto const ret = preflight1(ctx); !isTesSuccess(ret))
        return ret;
    if (ctx.tx.isFieldPresent(sfSubscriptionID))
    {
        // update
        if (!ctx.tx.isFieldPresent(sfAmount))
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Malformed transaction: SubscriptionID "
                   "is present, but Amount is not.";
            return temMALFORMED;
        }
        // Create-only fields are not allowed on an update.
        if (ctx.tx.isFieldPresent(sfDestination) ||
            ctx.tx.isFieldPresent(sfFrequency) ||
            ctx.tx.isFieldPresent(sfStartTime))
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Malformed transaction: SubscriptionID "
                   "is present, but optional fields are also present.";
            return temMALFORMED;
        }
    }
    else
    {
        // create
        if (!ctx.tx.isFieldPresent(sfDestination) ||
            !ctx.tx.isFieldPresent(sfAmount) ||
            !ctx.tx.isFieldPresent(sfFrequency))
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Malformed transaction: SubscriptionID "
                   "is not present, and required fields are not present.";
            return temMALFORMED;
        }
        // Paying yourself on a schedule is meaningless.
        if (ctx.tx.getAccountID(sfDestination) ==
            ctx.tx.getAccountID(sfAccount))
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Malformed transaction: Account "
                   "is the same as the destination.";
            return temDST_IS_SRC;
        }
    }
    STAmount const amount = ctx.tx.getFieldAmount(sfAmount);
    if (amount.native())
    {
        // XRP: must be a well-formed, strictly positive drops amount.
        if (!isLegalNet(amount) || amount <= beast::zero)
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Malformed transaction: bad amount: "
                << amount.getFullText();
            return temBAD_AMOUNT;
        }
    }
    else
    {
        // IOU / MPT: dispatch to the matching setPreflightHelper
        // specialization based on the asset's concrete type.
        if (auto const ret = std::visit(
                [&]<typename T>(T const&) {
                    return setPreflightHelper<T>(ctx);
                },
                amount.asset().value());
            !isTesSuccess(ret))
            return ret;
    }
    return preflight2(ctx);
}
/** Ledger-dependent validation for SubscriptionSet.

    For an update: the referenced subscription must exist and be owned by
    the sender.  For a create: the destination account must exist, its
    RequireDest flag must be honored, and the frequency must be positive.
    For non-XRP amounts, verifies the token can move from the owner to the
    destination (trust lines, authorization, freezes, ...).
*/
TER
SubscriptionSet::preclaim(PreclaimContext const& ctx)
{
    STAmount const amount = ctx.tx.getFieldAmount(sfAmount);
    AccountID const account = ctx.tx.getAccountID(sfAccount);
    // Destination of the subscription.  On create it comes from the
    // transaction; on update the transaction carries no sfDestination
    // (preflight rejects it), so it must be read from the ledger entry.
    AccountID dest;
    if (ctx.tx.isFieldPresent(sfSubscriptionID))
    {
        // update
        auto sle = ctx.view.read(
            keylet::subscription(ctx.tx.getFieldH256(sfSubscriptionID)));
        if (!sle)
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Subscription does not exist.";
            return tecNO_ENTRY;
        }
        if (sle->getAccountID(sfAccount) != account)
        {
            JLOG(ctx.j.trace()) << "SubscriptionSet: Account is not the "
                                   "owner of the subscription.";
            return tecNO_PERMISSION;
        }
        // FIX: previously dest was read from the (absent) sfDestination
        // field of the transaction, producing a zero account for the
        // transfer check below.  Use the stored destination instead.
        dest = sle->getAccountID(sfDestination);
    }
    else
    {
        // create
        dest = ctx.tx.getAccountID(sfDestination);
        auto const sleDest = ctx.view.read(keylet::account(dest));
        if (!sleDest)
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: Destination account does not exist.";
            return tecNO_DST;
        }
        // Honor the destination's lsfRequireDestTag setting.
        if ((sleDest->getFlags() & lsfRequireDestTag) &&
            !ctx.tx[~sfDestinationTag])
            return tecDST_TAG_NEEDED;
        // sfFrequency is an unsigned field, so "<= 0" can only mean zero:
        // the subscription must recur with a strictly positive period.
        // NOTE(review): returning a tem code from preclaim is atypical;
        // consider moving this check into preflight.
        if (ctx.tx.getFieldU32(sfFrequency) == 0)
        {
            JLOG(ctx.j.trace())
                << "SubscriptionSet: The frequency is less than or equal to 0.";
            return temMALFORMED;
        }
    }
    // For IOU/MPT amounts, verify the token can actually be transferred
    // between the two parties under the current ledger state.
    if (!isXRP(amount))
    {
        if (auto const ret = std::visit(
                [&]<typename T>(T const&) {
                    return canTransferTokenHelper<T>(
                        ctx.view, account, dest, amount, ctx.j);
                },
                amount.asset().value());
            !isTesSuccess(ret))
            return ret;
    }
    return tesSUCCESS;
}
/** Applies SubscriptionSet to the ledger.

    Update mode: rewrites the recurring amount and, optionally, the expiry
    on the existing subscription entry.  Create mode: checks the owner's
    reserve, builds a new Subscription SLE, links it into both the owner's
    and the destination's owner directories, and bumps the owner count.
    All mutations happen inside a sandbox applied only on success.
*/
TER
SubscriptionSet::doApply()
{
    // Work in a sandbox so nothing is committed unless the whole
    // transaction succeeds.
    Sandbox sb(&ctx_.view());
    AccountID const account = ctx_.tx.getAccountID(sfAccount);
    auto const sleAccount = sb.peek(keylet::account(account));
    if (!sleAccount)
    {
        JLOG(ctx_.journal.trace())
            << "SubscriptionSet: Account does not exist.";
        return tecINTERNAL;
    }
    if (ctx_.tx.isFieldPresent(sfSubscriptionID))
    {
        // update: change the recurring amount and optionally the expiry.
        auto sle = sb.peek(
            keylet::subscription(ctx_.tx.getFieldH256(sfSubscriptionID)));
        // FIX: the original dereferenced the peeked SLE unconditionally.
        // preclaim ran against an earlier view, so defensively verify the
        // entry still exists before mutating it.
        if (!sle)
        {
            JLOG(ctx_.journal.trace())
                << "SubscriptionSet: Subscription does not exist.";
            return tecINTERNAL;
        }
        sle->setFieldAmount(sfAmount, ctx_.tx.getFieldAmount(sfAmount));
        if (ctx_.tx.isFieldPresent(sfExpiration))
        {
            auto const currentTime =
                sb.info().parentCloseTime.time_since_epoch().count();
            auto const expiration = ctx_.tx.getFieldU32(sfExpiration);
            // NOTE(review): a tem code from doApply is atypical (tem results
            // normally come from preflight); consider validating this
            // earlier or returning a tec code instead.
            if (expiration < currentTime)
            {
                JLOG(ctx_.journal.trace())
                    << "SubscriptionSet: The expiration time is in the past.";
                return temBAD_EXPIRATION;
            }
            sle->setFieldU32(sfExpiration, expiration);
        }
        sb.update(sle);
    }
    else
    {
        // create: build a brand-new Subscription ledger entry.
        auto const currentTime =
            sb.info().parentCloseTime.time_since_epoch().count();
        auto startTime = currentTime;
        auto nextClaimTime = currentTime;
        {
            // The owner must be able to fund the reserve for one more
            // owned object.
            auto const balance = STAmount((*sleAccount)[sfBalance]).xrp();
            auto const reserve =
                sb.fees().accountReserve((*sleAccount)[sfOwnerCount] + 1);
            if (balance < reserve)
                return tecINSUFFICIENT_RESERVE;
        }
        AccountID const dest = ctx_.tx.getAccountID(sfDestination);
        Keylet const subKeylet =
            keylet::subscription(account, dest, ctx_.tx.getSeqProxy().value());
        auto sle = std::make_shared<SLE>(subKeylet);
        sle->setAccountID(sfAccount, account);
        sle->setAccountID(sfDestination, dest);
        if (ctx_.tx.isFieldPresent(sfDestinationTag))
            sle->setFieldU32(
                sfDestinationTag, ctx_.tx.getFieldU32(sfDestinationTag));
        sle->setFieldAmount(sfAmount, ctx_.tx.getFieldAmount(sfAmount));
        // The claimable balance starts at the full per-period amount.
        sle->setFieldAmount(sfBalance, ctx_.tx.getFieldAmount(sfAmount));
        sle->setFieldU32(sfFrequency, ctx_.tx.getFieldU32(sfFrequency));
        if (ctx_.tx.isFieldPresent(sfStartTime))
        {
            startTime = ctx_.tx.getFieldU32(sfStartTime);
            nextClaimTime = startTime;
            // NOTE(review): tem from doApply — see the comment in the
            // update branch above.
            if (startTime < currentTime)
            {
                JLOG(ctx_.journal.trace())
                    << "SubscriptionSet: The start time is in the past.";
                return temMALFORMED;
            }
        }
        sle->setFieldU32(sfNextClaimTime, nextClaimTime);
        if (ctx_.tx.isFieldPresent(sfExpiration))
        {
            auto const expiration = ctx_.tx.getFieldU32(sfExpiration);
            if (expiration < currentTime)
            {
                JLOG(ctx_.journal.trace())
                    << "SubscriptionSet: The expiration time is in the past.";
                return temBAD_EXPIRATION;
            }
            // An expiry before the first claim would make the subscription
            // unusable.
            if (expiration < nextClaimTime)
            {
                JLOG(ctx_.journal.trace())
                    << "SubscriptionSet: The expiration time is "
                       "less than the next claim time.";
                return temBAD_EXPIRATION;
            }
            sle->setFieldU32(sfExpiration, expiration);
        }
        // Link the entry into the owner's directory...
        {
            auto page = sb.dirInsert(
                keylet::ownerDir(account),
                subKeylet,
                describeOwnerDir(account));
            if (!page)
                return tecDIR_FULL;
            (*sle)[sfOwnerNode] = *page;
        }
        // ...and into the destination's directory so both parties can
        // enumerate it.
        {
            auto page = sb.dirInsert(
                keylet::ownerDir(dest), subKeylet, describeOwnerDir(dest));
            if (!page)
                return tecDIR_FULL;
            (*sle)[sfDestinationNode] = *page;
        }
        // The new object counts against the owner's reserve.
        adjustOwnerCount(sb, sleAccount, 1, ctx_.journal);
        sb.insert(sle);
    }
    sb.apply(ctx_.rawView());
    return tesSUCCESS;
}
} // namespace ripple

View File

@@ -0,0 +1,48 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_TX_SUBSCRIPTIONSET_H_INCLUDED
#define RIPPLE_TX_SUBSCRIPTIONSET_H_INCLUDED
#include <xrpld/app/tx/detail/Transactor.h>
namespace ripple {
class SubscriptionSet : public Transactor
{
public:
static constexpr ConsequencesFactoryType ConsequencesFactory{Normal};
explicit SubscriptionSet(ApplyContext& ctx) : Transactor(ctx)
{
}
static NotTEC
preflight(PreflightContext const& ctx);
static TER
preclaim(PreclaimContext const& ctx);
TER
doApply() override;
};
} // namespace ripple
#endif // RIPPLE_TX_SUBSCRIPTIONSET_H_INCLUDED

View File

@@ -62,6 +62,9 @@
#include <xrpld/app/tx/detail/SetRegularKey.h>
#include <xrpld/app/tx/detail/SetSignerList.h>
#include <xrpld/app/tx/detail/SetTrust.h>
#include <xrpld/app/tx/detail/SubscriptionCancel.h>
#include <xrpld/app/tx/detail/SubscriptionClaim.h>
#include <xrpld/app/tx/detail/SubscriptionSet.h>
#include <xrpld/app/tx/detail/VaultClawback.h>
#include <xrpld/app/tx/detail/VaultCreate.h>
#include <xrpld/app/tx/detail/VaultDelete.h>

View File

@@ -34,21 +34,17 @@ JobQueue::Coro::Coro(
: jq_(jq)
, type_(type)
, name_(name)
, running_(false)
, coro_(
[this, fn = std::forward<F>(f)](
boost::coroutines::asymmetric_coroutine<void>::push_type&
do_yield) {
yield_ = &do_yield;
yield();
// self makes Coro alive until this function returns
std::shared_ptr<Coro> self;
if (!shouldStop())
{
self = shared_from_this();
fn(self);
}
state_ = CoroState::Finished;
cv_.notify_all();
fn(shared_from_this());
#ifndef NDEBUG
finished_ = true;
#endif
},
boost::coroutines::attributes(megabytes(1)))
{
@@ -56,36 +52,17 @@ JobQueue::Coro::Coro(
inline JobQueue::Coro::~Coro()
{
XRPL_ASSERT(
state_ != CoroState::Running,
"ripple::JobQueue::Coro::~Coro : is not running");
exiting_ = true;
// Resume the coroutine so that it has a chance to clean things up
if (state_ == CoroState::Suspended)
{
resume();
}
#ifndef NDEBUG
XRPL_ASSERT(
state_ == CoroState::Finished,
"ripple::JobQueue::Coro::~Coro : is finished");
XRPL_ASSERT(finished_, "ripple::JobQueue::Coro::~Coro : is finished");
#endif
}
inline void
JobQueue::Coro::yield()
JobQueue::Coro::yield() const
{
{
std::lock_guard lock(jq_.m_mutex);
if (shouldStop())
{
return;
}
state_ = CoroState::Suspended;
++jq_.nSuspend_;
jq_.m_suspendedCoros[this] = weak_from_this();
jq_.cv_.notify_all();
}
(*yield_)();
}
@@ -93,6 +70,11 @@ JobQueue::Coro::yield()
inline bool
JobQueue::Coro::post()
{
{
std::lock_guard lk(mutex_run_);
running_ = true;
}
// sp keeps 'this' alive
if (jq_.addJob(
type_, name_, [this, sp = shared_from_this()]() { resume(); }))
@@ -100,6 +82,9 @@ JobQueue::Coro::post()
return true;
}
// The coroutine will not run. Clean up running_.
std::lock_guard lk(mutex_run_);
running_ = false;
cv_.notify_all();
return false;
}
@@ -109,17 +94,11 @@ JobQueue::Coro::resume()
{
{
std::lock_guard lk(mutex_run_);
if (state_ != CoroState::Suspended)
{
return;
}
state_ = CoroState::Running;
running_ = true;
}
{
std::lock_guard lock(jq_.m_mutex);
jq_.m_suspendedCoros.erase(this);
--jq_.nSuspend_;
jq_.cv_.notify_all();
}
auto saved = detail::getLocalValues().release();
detail::getLocalValues().reset(&lvs_);
@@ -130,6 +109,9 @@ JobQueue::Coro::resume()
coro_();
detail::getLocalValues().release();
detail::getLocalValues().reset(saved);
std::lock_guard lk(mutex_run_);
running_ = false;
cv_.notify_all();
}
inline bool
@@ -138,11 +120,32 @@ JobQueue::Coro::runnable() const
return static_cast<bool>(coro_);
}
inline void
JobQueue::Coro::expectEarlyExit()
{
#ifndef NDEBUG
if (!finished_)
#endif
{
// expectEarlyExit() must only ever be called from outside the
// Coro's stack. If you're inside the stack you can simply return
// and be done.
//
// That said, since we're outside the Coro's stack, we need to
// decrement the nSuspend that the Coro's call to yield caused.
std::lock_guard lock(jq_.m_mutex);
--jq_.nSuspend_;
#ifndef NDEBUG
finished_ = true;
#endif
}
}
inline void
JobQueue::Coro::join()
{
std::unique_lock<std::mutex> lk(mutex_run_);
cv_.wait(lk, [this]() { return state_ != CoroState::Running; });
cv_.wait(lk, [this]() { return running_ == false; });
}
} // namespace ripple

View File

@@ -60,22 +60,20 @@ public:
/** Coroutines must run to completion. */
class Coro : public std::enable_shared_from_this<Coro>
{
friend class JobQueue;
private:
enum class CoroState { None, Suspended, Running, Finished };
std::atomic_bool exiting_ = false;
detail::LocalValues lvs_;
JobQueue& jq_;
JobType type_;
std::string name_;
std::atomic<CoroState> state_ = CoroState::None;
bool running_;
std::mutex mutex_;
std::mutex mutex_run_;
std::condition_variable cv_;
boost::coroutines::asymmetric_coroutine<void>::pull_type coro_;
boost::coroutines::asymmetric_coroutine<void>::push_type* yield_;
#ifndef NDEBUG
bool finished_ = false;
#endif
public:
// Private: Used in the implementation
@@ -99,7 +97,7 @@ public:
post.
*/
void
yield();
yield() const;
/** Schedule coroutine execution.
Effects:
@@ -133,13 +131,13 @@ public:
bool
runnable() const;
/** Once called, the Coro allows early exit without an assert. */
void
expectEarlyExit();
/** Waits until coroutine returns from the user function. */
void
join();
/** Returns true if the coroutine should stop executing */
bool
shouldStop() const;
};
using JobFunction = std::function<void()>;
@@ -169,10 +167,6 @@ public:
bool
addJob(JobType type, std::string const& name, JobHandler&& jobHandler)
{
if (!accepting_)
{
return false;
}
if (auto optionalCountedJob =
jobCounter_.wrap(std::forward<JobHandler>(jobHandler)))
{
@@ -255,7 +249,6 @@ private:
std::uint64_t m_lastJob;
std::set<Job> m_jobSet;
JobCounter jobCounter_;
std::atomic_bool accepting_ = true;
std::atomic_bool stopping_{false};
std::atomic_bool stopped_{false};
JobDataMap m_jobData;
@@ -267,8 +260,6 @@ private:
// The number of suspended coroutines
int nSuspend_ = 0;
std::map<void*, std::weak_ptr<Coro>> m_suspendedCoros;
Workers m_workers;
// Statistics tracking
@@ -279,25 +270,6 @@ private:
std::condition_variable cv_;
void
onStopResumeCoros(std::map<void*, std::weak_ptr<Coro>>& coros)
{
for (auto& [_, coro] : coros)
{
if (auto coroPtr = coro.lock())
{
if (auto optionalCountedJob =
jobCounter_.wrap([=]() { coroPtr->resume(); }))
{
addRefCountedJob(
coroPtr->type_,
coroPtr->name_,
std::move(*optionalCountedJob));
}
}
}
}
void
collect();
JobTypeData&
@@ -440,10 +412,6 @@ template <class F>
std::shared_ptr<JobQueue::Coro>
JobQueue::postCoro(JobType t, std::string const& name, F&& f)
{
if (!accepting_)
{
return nullptr;
}
/* First param is a detail type to make construction private.
Last param is the function the coroutine runs. Signature of
void(std::shared_ptr<Coro>).
@@ -454,6 +422,7 @@ JobQueue::postCoro(JobType t, std::string const& name, F&& f)
{
// The Coro was not successfully posted. Disable it so its destructor
// can run with no negative side effects. Then destroy it.
coro->expectEarlyExit();
coro.reset();
}
return coro;

View File

@@ -26,12 +26,6 @@
namespace ripple {
bool
JobQueue::Coro::shouldStop() const
{
return jq_.stopping_ || jq_.stopped_ || !jq_.accepting_ || exiting_;
}
JobQueue::JobQueue(
int threadCount,
beast::insight::Collector::ptr const& collector,
@@ -301,22 +295,6 @@ JobQueue::getJobTypeData(JobType type)
void
JobQueue::stop()
{
// Once we stop accepting new jobs, all running coroutines won't be able to
// get suspended and yield() will return immediately, so we can safely
// move m_suspendedCoros, and we can assume that no coroutine will be
// suspended in the future.
std::map<void*, std::weak_ptr<Coro>> suspendedCoros;
{
std::unique_lock lock(m_mutex);
accepting_ = false;
suspendedCoros = std::move(m_suspendedCoros);
}
if (!suspendedCoros.empty())
{
// We should resume the suspended coroutines so that the coroutines
// get a chance to exit cleanly.
onStopResumeCoros(suspendedCoros);
}
stopping_ = true;
using namespace std::chrono_literals;
jobCounter_.join("JobQueue", 1s, m_journal);
@@ -327,9 +305,8 @@ JobQueue::stop()
// `Job::doJob` and the return of `JobQueue::processTask`. That is why
// we must wait on the condition variable to make these assertions.
std::unique_lock<std::mutex> lock(m_mutex);
cv_.wait(lock, [this] {
return m_processCount == 0 && nSuspend_ == 0 && m_jobSet.empty();
});
cv_.wait(
lock, [this] { return m_processCount == 0 && m_jobSet.empty(); });
XRPL_ASSERT(
m_processCount == 0,
"ripple::JobQueue::stop : all processes completed");

View File

@@ -681,6 +681,32 @@ parseXChainOwnedCreateAccountClaimID(
return keylet.key;
}
static Expected<uint256, Json::Value>
parseSubscription(Json::Value const& params, Json::StaticString const fieldName)
{
if (!params.isObject())
{
return parseObjectID(params, fieldName);
}
auto const account = LedgerEntryHelpers::requiredAccountID(
params, jss::account, "malformedAccount");
if (!account)
return Unexpected(account.error());
auto const destination = LedgerEntryHelpers::requiredAccountID(
params, jss::destination, "malformedDestination");
if (!destination)
return Unexpected(destination.error());
auto const seq = LedgerEntryHelpers::requiredUInt32(
params, jss::seq, "malformedRequest");
if (!seq)
return Unexpected(seq.error());
return keylet::subscription(*account, *destination, *seq).key;
}
using FunctionType = Expected<uint256, Json::Value> (*)(
Json::Value const&,
Json::StaticString const);

View File

@@ -128,17 +128,21 @@ doRipplePathFind(RPC::JsonContext& context)
// May 2017
jvResult = context.app.getPathRequests().makeLegacyPathRequest(
request,
[coro = context.coro]() {
// Capturing the shared_ptr keeps the coroutine alive up
[&context]() {
// Copying the shared_ptr keeps the coroutine alive up
// through the return. Otherwise the storage under the
// captured reference could evaporate when we return from
// coro->post().
// When post() failed, we won't get a thread to let
// the Coro finish. We should ignore the coroutine and
// let it destruct, as the JobQueue has been signaled to
// close, and resuming it manually messes up the internal
// state in JobQueue.
coro->post();
// coroCopy->resume(). This is not strictly necessary, but
// will make maintenance easier.
std::shared_ptr<JobQueue::Coro> coroCopy{context.coro};
if (!coroCopy->post())
{
// The post() failed, so we won't get a thread to let
// the Coro finish. We'll call Coro::resume() so the
// Coro can finish on our thread. Otherwise the
// application will hang on shutdown.
coroCopy->resume();
}
},
context.consumer,
lpLedger,
@@ -146,14 +150,6 @@ doRipplePathFind(RPC::JsonContext& context)
if (request)
{
context.coro->yield();
// Each time after we resume from yield(), we should
// check if cancellation has been requested. It would
// be a lot more elegant if we replace boost coroutine
// with c++ standard coroutine.
if (context.coro->shouldStop())
{
return jvResult;
}
jvResult = request->doStatus(context.params);
}