Compare commits

...

12 Commits

Author           SHA1        Date                        Message
RichardAH        8329d78f32  2025-12-21 13:42:46 +10:00  Update src/ripple/app/tx/impl/Import.cpp (Co-authored-by: tequ <git@tequ.dev>)
RichardAH        bf4579c1d1  2025-12-21 13:42:37 +10:00  Update src/ripple/app/tx/impl/Change.cpp (Co-authored-by: tequ <git@tequ.dev>)
RichardAH        73e099eb23  2025-12-21 13:42:29 +10:00  Update src/ripple/app/hook/impl/applyHook.cpp (Co-authored-by: tequ <git@tequ.dev>)
RichardAH        2e311b4259  2025-12-21 13:42:20 +10:00  Update src/ripple/app/hook/applyHook.h (Co-authored-by: tequ <git@tequ.dev>)
RichardAH        7c8e940091  2025-12-19 13:27:02 +10:00  Merge branch 'dev' into export
Richard Holland  9b90c50789  2025-12-19 14:19:17 +11:00  featureExport compiling, untested
Niq Dudfield     5a118a4e2b  2025-12-17 09:45:41 +10:00  fix(logs): formatting fixes, color handling, and debug build defaults (#607)
tequ             960f87857e  2025-12-17 09:43:25 +10:00  Self hosted macos runner (#652)
tequ             f731bcfeba  2025-12-16 14:45:45 +10:00  Increase ccache size from 10G to 100G in release-builder.sh for improved build performance (#643)
Richard Holland  a18e2cb2c6  2025-12-14 19:04:37 +11:00  remainder of the export feature... untested uncompiled
Richard Holland  be5f425122  2025-12-14 13:27:44 +11:00  change symbol name to xport
Richard Holland  fc6f4762da  2025-12-13 15:46:08 +11:00  export hook apis, untested
37 changed files with 797 additions and 178 deletions

View File

@@ -75,37 +75,17 @@ runs:
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT
- name: Restore ccache directory for main branch
if: inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true'
id: ccache-restore
uses: ./.github/actions/xahau-ga-cache-restore
with:
path: ~/.ccache-main
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
cache-type: ccache-main
- name: Restore ccache directory for current branch
if: inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
id: ccache-restore-current-branch
uses: ./.github/actions/xahau-ga-cache-restore
with:
path: ~/.ccache-current
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
cache-type: ccache-current
- name: Configure ccache
if: inputs.ccache_enabled == 'true'
shell: bash
run: |
# Create cache directories
mkdir -p ~/.ccache-main ~/.ccache-current
mkdir -p ~/.ccache-cache
# Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
mkdir -p ~/.config/ccache
export CCACHE_CONFIGPATH="$HOME/.config/ccache/ccache.conf"
echo "CCACHE_CONFIGPATH=$CCACHE_CONFIGPATH" >> $GITHUB_ENV
# Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
mkdir -p ~/.config/ccache
@@ -116,20 +96,9 @@ runs:
ccache --set-config=max_size=${{ inputs.ccache_max_size }}
ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}
# Determine if we're on the main branch
if [ "${{ steps.safe-branch.outputs.name }}" = "${{ inputs.main_branch }}" ]; then
# Main branch: use main branch cache only
ccache --set-config=cache_dir="$HOME/.ccache-main"
echo "CCACHE_DIR=$HOME/.ccache-main" >> $GITHUB_ENV
echo "📦 Main branch: using ~/.ccache-main"
else
# Feature branch: use current branch cache with main as secondary (read-only fallback)
ccache --set-config=cache_dir="$HOME/.ccache-current"
ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
echo "CCACHE_DIR=$HOME/.ccache-current" >> $GITHUB_ENV
echo "📦 Feature branch: using ~/.ccache-current with ~/.ccache-main as secondary"
fi
ccache --set-config=cache_dir="$HOME/.ccache-cache"
echo "CCACHE_DIR=$HOME/.ccache-cache" >> $GITHUB_ENV
echo "📦 using ~/.ccache-cache as ccache cache directory"
# Print config for verification
echo "=== ccache configuration ==="
@@ -244,17 +213,3 @@ runs:
if: inputs.ccache_enabled == 'true'
shell: bash
run: ccache -s
- name: Save ccache directory for main branch
if: success() && inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache-main
key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
- name: Save ccache directory for current branch
if: success() && inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache-current
key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}

View File

@@ -17,10 +17,6 @@ inputs:
description: 'Cache version for invalidation'
required: false
default: '1'
gha_cache_enabled:
description: 'Whether to use actions/cache (disable for self-hosted with volume mounts)'
required: false
default: 'true'
main_branch:
description: 'Main branch name for restore keys'
required: false
@@ -63,18 +59,14 @@ outputs:
runs:
using: 'composite'
steps:
- name: Restore Conan cache
if: inputs.gha_cache_enabled == 'true'
id: cache-restore-conan
uses: ./.github/actions/xahau-ga-cache-restore
with:
path: ~/.conan2
# Note: compiler-id format is compiler-version-stdlib[-gccversion]
key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-${{ inputs.configuration }}
restore-keys: |
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
cache-type: Conan
- name: Configure Conan cache paths
if: inputs.os == 'Linux'
shell: bash
run: |
mkdir -p /.conan-cache/conan2 /.conan-cache/conan2_download /.conan-cache/conan2_sources
echo 'core.cache:storage_path=/.conan-cache/conan2' > ~/.conan2/global.conf
echo 'core.download:download_cache=/.conan-cache/conan2_download' >> ~/.conan2/global.conf
echo 'core.sources:download_cache=/.conan-cache/conan2_sources' >> ~/.conan2/global.conf
- name: Configure Conan cache paths
if: inputs.gha_cache_enabled == 'false'
@@ -161,10 +153,3 @@ runs:
--build missing \
--settings build_type=${{ inputs.configuration }} \
..
- name: Save Conan cache
if: success() && inputs.gha_cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
path: ~/.conan2
key: ${{ steps.cache-restore-conan.outputs.cache-primary-key }}

View File

@@ -20,7 +20,7 @@ jobs:
- Ninja
configuration:
- Debug
runs-on: macos-15
runs-on: [self-hosted, macOS]
env:
build_dir: .build
# Bump this number to invalidate all caches globally.
@@ -30,61 +30,29 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Get commit message
id: get-commit-message
uses: ./.github/actions/xahau-ga-get-commit-message
with:
event-name: ${{ github.event_name }}
head-commit-message: ${{ github.event.head_commit.message }}
pr-head-sha: ${{ github.event.pull_request.head.sha }}
- name: Install Conan
- name: Add Homebrew to PATH
run: |
brew install conan
# Verify Conan 2 is installed
conan --version
echo "/opt/homebrew/bin" >> "$GITHUB_PATH"
echo "/opt/homebrew/sbin" >> "$GITHUB_PATH"
- name: Install Coreutils
run: |
brew install coreutils
echo "Num proc: $(nproc)"
- name: Install Ninja
if: matrix.generator == 'Ninja'
run: brew install ninja
# Instead of installing tools globally with brew, use mise to isolate
# the toolchain for each runner's directory.
- name: Setup toolchain (mise)
uses: jdx/mise-action@v2
with:
install: true
- name: Install Python
- name: Install tools via mise
run: |
if which python3 > /dev/null 2>&1; then
echo "Python 3 executable exists"
python3 --version
else
brew install python@3.12
fi
# Create 'python' symlink if it doesn't exist (for tools expecting 'python')
if ! which python > /dev/null 2>&1; then
sudo ln -sf $(which python3) /usr/local/bin/python
fi
- name: Install CMake
run: |
# Install CMake 3.x to match local dev environments
# With Conan 2 and the policy args passed to CMake, newer versions
# can have issues with dependencies that require cmake_minimum_required < 3.5
brew uninstall cmake --ignore-dependencies 2>/dev/null || true
# Download and install CMake 3.31.7 directly
curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz
tar -xzf cmake.tar.gz
# Move the entire CMake.app to /Applications
sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/
echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH
/Applications/CMake.app/Contents/bin/cmake --version
- name: Install ccache
run: brew install ccache
mise install
mise use cmake@3.23.1 python@3.12 pipx@latest conan@2 ninja@latest ccache@latest
mise reshim
echo "$HOME/.local/share/mise/shims" >> "$GITHUB_PATH"
- name: Check environment
run: |
@@ -98,6 +66,14 @@ jobs:
echo "---- Full Environment ----"
env
- name: Get commit message
id: get-commit-message
uses: ./.github/actions/xahau-ga-get-commit-message
with:
event-name: ${{ github.event_name }}
head-commit-message: ${{ github.event.head_commit.message }}
pr-head-sha: ${{ github.event.pull_request.head.sha }}
- name: Detect compiler version
id: detect-compiler
run: |
@@ -129,6 +105,7 @@ jobs:
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: libcxx
ccache_max_size: '100G'
- name: Test
run: |

View File

@@ -181,8 +181,7 @@ jobs:
image: ubuntu:24.04
volumes:
- /home/runner/.conan-cache:/.conan-cache
- /home/runner/.ccache-main:/github/home/.ccache-main
- /home/runner/.ccache-current:/github/home/.ccache-current
- /home/runner/.ccache-cache:/github/home/.ccache-cache
defaults:
run:
shell: bash
@@ -325,7 +324,6 @@ jobs:
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: ${{ matrix.stdlib }}
clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
gha_cache_enabled: 'false' # Disable caching for self hosted runner
ccache_max_size: '100G'
- name: Set artifact name

View File

@@ -48,13 +48,9 @@ target_sources (xrpl_core PRIVATE
src/ripple/beast/net/impl/IPAddressV6.cpp
src/ripple/beast/net/impl/IPEndpoint.cpp
src/ripple/beast/utility/src/beast_Journal.cpp
src/ripple/beast/utility/src/beast_PropertyStream.cpp)
# Conditionally add enhanced logging source when BEAST_ENHANCED_LOGGING is enabled
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
target_sources(xrpl_core PRIVATE
src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
endif()
src/ripple/beast/utility/src/beast_PropertyStream.cpp
# Enhanced logging - compiles to empty when BEAST_ENHANCED_LOGGING is not defined
src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
#[===============================[
core sources
@@ -162,12 +158,16 @@ target_link_libraries (xrpl_core
date::date
Ripple::opts)
# Link date-tz library when enhanced logging is enabled
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
if(TARGET date::date-tz)
target_link_libraries(xrpl_core PUBLIC date::date-tz)
endif()
# date-tz for enhanced logging (always linked, code is #ifdef guarded)
if(TARGET date::date-tz)
target_link_libraries(xrpl_core PUBLIC date::date-tz)
endif()
# BEAST_ENHANCED_LOGGING: enable for Debug builds OR when explicitly requested
# Uses generator expression so it works with multi-config generators (Xcode, VS, Ninja Multi-Config)
target_compile_definitions(xrpl_core PUBLIC
$<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
)
#[=================================[
main/core headers installation
#]=================================]
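
The reason beast_EnhancedLogging.cpp can now be listed unconditionally in target_sources is the translation-unit-level guard shown later in this diff: the entire file body sits inside #ifdef BEAST_ENHANCED_LOGGING. A minimal sketch of that pattern (illustrative names, not the actual file contents):

// Compiles to an empty translation unit when BEAST_ENHANCED_LOGGING is not
// defined, so the file can be listed unconditionally in the build.
#ifdef BEAST_ENHANCED_LOGGING

#include <iostream>

namespace beast::detail {
void
enhanced_logging_marker()
{
    std::cout << "enhanced logging enabled\n";
}
} // namespace beast::detail

#endif // BEAST_ENHANCED_LOGGING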

View File

@@ -37,20 +37,11 @@ endif() #git
set(SOURCE_ROOT_PATH "${CMAKE_CURRENT_SOURCE_DIR}/src/")
add_definitions(-DSOURCE_ROOT_PATH="${SOURCE_ROOT_PATH}")
# BEAST_ENHANCED_LOGGING option - adds file:line numbers and formatting to logs
# Default to ON for Debug builds, OFF for Release
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" ON)
else()
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" OFF)
endif()
if(BEAST_ENHANCED_LOGGING)
add_definitions(-DBEAST_ENHANCED_LOGGING=1)
message(STATUS "Log line numbers enabled")
else()
message(STATUS "Log line numbers disabled")
endif()
# BEAST_ENHANCED_LOGGING - adds file:line numbers and formatting to logs
# Automatically enabled for Debug builds via generator expression
# Can be explicitly controlled with -DBEAST_ENHANCED_LOGGING=ON/OFF
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages (auto: Debug=ON, Release=OFF)" OFF)
message(STATUS "BEAST_ENHANCED_LOGGING option: ${BEAST_ENHANCED_LOGGING}")
if(thread_safety_analysis)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)

View File

@@ -192,7 +192,7 @@ ENV PATH=/usr/local/bin:$PATH
# Configure ccache and Conan 2
# NOTE: Using echo commands instead of heredocs because heredocs in Docker RUN commands are finicky
RUN /hbb_exe/activate-exec bash -c "ccache -M 10G && \
RUN /hbb_exe/activate-exec bash -c "ccache -M 100G && \
ccache -o cache_dir=/cache/ccache && \
ccache -o compiler_check=content && \
mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \

View File

@@ -350,7 +350,10 @@ enum hook_return_code : int64_t {
MEM_OVERLAP = -43, // one or more specified buffers are the same memory
TOO_MANY_STATE_MODIFICATIONS = -44, // more than 5000 modified state
// entries in the combined hook chains
TOO_MANY_NAMESPACES = -45
TOO_MANY_NAMESPACES = -45,
EXPORT_FAILURE = -46,
TOO_MANY_EXPORTED_TXN = -47,
};
enum ExitType : uint8_t {
@@ -364,6 +367,7 @@ const uint16_t max_state_modifications = 256;
const uint8_t max_slots = 255;
const uint8_t max_nonce = 255;
const uint8_t max_emit = 255;
const uint8_t max_export = 4;
const uint8_t max_params = 16;
const double fee_base_multiplier = 1.1f;
@@ -469,6 +473,13 @@ static const APIWhitelist import_whitelist_1{
// clang-format on
};
static const APIWhitelist import_whitelist_2{
// clang-format off
HOOK_API_DEFINITION(I64, xport, (I32, I32)),
HOOK_API_DEFINITION(I64, xport_reserve, (I32)),
// clang-format on
};
#undef HOOK_API_DEFINITION
#undef I32
#undef I64

View File

@@ -1034,6 +1034,12 @@ validateGuards(
{
// PASS, this is a version 1 api
}
else if (rulesVersion & 0x04U &&
hook_api::import_whitelist_2.find(import_name) !=
hook_api::import_whitelist_2.end())
{
// PASS, this is an export api
}
else
{
GUARDLOG(hook::log::IMPORT_ILLEGAL)

View File

@@ -406,6 +406,17 @@ DECLARE_HOOK_FUNCTION(
uint32_t slot_no_tx,
uint32_t slot_no_meta);
DECLARE_HOOK_FUNCTION(
int64_t,
xport,
uint32_t write_ptr,
uint32_t write_len,
uint32_t read_ptr,
uint32_t read_len);
DECLARE_HOOK_FUNCTION(
int64_t,
xport_reserve,
uint32_t count);
/*
DECLARE_HOOK_FUNCTION(int64_t, str_find, uint32_t hread_ptr,
uint32_t hread_len, uint32_t nread_ptr, uint32_t nread_len, uint32_t mode,
@@ -485,6 +496,8 @@ struct HookResult
std::queue<std::shared_ptr<ripple::Transaction>>
emittedTxn{}; // etx stored here until accept/rollback
std::queue<std::shared_ptr<ripple::Transaction>>
exportedTxn{};
HookStateMap& stateMap;
uint16_t changedStateCount = 0;
std::map<
@@ -541,6 +554,7 @@ struct HookContext
uint16_t ledger_nonce_counter{0};
int64_t expected_etxn_count{-1}; // make this a 64bit int so the uint32
// from the hookapi cant overflow it
int64_t expected_export_count{-1};
std::map<ripple::uint256, bool> nonce_used{};
uint32_t generation =
0; // used for caching, only generated when txn_generation is called
@@ -877,6 +891,9 @@ public:
ADD_HOOK_FUNCTION(meta_slot, ctx);
ADD_HOOK_FUNCTION(xpop_slot, ctx);
ADD_HOOK_FUNCTION(xport, ctx);
ADD_HOOK_FUNCTION(xport_reserve, ctx);
/*
ADD_HOOK_FUNCTION(str_find, ctx);
ADD_HOOK_FUNCTION(str_replace, ctx);

View File

@@ -79,7 +79,7 @@ main(int argc, char** argv)
close(fd);
auto result = validateGuards(hook, std::cout, "", 3);
auto result = validateGuards(hook, std::cout, "", 7);
if (!result)
{
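
Passing 7 instead of 3 reflects that the guard rules version is a bitmask of admitted API whitelists: bit 0x04 now unlocks import_whitelist_2 (the xport APIs), as checked in validateGuards above. Below is a minimal sketch of how the amendment-gated call sites later in this diff (Change::activateXahauGenesis, SetHook::validateHookSetEntry) assemble the same mask; the helper itself is illustrative, not code from the change:

#include <cstdint>

std::uint64_t
guardRulesVersion(bool hooksUpdate1, bool fix20250131, bool featureExport)
{
    std::uint64_t v = 0;
    if (hooksUpdate1)
        v |= 0x01U; // admit version 1 APIs (import_whitelist_1)
    if (fix20250131)
        v |= 0x02U;
    if (featureExport)
        v |= 0x04U; // admit the export APIs (import_whitelist_2)
    return v;
}
// The standalone guard checker passes 7 == 0x01 | 0x02 | 0x04: all whitelists.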

View File

@@ -1971,6 +1971,8 @@ hook::finalizeHookResult(
// directory) if we are allowed to
std::vector<std::pair<uint256 /* txnid */, uint256 /* emit nonce */>>
emission_txnid;
std::vector<uint256 /* txnid */>
exported_txnid;
if (doEmit)
{
@@ -2026,6 +2028,58 @@ hook::finalizeHookResult(
}
}
}
DBG_PRINTF("exported txn count: %zu\n", hookResult.exportedTxn.size());
for (; hookResult.exportedTxn.size() > 0; hookResult.exportedTxn.pop())
{
auto& tpTrans = hookResult.exportedTxn.front();
auto& id = tpTrans->getID();
JLOG(j.trace()) << "HookExport[" << HR_ACC() << "]: " << id;
// exported txns must be marked bad by the hash router to ensure that
// under no circumstances they enter consensus on *this* chain.
applyCtx.app.getHashRouter().setFlags(id, SF_BAD);
std::shared_ptr<const ripple::STTx> ptr =
tpTrans->getSTransaction();
auto exportedId = keylet::exportedTxn(id);
auto sleExported = applyCtx.view().peek(exportedId);
if (!sleExported)
{
exported_txnid.emplace_back(id);
sleExported = std::make_shared<SLE>(exportedId);
// RH TODO: add a new constructor to STObject to avoid this
// serder thing
ripple::Serializer s;
ptr->add(s);
SerialIter sit(s.slice());
sleExported->emplace_back(ripple::STObject(sit, sfExportedTxn));
auto page = applyCtx.view().dirInsert(
keylet::exportedDir(), exportedId, [&](SLE::ref sle) {
(*sle)[sfFlags] = lsfEmittedDir;
});
if (page)
{
(*sleExported)[sfOwnerNode] = *page;
applyCtx.view().insert(sleExported);
}
else
{
JLOG(j.warn())
<< "HookError[" << HR_ACC() << "]: "
<< "Export Directory full when trying to insert "
<< id;
return tecDIR_FULL;
}
}
}
}
bool const fixV2 = applyCtx.view().rules().enabled(fixXahauV2);
@@ -2052,6 +2106,12 @@ hook::finalizeHookResult(
meta.setFieldU16(
sfHookEmitCount,
emission_txnid.size()); // this will never wrap, hard limit
if (applyCtx.view().rules().enabled(featureExport))
{
meta.setFieldU16(
sfHookExportCount,
exported_txnid.size());
}
meta.setFieldU16(sfHookExecutionIndex, exec_index);
meta.setFieldU16(sfHookStateChangeCount, hookResult.changedStateCount);
meta.setFieldH256(sfHookHash, hookResult.hookHash);
@@ -3888,6 +3948,27 @@ DEFINE_HOOK_FUNCTION(int64_t, etxn_reserve, uint32_t count)
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(int64_t, xport_reserve, uint32_t count)
{
HOOK_SETUP(); // populates memory_ctx, memory, memory_length, applyCtx,
// hookCtx on current stack
if (hookCtx.expected_export_count > -1)
return ALREADY_SET;
if (count < 1)
return TOO_SMALL;
if (count > hook_api::max_export)
return TOO_BIG;
hookCtx.expected_export_count = count;
return count;
HOOK_TEARDOWN();
}
// Compute the burden of an emitted transaction based on a number of factors
DEFINE_HOOK_FUNCNARG(int64_t, etxn_burden)
{
@@ -6156,6 +6237,92 @@ DEFINE_HOOK_FUNCTION(
HOOK_TEARDOWN();
}
DEFINE_HOOK_FUNCTION(
int64_t,
xport,
uint32_t write_ptr,
uint32_t write_len,
uint32_t read_ptr,
uint32_t read_len)
{
HOOK_SETUP();
if (NOT_IN_BOUNDS(read_ptr, read_len, memory_length))
return OUT_OF_BOUNDS;
if (NOT_IN_BOUNDS(write_ptr, write_len, memory_length))
return OUT_OF_BOUNDS;
if (write_len < 32)
return TOO_SMALL;
auto& app = hookCtx.applyCtx.app;
if (hookCtx.expected_export_count < 0)
return PREREQUISITE_NOT_MET;
if (hookCtx.result.exportedTxn.size() >= hookCtx.expected_export_count)
return TOO_MANY_EXPORTED_TXN;
ripple::Blob blob{memory + read_ptr, memory + read_ptr + read_len};
std::shared_ptr<STTx const> stpTrans;
try
{
stpTrans = std::make_shared<STTx const>(
SerialIter{memory + read_ptr, read_len});
}
catch (std::exception& e)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC() << "]: Failed " << e.what()
<< "\n";
return EXPORT_FAILURE;
}
if (!stpTrans->isFieldPresent(sfAccount) ||
stpTrans->getAccountID(sfAccount) != hookCtx.result.account)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: Attempted to export a txn that's not for this Hook's Account ID.";
return EXPORT_FAILURE;
}
std::string reason;
auto tpTrans = std::make_shared<Transaction>(stpTrans, reason, app);
// RHTODO: is this needed or wise? VVV
if (tpTrans->getStatus() != NEW)
{
JLOG(j.trace()) << "HookExport[" << HC_ACC()
<< "]: tpTrans->getStatus() != NEW";
return EXPORT_FAILURE;
}
auto const& txID = tpTrans->getID();
if (txID.size() > write_len)
return TOO_SMALL;
if (NOT_IN_BOUNDS(write_ptr, txID.size(), memory_length))
return OUT_OF_BOUNDS;
auto const write_txid = [&]() -> int64_t {
WRITE_WASM_MEMORY_AND_RETURN(
write_ptr,
txID.size(),
txID.data(),
txID.size(),
memory,
memory_length);
};
int64_t result = write_txid();
if (result == 32)
hookCtx.result.exportedTxn.push(tpTrans);
return result;
HOOK_TEARDOWN();
}
/*
DEFINE_HOOK_FUNCTION(
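
For orientation, a hook would drive xport the same way it drives etxn_reserve/emit: reserve first, then pass a fully serialized transaction for the hook's own account and receive the 32-byte transaction id back. An untested sketch, assuming the standard hookapi.h macros (SBUF, rollback, accept) and eliding the serialization step:

#include "hookapi.h"

int64_t hook(uint32_t reserved)
{
    // xport requires a prior reservation (PREREQUISITE_NOT_MET otherwise);
    // at most max_export (4) exports per execution.
    if (xport_reserve(1) != 1)
        rollback(SBUF("xport_reserve failed"), 1);

    uint8_t txn[512];
    int64_t txn_len = 0; // ... serialize a txn whose sfAccount is this hook's account ...

    uint8_t txid[32]; // on success, xport writes the 32-byte txn id here
    int64_t r = xport(txid, 32, txn, txn_len);
    if (r != 32)
        rollback(SBUF("xport failed"), r);

    return accept(SBUF("exported"), 0);
}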

View File

@@ -599,6 +599,13 @@ public:
return validatorKeys_.publicKey;
}
ValidatorKeys const&
getValidatorKeys() const override
{
return validatorKeys_;
}
NetworkOPs&
getOPs() override
{

View File

@@ -240,7 +240,8 @@ public:
virtual PublicKey const&
getValidationPublicKey() const = 0;
virtual ValidatorKeys const&
getValidatorKeys() const = 0;
virtual Resource::Manager&
getResourceManager() = 0;
virtual PathRequests&

View File

@@ -27,6 +27,8 @@
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/jss.h>
#include <ripple/protocol/st.h>
#include <ripple/app/misc/ValidatorKeys.h>
#include <ripple/protocol/Sign.h>
#include <algorithm>
#include <limits>
#include <numeric>
@@ -1539,6 +1541,247 @@ TxQ::accept(Application& app, OpenView& view)
}
}
// Inject exported transactions/signatures, if any
if (view.rules().enabled(featureExport))
{
do
{
// if we're not a validator we do nothing here
if (app.getValidationPublicKey().empty())
break;
auto const& keys = app.getValidatorKeys();
if (keys.configInvalid())
break;
// and if we're not on the UNLReport we also do nothing
auto const unlRep = view.read(keylet::UNLReport());
if (!unlRep || !unlRep->isFieldPresent(sfActiveValidators))
{
// nothing to do without a unlreport object
break;
}
bool found = false;
auto const& avs = unlRep->getFieldArray(sfActiveValidators);
for (auto const& av : avs)
{
if (PublicKey(av[sfPublicKey]) == keys.masterPublicKey)
{
found = true;
break;
}
}
if (!found)
break;
// execution to here means we're a validator and on the UNLReport
AccountID signingAcc = calcAccountID(keys.publicKey);
Keylet const exportedDirKeylet{keylet::exportedDir()};
if (dirIsEmpty(view, exportedDirKeylet))
break;
std::shared_ptr<SLE const> sleDirNode{};
unsigned int uDirEntry{0};
uint256 dirEntry{beast::zero};
if (!cdirFirst(
view,
exportedDirKeylet.key,
sleDirNode,
uDirEntry,
dirEntry))
break;
do
{
Keylet const itemKeylet{ltCHILD, dirEntry};
auto sleItem = view.read(itemKeylet);
if (!sleItem)
{
// Directory node has an invalid index. Bail out.
JLOG(j_.warn())
<< "ExportedTxn processing: directory node in ledger "
<< view.seq()
<< " has index to object that is missing: "
<< to_string(dirEntry);
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
LedgerEntryType const nodeType{
safe_cast<LedgerEntryType>((*sleItem)[sfLedgerEntryType])};
if (nodeType != ltEXPORTED_TXN)
{
JLOG(j_.warn())
<< "ExportedTxn processing: emitted directory contained "
"non ltEMITTED_TXN type";
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
JLOG(j_.info()) << "Processing exported txn: " << *sleItem;
auto const& exported =
const_cast<ripple::STLedgerEntry&>(*sleItem)
.getField(sfExportedTxn)
.downcast<STObject>();
auto const& txnHash = sleItem->getFieldH256(sfTransactionHash);
auto exportedLgrSeq = exported.getFieldU32(sfLedgerSequence);
auto const seq = view.seq();
if (exportedLgrSeq == seq)
{
// this shouldn't happen, but do nothing
continue;
}
if (exportedLgrSeq < seq - 1)
{
// All old entries need to be turned into Export transactions so they
// can be removed from the directory. In the previous ledger, all the
// ExportSign transactions were executed and, one by one, added the
// validators' signatures to the ltEXPORTED_TXN's sfSigners array.
// Now we collect these together, place them inside the ExportedTxn
// blob, and publish the blob in the Export transaction type.
STArray signers = sleItem->getFieldArray(sfSigners);
auto s = std::make_shared<ripple::Serializer>();
exported.add(*s);
SerialIter sitTrans(s->slice());
try
{
auto stpTrans =
std::make_shared<STTx>(std::ref(sitTrans));
if (!stpTrans->isFieldPresent(sfAccount) ||
stpTrans->getAccountID(sfAccount) == beast::zero)
{
JLOG(j_.warn()) << "Hook: Export failure: "
<< "sfAccount missing or zero.";
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
// RH TODO: should we force remove signingpubkey here?
stpTrans->setFieldArray(sfSigners, signers);
Blob const& blob = stpTrans->getSerializer().peekData();
STTx exportTx(ttEXPORT, [&](auto& obj) {
obj.setFieldVL(sfExportedTxn, blob);
obj.setFieldU32(sfLedgerSequence, seq);
obj.setFieldH256(sfTransactionHash, txnHash);
obj.setFieldArray(sfSigners, signers);
});
// submit to the ledger
{
uint256 txID = exportTx.getTransactionID();
auto s = std::make_shared<ripple::Serializer>();
exportTx.add(*s);
app.getHashRouter().setFlags(txID, SF_PRIVATE2);
app.getHashRouter().setFlags(txID, SF_EMITTED);
view.rawTxInsert(txID, std::move(s), nullptr);
ledgerChanged = true;
}
}
catch (std::exception& e)
{
JLOG(j_.warn())
<< "ExportedTxn Processing: Failure: " << e.what()
<< "\n";
}
continue;
}
// this ledger is the one after the exported txn was added to the directory
// so generate the export sign txns
auto s = std::make_shared<ripple::Serializer>();
exported.add(*s);
SerialIter sitTrans(s->slice());
try
{
auto const& stpTrans =
std::make_shared<STTx const>(std::ref(sitTrans));
if (!stpTrans->isFieldPresent(sfAccount) ||
stpTrans->getAccountID(sfAccount) == beast::zero)
{
JLOG(j_.warn()) << "Hook: Export failure: "
<< "sfAccount missing or zero.";
// RH TODO: if this ever happens the entry should be
// gracefully removed (somehow)
continue;
}
auto seq = view.info().seq;
auto txnHash = stpTrans->getTransactionID();
Serializer s =
buildMultiSigningData(*stpTrans, signingAcc);
auto multisig = ripple::sign(keys.publicKey, keys.secretKey, s.slice());
STTx exportSignTx(ttEXPORT_SIGN, [&](auto& obj) {
obj.set(([&]() {
auto inner = std::make_unique<STObject>(sfSigner);
inner->setFieldVL(sfSigningPubKey, keys.publicKey);
inner->setAccountID(sfAccount, signingAcc);
inner->setFieldVL(sfTxnSignature, multisig);
return inner;
})());
obj.setFieldU32(sfLedgerSequence, seq);
obj.setFieldH256(sfTransactionHash, txnHash);
});
// submit to the ledger
{
uint256 txID = exportSignTx.getTransactionID();
auto s = std::make_shared<ripple::Serializer>();
exportSignTx.add(*s);
app.getHashRouter().setFlags(txID, SF_PRIVATE2);
app.getHashRouter().setFlags(txID, SF_EMITTED);
view.rawTxInsert(txID, std::move(s), nullptr);
ledgerChanged = true;
}
}
catch (std::exception& e)
{
JLOG(j_.warn())
<< "ExportedTxn Processing: Failure: " << e.what()
<< "\n";
}
} while (cdirNext(
view, exportedDirKeylet.key, sleDirNode, uDirEntry, dirEntry));
} while (0);
}
// Inject emitted transactions if any
if (view.rules().enabled(featureHooks))
do
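
The branch structure above encodes a three-phase pipeline keyed off the entry's sfLedgerSequence. A compact restatement of the loop's decision logic (names illustrative, conditions lifted from the code above):

#include <cstdint>

enum class ExportPhase { Skip, CollectSignatures, Publish };

// Entries created this ledger are skipped; entries from the immediately
// previous ledger trigger injection of this validator's ttEXPORT_SIGN;
// anything older has its accumulated signatures wrapped into a ttEXPORT.
ExportPhase
phaseFor(std::uint32_t exportedLgrSeq, std::uint32_t currentSeq)
{
    if (exportedLgrSeq == currentSeq)
        return ExportPhase::Skip;
    if (exportedLgrSeq < currentSeq - 1)
        return ExportPhase::Publish;
    return ExportPhase::CollectSignatures;
}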

View File

@@ -96,6 +96,13 @@ Change::preflight(PreflightContext const& ctx)
}
}
if ((ctx.tx.getTxnType() == ttEXPORT_SIGN || ctx.tx.getTxnType() == ttEXPORT) &&
!ctx.rules.enabled(featureExport))
{
JLOG(ctx.j.warn()) << "Change: Export not enabled";
return temDISABLED;
}
return tesSUCCESS;
}
@@ -154,6 +161,8 @@ Change::preclaim(PreclaimContext const& ctx)
case ttAMENDMENT:
case ttUNL_MODIFY:
case ttEMIT_FAILURE:
case ttEXPORT:
case ttEXPORT_SIGN:
return tesSUCCESS;
case ttUNL_REPORT: {
if (!ctx.tx.isFieldPresent(sfImportVLKey) ||
@@ -209,6 +218,11 @@ Change::doApply()
return applyEmitFailure();
case ttUNL_REPORT:
return applyUNLReport();
case ttEXPORT:
return applyExport();
case ttEXPORT_SIGN:
return applyExportSign();
default:
assert(0);
return tefFAILURE;
@@ -606,7 +620,8 @@ Change::activateXahauGenesis()
loggerStream,
"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh",
(ctx_.view().rules().enabled(featureHooksUpdate1) ? 1 : 0) +
(ctx_.view().rules().enabled(fix20250131) ? 2 : 0));
(ctx_.view().rules().enabled(fix20250131) ? 2 : 0) +
(ctx_.view().rules().enabled(featureExport) ? 4 : 0));
if (!result)
{
@@ -1072,6 +1087,80 @@ Change::applyEmitFailure()
return tesSUCCESS;
}
TER
Change::applyExport()
{
uint256 txnID(ctx_.tx.getFieldH256(sfTransactionHash));
do
{
JLOG(j_.info()) << "HookExport[" << txnID
<< "]: ttExport exporting transaction";
auto key = keylet::exportedTxn(txnID);
auto const& sle = view().peek(key);
if (!sle)
{
// most likely explanation is that this was somehow a double-up, so just ignore
JLOG(j_.warn())
<< "HookError[" << txnID << "]: ttExport could not find exported txn in ledger";
break;
}
if (!view().dirRemove(
keylet::exportedDir(),
sle->getFieldU64(sfOwnerNode),
key,
false))
{
JLOG(j_.fatal()) << "HookError[" << txnID
<< "]: ttExport (Change) tefBAD_LEDGER";
return tefBAD_LEDGER;
}
view().erase(sle);
} while (0);
return tesSUCCESS;
}
TER
Change::applyExportSign()
{
uint256 txnID(ctx_.tx.getFieldH256(sfTransactionHash));
do
{
JLOG(j_.info()) << "HookExport[" << txnID
<< "]: ttExportSign adding signature to transaction";
auto key = keylet::exportedTxn(txnID);
auto const& sle = view().peek(key);
if (!sle)
{
// most likely explanation is that this was somehow a double-up, so just ignore
JLOG(j_.warn())
<< "HookError[" << txnID << "]: ttExportSign could not find exported txn in ledger";
break;
}
// grab the signer object off the txn
STObject signerObj = const_cast<ripple::STTx&>(ctx_.tx)
.getField(sfSigner)
.downcast<STObject>();
// append it to the signers field in the ledger object
STArray signers = sle->getFieldArray(sfSigners);
signers.push_back(signerObj);
sle->setFieldArray(sfSigners, signers);
// done
view().update(sle);
} while (0);
return tesSUCCESS;
}
TER
Change::applyUNLModify()
{

View File

@@ -74,6 +74,12 @@ private:
TER
applyEmitFailure();
TER
applyExport();
TER
applyExportSign();
TER
applyUNLReport();
};

View File

@@ -37,9 +37,12 @@
#include <charconv>
#include <iostream>
#include <vector>
#include <ripple/app/hook/applyHook.h>
namespace ripple {
static const uint256 shadowTicketNamespace = uint256::fromVoid("RESERVED NAMESPACE SHADOW TICKET");
TxConsequences
Import::makeTxConsequences(PreflightContext const& ctx)
{
@@ -197,7 +200,7 @@ Import::preflight(PreflightContext const& ctx)
if (!stpTrans || !meta)
return temMALFORMED;
if (stpTrans->isFieldPresent(sfTicketSequence))
if (stpTrans->isFieldPresent(sfTicketSequence) && !ctx.rules.enabled(featureExport))
{
JLOG(ctx.j.warn()) << "Import: cannot use TicketSequence XPOP.";
return temMALFORMED;
@@ -888,6 +891,26 @@ Import::preclaim(PreclaimContext const& ctx)
return tefINTERNAL;
}
bool const hasTicket = stpTrans->isFieldPresent(sfTicketSequence);
if (hasTicket)
{
if (!ctx.view.rules().enabled(featureExport))
return tefINTERNAL;
auto const acc = stpTrans->getAccountID(sfAccount);
uint256 const seq = uint256(stpTrans->getFieldU32(sfTicketSequence));
// check if there is a shadow ticket, and if not we won't allow
// the txn to pass into consensus
if (!ctx.view.exists(keylet::hookState(acc, seq, shadowTicketNamespace)))
{
JLOG(ctx.j.warn()) << "Import: attempted to import a txn without shadow ticket.";
return telSHADOW_TICKET_REQUIRED; // tel code to avoid consensus/forward without SF_BAD
}
}
auto const& sle = ctx.view.read(keylet::account(ctx.tx[sfAccount]));
auto const tt = stpTrans->getTxnType();
@@ -928,13 +951,17 @@ Import::preclaim(PreclaimContext const& ctx)
} while (0);
}
if (sle && sle->isFieldPresent(sfImportSequence))
if (!hasTicket)
{
uint32_t sleImportSequence = sle->getFieldU32(sfImportSequence);
// replay attempt
if (sleImportSequence >= stpTrans->getFieldU32(sfSequence))
return tefPAST_IMPORT_SEQ;
if (sle && sle->isFieldPresent(sfImportSequence))
{
uint32_t sleImportSequence = sle->getFieldU32(sfImportSequence);
// replay attempt
if (sleImportSequence >= stpTrans->getFieldU32(sfSequence))
return tefPAST_IMPORT_SEQ;
}
}
// when importing for the first time the fee must be zero
@@ -1242,7 +1269,11 @@ Import::doApply()
auto const id = ctx_.tx[sfAccount];
auto sle = view().peek(keylet::account(id));
if (sle && sle->getFieldU32(sfImportSequence) >= importSequence)
std::optional<uint256> ticket;
if (stpTrans->isFieldPresent(sfTicketSequence))
ticket = uint256(stpTrans->getFieldU32(sfTicketSequence));
if (sle && !ticket.has_value() && sle->getFieldU32(sfImportSequence) >= importSequence)
{
// make double sure import seq hasn't passed
JLOG(ctx_.journal.warn()) << "Import: ImportSequence passed";
@@ -1335,8 +1366,24 @@ Import::doApply()
}
}
sle->setFieldU32(sfImportSequence, importSequence);
if (!ticket.has_value())
sle->setFieldU32(sfImportSequence, importSequence);
sle->setFieldAmount(sfBalance, finalBal);
if (ticket.has_value())
{
auto sleTicket = view().peek(keylet::hookState(id, *ticket, shadowTicketNamespace));
if (!sleTicket)
return tefINTERNAL;
TER result = hook::setHookState(ctx_, id, shadowTicketNamespace, *ticket, {});
if (result != tesSUCCESS)
return result;
// RHUPTO: ticketseq billing?
}
if (create)
{
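
A side note on the shadow-ticket namespace: uint256::fromVoid reads raw bytes, and the literal "RESERVED NAMESPACE SHADOW TICKET" is exactly 32 characters, so it fills a uint256 with no padding. A quick standalone check:

#include <cassert>
#include <cstring>

int main()
{
    // Must be exactly 32 bytes for uint256::fromVoid to consume it verbatim.
    assert(std::strlen("RESERVED NAMESPACE SHADOW TICKET") == 32);
}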

View File

@@ -491,7 +491,8 @@ SetHook::validateHookSetEntry(SetHookCtx& ctx, STObject const& hookSetObj)
logger,
hsacc,
(ctx.rules.enabled(featureHooksUpdate1) ? 1 : 0) +
(ctx.rules.enabled(fix20250131) ? 2 : 0));
(ctx.rules.enabled(fix20250131) ? 2 : 0) +
(ctx.rules.enabled(featureExport) ? 4 : 0));
if (ctx.j.trace())
{

View File

@@ -374,6 +374,8 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx)
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEMIT_FAILURE:
case ttEXPORT_SIGN:
case ttEXPORT:
return Change::calculateBaseFee(view, tx);
case ttNFTOKEN_MINT:
return NFTokenMint::calculateBaseFee(view, tx);
@@ -544,6 +546,8 @@ invoke_apply(ApplyContext& ctx)
case ttFEE:
case ttUNL_MODIFY:
case ttUNL_REPORT:
case ttEXPORT:
case ttEXPORT_SIGN:
case ttEMIT_FAILURE: {
Change p(ctx);
return p();

View File

@@ -360,7 +360,8 @@ Logs::format(
if (!partition.empty())
{
#ifdef BEAST_ENHANCED_LOGGING
output += beast::detail::get_log_highlight_color();
if (beast::detail::should_log_use_colors())
output += beast::detail::get_log_highlight_color();
#endif
output += partition + ":";
}
@@ -392,7 +393,8 @@ Logs::format(
}
#ifdef BEAST_ENHANCED_LOGGING
output += "\033[0m";
if (beast::detail::should_log_use_colors())
output += "\033[0m";
#endif
output += message;

View File

@@ -41,6 +41,14 @@ get_log_highlight_color();
constexpr const char*
strip_source_root(const char* file)
{
// Handle relative paths from build/ directory (common with ccache)
// e.g., "../src/ripple/..." -> "ripple/..."
if (file && file[0] == '.' && file[1] == '.' && file[2] == '/' &&
file[3] == 's' && file[4] == 'r' && file[5] == 'c' && file[6] == '/')
{
return file + 7; // skip "../src/"
}
#ifdef SOURCE_ROOT_PATH
constexpr const char* sourceRoot = SOURCE_ROOT_PATH;
constexpr auto strlen_constexpr = [](const char* s) constexpr
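
The new fast path handles paths the compiler records when sources are referenced relative to the build directory. A standalone restatement without the constexpr machinery (illustrative only):

#include <cassert>
#include <cstring>

const char*
strip_rel_src(const char* file)
{
    // "../src/ripple/..." -> "ripple/..."
    if (file && std::strncmp(file, "../src/", 7) == 0)
        return file + 7;
    return file;
}

int main()
{
    assert(std::strcmp(
               strip_rel_src("../src/ripple/app/misc/TxQ.cpp"),
               "ripple/app/misc/TxQ.cpp") == 0);
}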

View File

@@ -17,6 +17,8 @@
*/
//==============================================================================
#ifdef BEAST_ENHANCED_LOGGING
#include <ripple/beast/utility/EnhancedLogging.h>
#include <cstdlib>
#include <cstring>
@@ -112,3 +114,5 @@ log_write_location_string(std::ostream& os, const char* file, int line)
} // namespace detail
} // namespace beast
#endif // BEAST_ENHANCED_LOGGING

View File

@@ -155,14 +155,43 @@ Journal::ScopedStream::~ScopedStream()
#ifdef BEAST_ENHANCED_LOGGING
// Add suffix if location is enabled
if (file_ && detail::should_show_location() && !s.empty() && s != "\n")
if (file_ && detail::should_show_location() && !s.empty())
{
std::ostringstream combined;
combined << s;
if (!s.empty() && s.back() != ' ')
combined << " ";
detail::log_write_location_string(combined, file_, line_);
s = combined.str();
// Single optimized scan from the end
size_t const lastNonWhitespace = s.find_last_not_of(" \n\r\t");
// Skip if message is only whitespace (e.g., just "\n" or " \n\n")
if (lastNonWhitespace != std::string::npos)
{
// Count only the trailing newlines (tiny range)
size_t trailingNewlines = 0;
for (size_t i = lastNonWhitespace + 1; i < s.length(); ++i)
{
if (s[i] == '\n')
++trailingNewlines;
}
// Build location string once
std::ostringstream locStream;
detail::log_write_location_string(locStream, file_, line_);
std::string const location = locStream.str();
// Pre-allocate exact size → zero reallocations
size_t const finalSize = lastNonWhitespace + 1 + 1 +
location.length() + trailingNewlines;
std::string result;
result.reserve(finalSize);
// Direct string ops (no ostringstream overhead)
result.append(s, 0, lastNonWhitespace + 1);
result.push_back(' ');
result += location;
if (trailingNewlines > 0)
result.append(trailingNewlines, '\n');
s = std::move(result); // Move, no copy
}
}
#endif
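
The net effect of the rewrite is that the location tag is spliced in ahead of any trailing newlines instead of being appended blindly. A self-contained restatement of the splice (behavior inferred from the code above):

#include <cassert>
#include <string>

std::string
splice_location(std::string const& s, std::string const& location)
{
    size_t const last = s.find_last_not_of(" \n\r\t");
    if (last == std::string::npos)
        return s; // whitespace-only message: leave untouched

    // Only trailing newlines are preserved; trailing spaces/tabs/CRs drop.
    size_t newlines = 0;
    for (size_t i = last + 1; i < s.size(); ++i)
        if (s[i] == '\n')
            ++newlines;

    std::string out;
    out.reserve(last + 2 + location.size() + newlines);
    out.append(s, 0, last + 1);
    out.push_back(' ');
    out += location;
    out.append(newlines, '\n');
    return out;
}

int main()
{
    assert(splice_location("Transaction applied\n", "[TxQ.cpp:1541]") ==
           "Transaction applied [TxQ.cpp:1541]\n");
}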

View File

@@ -74,7 +74,7 @@ namespace detail {
// Feature.cpp. Because it's only used to reserve storage, and determine how
// large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
// the actual number of amendments. A LogicError on startup will verify this.
static constexpr std::size_t numFeatures = 90;
static constexpr std::size_t numFeatures = 91;
/** Amendments that this server supports and the default voting behavior.
Whether they are enabled depends on the Rules defined in the validated
@@ -378,6 +378,7 @@ extern uint256 const fixInvalidTxFlags;
extern uint256 const featureExtendedHookState;
extern uint256 const fixCronStacking;
extern uint256 const fixHookAPI20251128;
extern uint256 const featureExport;
} // namespace ripple
#endif

View File

@@ -56,9 +56,15 @@ namespace keylet {
Keylet const&
emittedDir() noexcept;
Keylet const&
exportedDir() noexcept;
Keylet
emittedTxn(uint256 const& id) noexcept;
Keylet
exportedTxn(uint256 const& id) noexcept;
Keylet
hookDefinition(uint256 const& hash) noexcept;

View File

@@ -260,6 +260,8 @@ enum LedgerEntryType : std::uint16_t
\sa keylet::emitted
*/
ltEMITTED_TXN = 'E',
ltEXPORTED_TXN = 0x4578, // Ex (exported transaction)
};
// clang-format off
@@ -318,7 +320,8 @@ enum LedgerSpecificFlags {
// ltDIR_NODE
lsfNFTokenBuyOffers = 0x00000001,
lsfNFTokenSellOffers = 0x00000002,
lsfEmittedDir = 0x00000004,
lsfEmittedDir = 0x00000004,
lsfExportedDir = 0x00000008,
// ltNFTOKEN_OFFER
lsfSellNFToken = 0x00000001,
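
The new two-byte codes are ASCII mnemonics: 0x4578 spells "Ex", and (in the Indexes changes further down) 0x4564 spells "Ed". A compile-time check of the arithmetic:

// 'E' == 0x45, 'x' == 0x78, 'd' == 0x64
static_assert(('E' << 8 | 'x') == 0x4578, "ltEXPORTED_TXN spells Ex");
static_assert(('E' << 8 | 'd') == 0x4564, "EXPORTED_DIR spells Ed");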

View File

@@ -355,6 +355,7 @@ extern SF_UINT16 const sfHookEmitCount;
extern SF_UINT16 const sfHookExecutionIndex;
extern SF_UINT16 const sfHookApiVersion;
extern SF_UINT16 const sfHookStateScale;
extern SF_UINT16 const sfHookExportCount;
// 32-bit integers (common)
extern SF_UINT32 const sfNetworkID;
@@ -595,6 +596,7 @@ extern SField const sfSigner;
extern SField const sfMajority;
extern SField const sfDisabledValidator;
extern SField const sfEmittedTxn;
extern SField const sfExportedTxn;
extern SField const sfHookExecution;
extern SField const sfHookDefinition;
extern SField const sfHookParameter;

View File

@@ -67,6 +67,7 @@ enum TELcodes : TERUnderlyingType {
telNON_LOCAL_EMITTED_TXN,
telIMPORT_VL_KEY_NOT_RECOGNISED,
telCAN_NOT_QUEUE_IMPORT,
telSHADOW_TICKET_REQUIRED,
};
//------------------------------------------------------------------------------

View File

@@ -149,6 +149,12 @@ enum TxType : std::uint16_t
ttURITOKEN_CREATE_SELL_OFFER = 48,
ttURITOKEN_CANCEL_SELL_OFFER = 49,
/* A pseudo-txn containing an exported transaction plus signatures from the validators */
ttEXPORT = 90,
/* A pseudo-txn containing a validator's signature for an export transaction */
ttEXPORT_SIGN = 91,
/* A pseudo-txn alarm signal for invoking a hook, emitted by validators after alarm set conditions are met */
ttCRON = 92,

View File

@@ -484,6 +484,7 @@ REGISTER_FIX (fixInvalidTxFlags, Supported::yes, VoteBehavior::De
REGISTER_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);
REGISTER_FIX (fixCronStacking, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FIX (fixHookAPI20251128, Supported::yes, VoteBehavior::DefaultYes);
REGISTER_FEATURE(Export, Supported::yes, VoteBehavior::DefaultNo);
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.

View File

@@ -66,6 +66,8 @@ enum class LedgerNameSpace : std::uint16_t {
HOOK_DEFINITION = 'D',
EMITTED_TXN = 'E',
EMITTED_DIR = 'F',
EXPORTED_TXN = 0x4578, // Ex
EXPORTED_DIR = 0x4564, // Ed
NFTOKEN_OFFER = 'q',
NFTOKEN_BUY_OFFERS = 'h',
NFTOKEN_SELL_OFFERS = 'i',
@@ -147,6 +149,14 @@ emittedDir() noexcept
return ret;
}
Keylet const&
exportedDir() noexcept
{
static Keylet const ret{
ltDIR_NODE, indexHash(LedgerNameSpace::EXPORTED_DIR)};
return ret;
}
Keylet
hookStateDir(AccountID const& id, uint256 const& ns) noexcept
{
@@ -159,6 +169,12 @@ emittedTxn(uint256 const& id) noexcept
return {ltEMITTED_TXN, indexHash(LedgerNameSpace::EMITTED_TXN, id)};
}
Keylet
exportedTxn(uint256 const& id) noexcept
{
return {ltEXPORTED_TXN, indexHash(LedgerNameSpace::EXPORTED_TXN, id)};
}
Keylet
hook(AccountID const& id) noexcept
{

View File

@@ -380,6 +380,15 @@ LedgerFormats::LedgerFormats()
{sfPreviousTxnLgrSeq, soeREQUIRED}
},
commonFields);
add(jss::ExportedTxn,
ltEXPORTED_TXN,
{
{sfExportedTxn, soeOPTIONAL},
{sfOwnerNode, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
},
commonFields);
// clang-format on
}

View File

@@ -103,6 +103,7 @@ CONSTRUCT_TYPED_SFIELD(sfHookEmitCount, "HookEmitCount", UINT16,
CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, 19);
CONSTRUCT_TYPED_SFIELD(sfHookApiVersion, "HookApiVersion", UINT16, 20);
CONSTRUCT_TYPED_SFIELD(sfHookStateScale, "HookStateScale", UINT16, 21);
CONSTRUCT_TYPED_SFIELD(sfHookExportCount, "HookExportCount", UINT16, 22);
// 32-bit integers (common)
CONSTRUCT_TYPED_SFIELD(sfNetworkID, "NetworkID", UINT32, 1);
@@ -361,6 +362,7 @@ CONSTRUCT_UNTYPED_SFIELD(sfImportVLKey, "ImportVLKey", OBJECT,
CONSTRUCT_UNTYPED_SFIELD(sfHookEmission, "HookEmission", OBJECT, 93);
CONSTRUCT_UNTYPED_SFIELD(sfMintURIToken, "MintURIToken", OBJECT, 92);
CONSTRUCT_UNTYPED_SFIELD(sfAmountEntry, "AmountEntry", OBJECT, 91);
CONSTRUCT_UNTYPED_SFIELD(sfExportedTxn, "ExportedTxn", OBJECT, 90);
// array of objects
// ARRAY/1 is reserved for end of array

View File

@@ -141,6 +141,7 @@ transResults()
MAKE_ERROR(telNON_LOCAL_EMITTED_TXN, "Emitted transaction cannot be applied because it was not generated locally."),
MAKE_ERROR(telIMPORT_VL_KEY_NOT_RECOGNISED, "Import vl key was not recognized."),
MAKE_ERROR(telCAN_NOT_QUEUE_IMPORT, "Import transaction was not able to be directly applied and cannot be queued."),
MAKE_ERROR(telSHADOW_TICKET_REQUIRED, "The imported transaction uses a TicketSequence but no shadow ticket exists."),
MAKE_ERROR(temMALFORMED, "Malformed transaction."),
MAKE_ERROR(temBAD_AMOUNT, "Can only send positive amounts."),
MAKE_ERROR(temBAD_CURRENCY, "Malformed: Bad currency."),

View File

@@ -490,6 +490,26 @@ TxFormats::TxFormats()
{sfStartTime, soeOPTIONAL},
},
commonFields);
add(jss::ExportSign,
ttEXPORT_SIGN,
{
{sfSigner, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
{sfTransactionHash, soeREQUIRED},
},
commonFields);
add(jss::Export,
ttEXPORT,
{
{sfTransactionHash, soeREQUIRED},
{sfExportedTxn, soeREQUIRED},
{sfSigners, soeREQUIRED},
{sfLedgerSequence, soeREQUIRED},
},
commonFields);
}
TxFormats const&

View File

@@ -140,6 +140,9 @@ JSS(HookState); // ledger type.
JSS(HookStateData); // field.
JSS(HookStateKey); // field.
JSS(EmittedTxn); // ledger type.
JSS(ExportedTxn);
JSS(Export);
JSS(ExportSign);
JSS(SignerList); // ledger type.
JSS(SignerListSet); // transaction type.
JSS(SigningPubKey); // field.