Mirror of https://github.com/XRPLF/rippled.git
Synced 2026-04-04 11:02:39 +00:00

Compare commits: 4 commits, ximinez/ac...a1q123456/
| Author | SHA1 | Date |
|---|---|---|
|  | 1bbb14696e |  |
|  | df39ac33d2 |  |
|  | 96fdc711c0 |  |
|  | a1344b91c3 |  |
.github/workflows/reusable-build-test-config.yml (vendored, 26 lines changed)
@@ -153,19 +153,6 @@ jobs:
             ${CMAKE_ARGS} \
             ..

-      - name: Build the binary
-        working-directory: ${{ env.BUILD_DIR }}
-        env:
-          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
-          BUILD_TYPE: ${{ inputs.build_type }}
-          CMAKE_TARGET: ${{ inputs.cmake_target }}
-        run: |
-          cmake \
-            --build . \
-            --config "${BUILD_TYPE}" \
-            --parallel "${BUILD_NPROC}" \
-            --target "${CMAKE_TARGET}"
-
       - name: Check protocol autogen files are up-to-date
         env:
           MESSAGE: |
@@ -189,6 +176,19 @@ jobs:
             exit 1
           fi

+      - name: Build the binary
+        working-directory: ${{ env.BUILD_DIR }}
+        env:
+          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
+          BUILD_TYPE: ${{ inputs.build_type }}
+          CMAKE_TARGET: ${{ inputs.cmake_target }}
+        run: |
+          cmake \
+            --build . \
+            --config "${BUILD_TYPE}" \
+            --parallel "${BUILD_NPROC}" \
+            --target "${CMAKE_TARGET}"
+
       - name: Show ccache statistics
         if: ${{ inputs.ccache_enabled }}
         run: |
@@ -108,11 +108,10 @@ target_link_libraries(
 )

 # Level 05
-## Set up code generation for protocol_autogen module
+## Set up code generation for protocol_autogen module.
+## Generation runs at configure time (when the stamp is stale),
+## so generated files are always present before add_module GLOBs them.
 include(XrplProtocolAutogen)
-# Must call setup_protocol_autogen before add_module so that:
-# 1. Stale generated files are cleared before GLOB runs
-# 2. Output file list is known for custom commands
 setup_protocol_autogen()

 add_module(xrpl protocol_autogen)
@@ -121,11 +120,6 @@ target_link_libraries(
     PUBLIC xrpl.libxrpl.protocol
 )

-# Ensure code generation runs before compiling protocol_autogen
-if(TARGET protocol_autogen_generate)
-    add_dependencies(xrpl.libxrpl.protocol_autogen protocol_autogen_generate)
-endif()
-
 # Level 06
 add_module(xrpl core)
 target_link_libraries(
@@ -15,7 +15,6 @@ set(CODEGEN_VENV_DIR
 )

 # Function to set up code generation for protocol_autogen module
 # This runs at configure time to generate C++ wrapper classes from macro files
 function(setup_protocol_autogen)
     # Directory paths
     set(MACRO_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include/xrpl/protocol/detail")
@@ -25,7 +24,7 @@ function(setup_protocol_autogen)
     set(AUTOGEN_TEST_DIR
         "${CMAKE_CURRENT_SOURCE_DIR}/src/tests/libxrpl/protocol_autogen"
     )
-    set(SCRIPTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/scripts")
+    set(SCRIPTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/scripts/codegen")

     # Input macro files
     set(TRANSACTIONS_MACRO "${MACRO_DIR}/transactions.macro")
@@ -43,6 +42,7 @@ function(setup_protocol_autogen)
     set(LEDGER_TEST_TEMPLATE
         "${SCRIPTS_DIR}/templates/LedgerEntryTests.cpp.mako"
     )
+    set(UPDATE_STAMP_SCRIPT "${SCRIPTS_DIR}/update_codegen_stamp.py")

     # Check if code generation is disabled
     if(XRPL_NO_CODEGEN)
@@ -60,7 +60,33 @@ function(setup_protocol_autogen)
     file(MAKE_DIRECTORY "${AUTOGEN_TEST_DIR}/ledger_entries")
     file(MAKE_DIRECTORY "${AUTOGEN_TEST_DIR}/transactions")

-    # Find Python3 - check if already found by Conan or find it ourselves
+    # === Stamp file check ===
+    # All input files whose content affects code generation output.
+    set(STAMP_FILE "${CMAKE_CURRENT_SOURCE_DIR}/scripts/codegen/.codegen_stamp")
+    set(ALL_INPUT_FILES
+        "${TRANSACTIONS_MACRO}"
+        "${LEDGER_ENTRIES_MACRO}"
+        "${SFIELDS_MACRO}"
+        "${GENERATE_TX_SCRIPT}"
+        "${GENERATE_LEDGER_SCRIPT}"
+        "${REQUIREMENTS_FILE}"
+        "${MACRO_PARSER_COMMON}"
+        "${TX_TEMPLATE}"
+        "${TX_TEST_TEMPLATE}"
+        "${LEDGER_TEMPLATE}"
+        "${LEDGER_TEST_TEMPLATE}"
+    )
+
+    # Tell CMake to reconfigure automatically when any input file changes.
+    # The reconfigure itself is cheap — it runs the stamp check below
+    # which only invokes stdlib Python (no venv needed).
+    set_property(
+        DIRECTORY
+        APPEND
+        PROPERTY CMAKE_CONFIGURE_DEPENDS ${ALL_INPUT_FILES}
+    )
+
+    # Find Python3 (needed for stamp check; no venv required).
     if(NOT Python3_EXECUTABLE)
         find_package(Python3 COMPONENTS Interpreter QUIET)
     endif()
@@ -79,19 +105,45 @@ function(setup_protocol_autogen)
         return()
     endif()

-    message(STATUS "Using Python3 for code generation: ${Python3_EXECUTABLE}")
+    # Check whether the stamp is up-to-date (stdlib-only, no venv).
+    execute_process(
+        COMMAND
+            ${Python3_EXECUTABLE} "${UPDATE_STAMP_SCRIPT}" --check
+            "${STAMP_FILE}" ${ALL_INPUT_FILES}
+        WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+        RESULT_VARIABLE STAMP_CHECK_RESULT
+    )

-    # Set up Python virtual environment for code generation
+    # ------------------------------------------------------------------
+    # Fast path: stamp matches — generated files are up to date.
+    # ------------------------------------------------------------------
+    if(STAMP_CHECK_RESULT EQUAL 0)
+        message(
+            STATUS
+            "Protocol autogen: inputs unchanged (stamp matches), skipping generation"
+        )
+        return()
+    endif()
+
+    # ------------------------------------------------------------------
+    # Slow path: stamp mismatch — run generation at configure time.
+    # ------------------------------------------------------------------
+    message(
+        STATUS
+        "Protocol autogen: inputs changed, running code generation..."
+    )
+
+    # Set up Python virtual environment for code generation.
     if(CODEGEN_VENV_DIR)
-        # User-provided venv - skip automatic setup
+        # User-provided venv - skip automatic setup.
         set(VENV_DIR "${CODEGEN_VENV_DIR}")
         message(STATUS "Using user-provided Python venv: ${VENV_DIR}")
     else()
-        # Use default venv in build directory
+        # Use default venv in build directory.
         set(VENV_DIR "${CMAKE_CURRENT_BINARY_DIR}/codegen_venv")
     endif()

-    # Determine the Python executable path in the venv
+    # Determine the Python/pip executables inside the venv.
     if(WIN32)
         set(VENV_PYTHON "${VENV_DIR}/Scripts/python.exe")
         set(VENV_PIP "${VENV_DIR}/Scripts/pip.exe")
@@ -100,9 +152,9 @@ function(setup_protocol_autogen)
         set(VENV_PIP "${VENV_DIR}/bin/pip")
     endif()

-    # Only auto-setup venv if not user-provided
+    # Create or update the virtual environment if needed.
    if(NOT CODEGEN_VENV_DIR)
-        # Check if venv needs to be created or updated
+        # Check if venv needs to be created or updated.
        set(VENV_NEEDS_UPDATE FALSE)
        if(NOT EXISTS "${VENV_PYTHON}")
            set(VENV_NEEDS_UPDATE TRUE)
@@ -122,8 +174,9 @@ function(setup_protocol_autogen)
            )
        endif()

-        # Create/update virtual environment if needed
+        # Create/update virtual environment if needed.
        if(VENV_NEEDS_UPDATE)
+            # Create the venv.
            message(
                STATUS
                "Setting up Python virtual environment at ${VENV_DIR}"
@@ -140,7 +193,7 @@ function(setup_protocol_autogen)
            )
        endif()

-        # Check pip index URL configuration
+        # Warn if pip is configured with a non-default index (may need VPN).
        execute_process(
            COMMAND ${VENV_PIP} config get global.index-url
            OUTPUT_VARIABLE PIP_INDEX_URL
@@ -162,6 +215,7 @@ function(setup_protocol_autogen)
            endif()
        endif()

+        # Install dependencies.
        message(STATUS "Installing Python dependencies...")
        execute_process(
            COMMAND ${VENV_PIP} install --upgrade pip
@@ -185,125 +239,56 @@ function(setup_protocol_autogen)
            )
        endif()

-        # Mark requirements as installed
+        # Mark requirements as installed.
        file(TOUCH "${VENV_DIR}/.requirements_installed")
        message(STATUS "Python virtual environment ready")
    endif()
    endif()

-    # At configure time - get list of output files for transactions
-    execute_process(
-        COMMAND
-            ${VENV_PYTHON} "${GENERATE_TX_SCRIPT}" "${TRANSACTIONS_MACRO}"
-            --header-dir "${AUTOGEN_HEADER_DIR}/transactions" --test-dir
-            "${AUTOGEN_TEST_DIR}/transactions" --list-outputs
-        OUTPUT_VARIABLE TX_OUTPUT_FILES
-        OUTPUT_STRIP_TRAILING_WHITESPACE
-        RESULT_VARIABLE TX_LIST_RESULT
-        ERROR_VARIABLE TX_LIST_ERROR
-    )
-    if(NOT TX_LIST_RESULT EQUAL 0)
-        message(
-            FATAL_ERROR
-            "Failed to list transaction output files:\n${TX_LIST_ERROR}"
-        )
-    endif()
-    # Convert newline-separated list to CMake list
-    string(REPLACE "\\" "/" TX_OUTPUT_FILES "${TX_OUTPUT_FILES}")
-    string(REPLACE "\n" ";" TX_OUTPUT_FILES "${TX_OUTPUT_FILES}")
-
-    # At configure time - get list of output files for ledger entries
-    execute_process(
-        COMMAND
-            ${VENV_PYTHON} "${GENERATE_LEDGER_SCRIPT}" "${LEDGER_ENTRIES_MACRO}"
-            --header-dir "${AUTOGEN_HEADER_DIR}/ledger_entries" --test-dir
-            "${AUTOGEN_TEST_DIR}/ledger_entries" --list-outputs
-        OUTPUT_VARIABLE LEDGER_OUTPUT_FILES
-        OUTPUT_STRIP_TRAILING_WHITESPACE
-        RESULT_VARIABLE LEDGER_LIST_RESULT
-        ERROR_VARIABLE LEDGER_LIST_ERROR
-    )
-    if(NOT LEDGER_LIST_RESULT EQUAL 0)
-        message(
-            FATAL_ERROR
-            "Failed to list ledger entry output files:\n${LEDGER_LIST_ERROR}"
-        )
-    endif()
-    # Convert newline-separated list to CMake list
-    string(REPLACE "\\" "/" LEDGER_OUTPUT_FILES "${LEDGER_OUTPUT_FILES}")
-    string(REPLACE "\n" ";" LEDGER_OUTPUT_FILES "${LEDGER_OUTPUT_FILES}")
-
-    # Custom command to generate transaction classes at build time
-    add_custom_command(
-        OUTPUT ${TX_OUTPUT_FILES}
+    # Generate transaction classes.
+    execute_process(
        COMMAND
            ${VENV_PYTHON} "${GENERATE_TX_SCRIPT}" "${TRANSACTIONS_MACRO}"
            --header-dir "${AUTOGEN_HEADER_DIR}/transactions" --test-dir
            "${AUTOGEN_TEST_DIR}/transactions" --sfields-macro
            "${SFIELDS_MACRO}"
        WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
-        DEPENDS
-            "${TRANSACTIONS_MACRO}"
-            "${SFIELDS_MACRO}"
-            "${GENERATE_TX_SCRIPT}"
-            "${MACRO_PARSER_COMMON}"
-            "${TX_TEMPLATE}"
-            "${TX_TEST_TEMPLATE}"
-            "${REQUIREMENTS_FILE}"
-        COMMENT "Generating transaction classes from transactions.macro..."
-        VERBATIM
+        RESULT_VARIABLE TX_RESULT
+        ERROR_VARIABLE TX_ERROR
    )
+    if(NOT TX_RESULT EQUAL 0)
+        message(FATAL_ERROR "Transaction code generation failed:\n${TX_ERROR}")
+    endif()

-    # Custom command to generate ledger entry classes at build time
-    add_custom_command(
-        OUTPUT ${LEDGER_OUTPUT_FILES}
+    # Generate ledger entry classes.
+    execute_process(
        COMMAND
            ${VENV_PYTHON} "${GENERATE_LEDGER_SCRIPT}" "${LEDGER_ENTRIES_MACRO}"
            --header-dir "${AUTOGEN_HEADER_DIR}/ledger_entries" --test-dir
            "${AUTOGEN_TEST_DIR}/ledger_entries" --sfields-macro
            "${SFIELDS_MACRO}"
        WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
-        DEPENDS
-            "${LEDGER_ENTRIES_MACRO}"
-            "${SFIELDS_MACRO}"
-            "${GENERATE_LEDGER_SCRIPT}"
-            "${MACRO_PARSER_COMMON}"
-            "${LEDGER_TEMPLATE}"
-            "${LEDGER_TEST_TEMPLATE}"
-            "${REQUIREMENTS_FILE}"
-        COMMENT "Generating ledger entry classes from ledger_entries.macro..."
-        VERBATIM
+        RESULT_VARIABLE LEDGER_RESULT
+        ERROR_VARIABLE LEDGER_ERROR
    )
+    if(NOT LEDGER_RESULT EQUAL 0)
+        message(
+            FATAL_ERROR
+            "Ledger entry code generation failed:\n${LEDGER_ERROR}"
+        )
+    endif()

-    # Create a custom target that depends on all generated files
-    add_custom_target(
-        protocol_autogen_generate
-        DEPENDS ${TX_OUTPUT_FILES} ${LEDGER_OUTPUT_FILES}
-        COMMENT "Protocol autogen code generation"
-    )
+    # Update the stamp file so subsequent configures skip generation.
+    execute_process(
+        COMMAND
+            ${Python3_EXECUTABLE} "${UPDATE_STAMP_SCRIPT}" --update
+            "${STAMP_FILE}" ${ALL_INPUT_FILES}
+        WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+        RESULT_VARIABLE STAMP_RESULT
+    )
+    if(NOT STAMP_RESULT EQUAL 0)
+        message(WARNING "Failed to update codegen stamp file")
+    endif()

-    # Extract test files from output lists (files ending in Tests.cpp)
-    set(PROTOCOL_AUTOGEN_TEST_SOURCES "")
-    foreach(FILE ${TX_OUTPUT_FILES} ${LEDGER_OUTPUT_FILES})
-        if(FILE MATCHES "Tests\\.cpp$")
-            list(APPEND PROTOCOL_AUTOGEN_TEST_SOURCES "${FILE}")
-        endif()
-    endforeach()
-    # Export test sources to parent scope for use in test CMakeLists.txt
-    set(PROTOCOL_AUTOGEN_TEST_SOURCES
-        "${PROTOCOL_AUTOGEN_TEST_SOURCES}"
-        CACHE INTERNAL
-        "Generated protocol_autogen test sources"
-    )
-
-    # Register dependencies so CMake reconfigures when macro files change
-    # (to update the list of output files)
-    set_property(
-        DIRECTORY
-        APPEND
-        PROPERTY
-            CMAKE_CONFIGURE_DEPENDS
-            "${TRANSACTIONS_MACRO}"
-            "${LEDGER_ENTRIES_MACRO}"
-    )
+    message(STATUS "Protocol autogen: code generation complete")
 endfunction()
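The stamp mechanism above reduces to a simple contract: `--check` exits 0 when the combined hash of all inputs matches the recorded stamp; otherwise generation runs and `--update` rewrites the stamp. A minimal Python sketch of that configure-time flow (illustrative only; the real logic is the CMake above plus `scripts/codegen/update_codegen_stamp.py`, reproduced further down this page):

```python
import hashlib
from pathlib import Path

def combined_hash(paths):
    # SHA-256 of the concatenated per-file SHA-256 hex digests,
    # matching the algorithm in update_codegen_stamp.py.
    digests = [hashlib.sha256(Path(p).read_bytes()).hexdigest() for p in paths]
    return hashlib.sha256("".join(digests).encode()).hexdigest()

def configure(stamp: Path, inputs, generate):
    # Fast path: stamp matches, so skip generation entirely.
    current = combined_hash(inputs)
    if stamp.exists() and f"COMBINED_HASH={current}" in stamp.read_text():
        return "skipped"
    # Slow path: run the generators, then record the new hash.
    generate()
    stamp.write_text(f"COMBINED_HASH={current}\n")
    return "generated"
```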
@@ -1,139 +0,0 @@ (deleted file)
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2024 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED

#include <functional>
#include <mutex>
#include <set>

/** RAII class to check if an Item is already being processed on another thread,
 * as indicated by its presence in a Collection.
 *
 * If the Item is not in the Collection, it will be added under lock in the
 * ctor, and removed under lock in the dtor. The object will be considered
 * "usable" and evaluate to `true`.
 *
 * If the Item is in the Collection, no changes will be made to the collection,
 * and the CanProcess object will be considered "unusable".
 *
 * It's up to the caller to decide what "usable" and "unusable" mean. (e.g.
 * Process or skip a block of code, or set a flag.)
 *
 * The current use is to avoid lock contention that would be involved in
 * processing something associated with the Item.
 *
 * Examples:
 *
 *   void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...)
 *   {
 *       if (CanProcess check{acquiresMutex_, pendingAcquires_, hash})
 *       {
 *           acquire(hash, ...);
 *       }
 *   }
 *
 *   bool
 *   NetworkOPsImp::recvValidation(
 *       std::shared_ptr<STValidation> const& val,
 *       std::string const& source)
 *   {
 *       CanProcess check(
 *           validationsMutex_, pendingValidations_, val->getLedgerHash());
 *       BypassAccept bypassAccept =
 *           check ? BypassAccept::no : BypassAccept::yes;
 *       handleNewValidation(app_, val, source, bypassAccept, m_journal);
 *   }
 *
 */
class CanProcess
{
public:
    template <class Mutex, class Collection, class Item>
    CanProcess(Mutex& mtx, Collection& collection, Item const& item)
        : cleanup_(insert(mtx, collection, item))
    {
    }

    ~CanProcess()
    {
        if (cleanup_)
            cleanup_();
    }

    CanProcess(CanProcess const&) = delete;

    CanProcess&
    operator=(CanProcess const&) = delete;

    explicit
    operator bool() const
    {
        return static_cast<bool>(cleanup_);
    }

private:
    template <bool useIterator, class Mutex, class Collection, class Item>
    std::function<void()>
    doInsert(Mutex& mtx, Collection& collection, Item const& item)
    {
        std::unique_lock<Mutex> lock(mtx);
        // TODO: Use structured binding once LLVM 16 is the minimum supported
        // version. See also: https://github.com/llvm/llvm-project/issues/48582
        // https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c
        auto const insertResult = collection.insert(item);
        auto const it = insertResult.first;
        if (!insertResult.second)
            return {};
        if constexpr (useIterator)
            return [&, it]() {
                std::unique_lock<Mutex> lock(mtx);
                collection.erase(it);
            };
        else
            return [&]() {
                std::unique_lock<Mutex> lock(mtx);
                collection.erase(item);
            };
    }

    // Generic insert() function doesn't use iterators because they may get
    // invalidated
    template <class Mutex, class Collection, class Item>
    std::function<void()>
    insert(Mutex& mtx, Collection& collection, Item const& item)
    {
        return doInsert<false>(mtx, collection, item);
    }

    // Specialize insert() for std::set, which does not invalidate iterators
    // for insert and erase
    template <class Mutex, class Item>
    std::function<void()>
    insert(Mutex& mtx, std::set<Item>& collection, Item const& item)
    {
        return doInsert<true>(mtx, collection, item);
    }

    // If set, then the item is "usable"
    std::function<void()> cleanup_;
};

#endif
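For intuition, the RAII idiom this deleted header provided (insert-if-absent under lock in the constructor, erase under lock in the destructor, with truthiness meaning "usable") can be sketched as a Python context manager. A conceptual analog only, not part of the codebase:

```python
import threading
from contextlib import contextmanager

@contextmanager
def can_process(lock: threading.Lock, collection: set, item):
    # Yields True ("usable") if we inserted the item; it is then removed
    # again on exit. Yields False if another caller already holds it, in
    # which case the collection is left untouched.
    with lock:
        usable = item not in collection
        if usable:
            collection.add(item)
    try:
        yield usable
    finally:
        if usable:
            with lock:
                collection.discard(item)

# Usage mirrors the C++ examples in the doc comment:
#   with can_process(mutex, pending, h) as ok:
#       if ok:
#           acquire(h)
```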
@@ -199,7 +199,7 @@ public:

     /** Add a suppression peer and get message's relay status.
      * Return pair:
-     * element 1: true if the key is added.
+     * element 1: true if the peer is added.
      * element 2: optional is seated to the relay time point or
      * is unseated if has not relayed yet. */
     std::pair<bool, std::optional<Stopwatch::time_point>>
@@ -35,8 +35,6 @@ struct LedgerHeader

     // If validated is false, it means "not yet validated."
     // Once validated is true, it will never be set false at a later time.
-    // NOTE: If you are accessing this directly, you are probably doing it
-    // wrong. Use LedgerMaster::isValidated().
     // VFALCO TODO Make this not mutable
     bool mutable validated = false;
     bool accepted = false;
@@ -6,15 +6,15 @@ This directory contains auto-generated C++ wrapper classes for XRP Ledger protoc

 The files in this directory are automatically generated at **CMake configure time** from macro definition files:

-- **Transaction classes** (in `transactions/`): Generated from `include/xrpl/protocol/detail/transactions.macro` by `scripts/generate_tx_classes.py`
-- **Ledger entry classes** (in `ledger_entries/`): Generated from `include/xrpl/protocol/detail/ledger_entries.macro` by `scripts/generate_ledger_classes.py`
+- **Transaction classes** (in `transactions/`): Generated from `include/xrpl/protocol/detail/transactions.macro` by `scripts/codegen/generate_tx_classes.py`
+- **Ledger entry classes** (in `ledger_entries/`): Generated from `include/xrpl/protocol/detail/ledger_entries.macro` by `scripts/codegen/generate_ledger_classes.py`

 ## Generation Process

 The generation happens automatically when you **configure** the project (not during build). When you run CMake, the system:

 1. Creates a Python virtual environment in the build directory (`codegen_venv`)
-2. Installs Python dependencies from `scripts/requirements.txt` into the venv (only if needed)
+2. Installs Python dependencies from `scripts/codegen/requirements.txt` into the venv (only if needed)
 3. Runs the Python generation scripts using the venv Python interpreter
 4. Parses the macro files to extract type definitions
 5. Generates type-safe C++ wrapper classes using Mako templates
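Step 5 above uses Mako templates. A minimal sketch of the mechanism, with a made-up one-line template and invented class names (the real templates live in `scripts/codegen/templates/`):

```python
from mako.template import Template

# Hypothetical template for illustration; the repository's templates
# are full .h.mako / .cpp.mako files with many more fields.
tmpl = Template("class ${name} : public ${base} {};")
print(tmpl.render(name="Payment", base="TransactionBase"))
# -> class Payment : public TransactionBase {};
```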
@@ -26,7 +26,7 @@ The code is regenerated when:

 - You run CMake configure for the first time
 - The Python virtual environment doesn't exist
-- `scripts/requirements.txt` has been modified
+- `scripts/codegen/requirements.txt` has been modified

 To force regeneration, delete the build directory and reconfigure.

@@ -55,9 +55,9 @@ The generated `.h` files **are checked into version control**. This means:

 To modify the generated classes:

 - Edit the macro files in `include/xrpl/protocol/detail/`
-- Edit the Mako templates in `scripts/templates/`
-- Edit the generation scripts in `scripts/`
-- Update Python dependencies in `scripts/requirements.txt`
+- Edit the Mako templates in `scripts/codegen/templates/`
+- Edit the generation scripts in `scripts/codegen/`
+- Update Python dependencies in `scripts/codegen/requirements.txt`
 - Run CMake configure to regenerate

 ## Adding Common Fields

@@ -73,7 +73,7 @@ Base classes:

 Templates (update to pass required common fields to base class constructors):

-- `scripts/templates/Transaction.h.mako`
-- `scripts/templates/LedgerEntry.h.mako`
+- `scripts/codegen/templates/Transaction.h.mako`
+- `scripts/codegen/templates/LedgerEntry.h.mako`

 These files are **not auto-generated** and must be updated by hand.

@@ -185,7 +185,7 @@ public:
     virtual bool
     isFull() = 0;
     virtual void
-    setMode(OperatingMode om, char const* reason) = 0;
+    setMode(OperatingMode om) = 0;
     virtual bool
     isBlocked() = 0;
     virtual bool
scripts/codegen/.codegen_stamp (new file, 4 lines)
@@ -0,0 +1,4 @@
+# Auto-generated by protocol autogen - do not edit manually.
+# This file tracks input hashes to avoid unnecessary code regeneration.
+# It should be checked into version control alongside the generated files.
+COMBINED_HASH=24a9168ac6a450f09fa4e2ab288d06624a368041e91fbc7741101d3565d1e601
@@ -138,28 +138,11 @@ def main():
         "--sfields-macro",
         help="Path to sfields.macro (default: auto-detect from macro_path)",
     )
-    parser.add_argument(
-        "--list-outputs",
-        action="store_true",
-        help="List output files without generating (one per line)",
-    )

     args = parser.parse_args()

     # Parse the macro file to get ledger entry names
     entries = parse_macro_file(args.macro_path)

-    # If --list-outputs, just print the output file paths and exit
-    if args.list_outputs:
-        header_dir = Path(args.header_dir)
-        for entry in entries:
-            print(header_dir / f"{entry['name']}.h")
-        if args.test_dir:
-            test_dir = Path(args.test_dir)
-            for entry in entries:
-                print(test_dir / f"{entry['name']}Tests.cpp")
-        return
-
     # Auto-detect sfields.macro path if not provided
     if args.sfields_macro:
         sfields_path = Path(args.sfields_macro)
@@ -147,28 +147,11 @@ def main():
         "--sfields-macro",
         help="Path to sfields.macro (default: auto-detect from macro_path)",
     )
-    parser.add_argument(
-        "--list-outputs",
-        action="store_true",
-        help="List output files without generating (one per line)",
-    )

     args = parser.parse_args()

     # Parse the macro file to get transaction names
     transactions = parse_macro_file(args.macro_path)

-    # If --list-outputs, just print the output file paths and exit
-    if args.list_outputs:
-        header_dir = Path(args.header_dir)
-        for tx in transactions:
-            print(header_dir / f"{tx['name']}.h")
-        if args.test_dir:
-            test_dir = Path(args.test_dir)
-            for tx in transactions:
-                print(test_dir / f"{tx['name']}Tests.cpp")
-        return
-
     # Auto-detect sfields.macro path if not provided
     if args.sfields_macro:
         sfields_path = Path(args.sfields_macro)
scripts/codegen/update_codegen_stamp.py (new file, 83 lines)
@@ -0,0 +1,83 @@ (new file)
#!/usr/bin/env python3
"""
Check or update the codegen stamp file.

Uses only the Python standard library (hashlib, pathlib, sys) so it can
run without a virtual environment.

Modes:
  --check   Exit 0 if stamp is up-to-date, exit 1 if stale/missing.
  --update  Recompute the hash and write it to the stamp file.

Usage:
  python update_codegen_stamp.py --check <stamp_file> <input_files...>
  python update_codegen_stamp.py --update <stamp_file> <input_files...>
"""

import hashlib
import sys
from pathlib import Path


def compute_combined_hash(input_files: list[str]) -> str:
    """Compute a combined SHA-256 hash of all input files.

    Algorithm: compute each file's SHA-256 hex digest, concatenate them
    all, then SHA-256 the concatenation.
    """
    parts = []
    for filepath in input_files:
        if not Path(filepath).exists():
            print(f"Error: input file not found: {filepath}", file=sys.stderr)
            raise FileNotFoundError(f"Input file not found: {filepath}")
        file_hash = hashlib.sha256(Path(filepath).read_bytes()).hexdigest()
        parts.append(file_hash)

    combined = "".join(parts)
    return hashlib.sha256(combined.encode()).hexdigest()


def read_stamp_hash(stamp_file: str) -> str:
    """Read the COMBINED_HASH from an existing stamp file, or '' if missing."""
    path = Path(stamp_file)
    if not path.exists():
        return ""
    for line in path.read_text(encoding="utf-8").splitlines():
        if line.startswith("COMBINED_HASH="):
            return line.split("=", 1)[1]
    return ""


def main():
    if len(sys.argv) < 4 or sys.argv[1] not in ("--check", "--update"):
        print(
            f"Usage: {sys.argv[0]} --check|--update <stamp_file> <input_files...>",
            file=sys.stderr,
        )
        sys.exit(2)

    mode = sys.argv[1]
    stamp_file = sys.argv[2]
    input_files = sys.argv[3:]

    current_hash = compute_combined_hash(input_files)

    if mode == "--check":
        stamp_hash = read_stamp_hash(stamp_file)
        if current_hash == stamp_hash:
            sys.exit(0)
        else:
            sys.exit(1)

    # --update
    with open(stamp_file, "w", encoding="utf-8") as fp:
        fp.write(
            "# Auto-generated by protocol autogen - do not edit manually.\n"
            "# This file tracks input hashes to avoid unnecessary code regeneration.\n"
            "# It should be checked into version control alongside the generated files.\n"
        )
        fp.write(f"COMBINED_HASH={current_hash}\n")


if __name__ == "__main__":
    main()
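A quick way to sanity-check the documented algorithm (per-file SHA-256 digests, concatenated, then hashed again) against `compute_combined_hash`, using throwaway temp files; this assumes the script's directory is importable:

```python
import hashlib
import tempfile
from pathlib import Path

# Assumes scripts/codegen is on sys.path so the module imports cleanly.
from update_codegen_stamp import compute_combined_hash

with tempfile.TemporaryDirectory() as d:
    a = Path(d, "a.macro"); a.write_bytes(b"alpha")
    b = Path(d, "b.macro"); b.write_bytes(b"beta")

    # Recompute by hand: digest each file, concatenate, digest again.
    concat = "".join(
        hashlib.sha256(p.read_bytes()).hexdigest() for p in (a, b)
    )
    expected = hashlib.sha256(concat.encode()).hexdigest()

    assert compute_combined_hash([str(a), str(b)]) == expected
```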
@@ -85,12 +85,7 @@ public:
     }

     virtual void
-    acquireAsync(
-        JobType type,
-        std::string const& name,
-        uint256 const& hash,
-        std::uint32_t seq,
-        InboundLedger::Reason reason) override
+    acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) override
     {
     }
@@ -1,165 +0,0 @@ (deleted file)
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012-2016 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpl/basics/CanProcess.h>
#include <xrpl/beast/unit_test.h>

#include <memory>

namespace ripple {
namespace test {

struct CanProcess_test : beast::unit_test::suite
{
    template <class Mutex, class Collection, class Item>
    void
    test(
        std::string const& name,
        Mutex& mtx,
        Collection& collection,
        std::vector<Item> const& items)
    {
        testcase(name);

        if (!BEAST_EXPECT(!items.empty()))
            return;
        if (!BEAST_EXPECT(collection.empty()))
            return;

        // CanProcess objects can't be copied or moved. To make that easier,
        // store shared_ptrs
        std::vector<std::shared_ptr<CanProcess>> trackers;
        // Fill up the vector with two CanProcess for each Item. The first
        // inserts the item into the collection and is "good". The second does
        // not and is "bad".
        for (int i = 0; i < items.size(); ++i)
        {
            {
                auto const& good = trackers.emplace_back(
                    std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(*good);
            }
            BEAST_EXPECT(trackers.size() == (2 * i) + 1);
            BEAST_EXPECT(collection.size() == i + 1);
            {
                auto const& bad = trackers.emplace_back(
                    std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(!*bad);
            }
            BEAST_EXPECT(trackers.size() == 2 * (i + 1));
            BEAST_EXPECT(collection.size() == i + 1);
        }
        BEAST_EXPECT(collection.size() == items.size());
        // Now remove the items from the vector<CanProcess> two at a time, and
        // try to get another CanProcess for that item.
        for (int i = 0; i < items.size(); ++i)
        {
            // Remove the "bad" one in the second position
            // This will have no effect on the collection
            {
                auto const iter = trackers.begin() + 1;
                BEAST_EXPECT(!**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
            BEAST_EXPECT(collection.size() == items.size());
            {
                // Append a new "bad" one
                auto const& bad = trackers.emplace_back(
                    std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(!*bad);
            }
            BEAST_EXPECT(trackers.size() == 2 * items.size());
            BEAST_EXPECT(collection.size() == items.size());

            // Remove the "good" one from the front
            {
                auto const iter = trackers.begin();
                BEAST_EXPECT(**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
            BEAST_EXPECT(collection.size() == items.size() - 1);
            {
                // Append a new "good" one
                auto const& good = trackers.emplace_back(
                    std::make_shared<CanProcess>(mtx, collection, items[i]));
                BEAST_EXPECT(*good);
            }
            BEAST_EXPECT(trackers.size() == 2 * items.size());
            BEAST_EXPECT(collection.size() == items.size());
        }
        // Now remove them all two at a time
        for (int i = items.size() - 1; i >= 0; --i)
        {
            // Remove the "bad" one from the front
            {
                auto const iter = trackers.begin();
                BEAST_EXPECT(!**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == (2 * i) + 1);
            BEAST_EXPECT(collection.size() == i + 1);
            // Remove the "good" one now in front
            {
                auto const iter = trackers.begin();
                BEAST_EXPECT(**iter);
                trackers.erase(iter);
            }
            BEAST_EXPECT(trackers.size() == 2 * i);
            BEAST_EXPECT(collection.size() == i);
        }
        BEAST_EXPECT(trackers.empty());
        BEAST_EXPECT(collection.empty());
    }

    void
    run() override
    {
        {
            std::mutex m;
            std::set<int> collection;
            std::vector<int> const items{1, 2, 3, 4, 5};
            test("set of int", m, collection, items);
        }
        {
            std::mutex m;
            std::set<std::string> collection;
            std::vector<std::string> const items{"one", "two", "three", "four", "five"};
            test("set of string", m, collection, items);
        }
        {
            std::mutex m;
            std::unordered_set<char> collection;
            std::vector<char> const items{'1', '2', '3', '4', '5'};
            test("unorderd_set of char", m, collection, items);
        }
        {
            std::mutex m;
            std::unordered_set<std::uint64_t> collection;
            std::vector<std::uint64_t> const items{100u, 1000u, 150u, 4u, 0u};
            test("unordered_set of uint64_t", m, collection, items);
        }
    }
};

BEAST_DEFINE_TESTSUITE(CanProcess, ripple_basics, ripple);

} // namespace test
} // namespace ripple
@@ -32,20 +32,11 @@ xrpl_add_test(json)
 target_link_libraries(xrpl.test.json PRIVATE xrpl.imports.test)
 add_dependencies(xrpl.tests xrpl.test.json)

-# protocol_autogen tests use explicit source list (not GLOB) because sources are generated
-# Mark generated sources so CMake knows they'll be created at build time
-set_source_files_properties(
-    ${PROTOCOL_AUTOGEN_TEST_SOURCES}
-    PROPERTIES GENERATED TRUE
-)
-add_executable(xrpl.test.protocol_autogen ${PROTOCOL_AUTOGEN_TEST_SOURCES})
+# protocol_autogen tests — sources are checked into git so GLOB works.
+# Code generation runs at configure time when inputs change.
+xrpl_add_test(protocol_autogen)
 target_link_libraries(xrpl.test.protocol_autogen PRIVATE xrpl.imports.test)
 add_dependencies(xrpl.tests xrpl.test.protocol_autogen)
-add_test(NAME xrpl.test.protocol_autogen COMMAND xrpl.test.protocol_autogen)
-# Ensure code generation runs before compiling tests
-if(TARGET protocol_autogen_generate)
-    add_dependencies(xrpl.test.protocol_autogen protocol_autogen_generate)
-endif()

 # Network unit tests are currently not supported on Windows
 if(NOT WIN32)
@@ -105,8 +105,10 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
         // Tell the ledger acquire system that we need the consensus ledger
         acquiringLedger_ = hash;

-        app_.getInboundLedgers().acquireAsync(
-            jtADVANCE, "GetConsL1", hash, 0, InboundLedger::Reason::CONSENSUS);
+        app_.getJobQueue().addJob(jtADVANCE, "GetConsL1", [id = hash, &app = app_, this]() {
+            JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger1 started";
+            app.getInboundLedgers().acquireAsync(id, 0, InboundLedger::Reason::CONSENSUS);
+        });
     }
     return std::nullopt;
 }
@@ -996,7 +998,7 @@ void
 RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
 {
     if ((positions == 0u) && app_.getOPs().isFull())
-        app_.getOPs().setMode(OperatingMode::CONNECTED, "updateOperatingMode: no positions");
+        app_.getOPs().setMode(OperatingMode::CONNECTED);
 }

 void
@@ -116,8 +116,12 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
     {
         JLOG(j_.warn()) << "Need validated ledger for preferred ledger analysis " << hash;

-        app_.getInboundLedgers().acquireAsync(
-            jtADVANCE, "GetConsL2", hash, 0, InboundLedger::Reason::CONSENSUS);
+        Application* pApp = &app_;
+
+        app_.getJobQueue().addJob(jtADVANCE, "GetConsL2", [pApp, hash, this]() {
+            JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger2 started";
+            pApp->getInboundLedgers().acquireAsync(hash, 0, InboundLedger::Reason::CONSENSUS);
+        });
         return std::nullopt;
     }
@@ -26,12 +26,7 @@ public:
     // Queue. TODO review whether all callers of acquire() can use this
     // instead. Inbound ledger acquisition is asynchronous anyway.
     virtual void
-    acquireAsync(
-        JobType type,
-        std::string const& name,
-        uint256 const& hash,
-        std::uint32_t seq,
-        InboundLedger::Reason reason) = 0;
+    acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) = 0;

     virtual std::shared_ptr<InboundLedger>
     find(LedgerHash const& hash) = 0;
@@ -359,14 +359,7 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)

     if (!wasProgress)
     {
-        if (checkLocal())
-        {
-            // Done. Something else (probably consensus) built the ledger
-            // locally while waiting for data (or possibly before requesting)
-            XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done");
-            JLOG(journal_.info()) << "Finished while waiting " << hash_;
-            return;
-        }
+        checkLocal();

         mByHash = true;
@@ -2,7 +2,6 @@
 #include <xrpld/app/ledger/LedgerMaster.h>
 #include <xrpld/app/main/Application.h>

-#include <xrpl/basics/CanProcess.h>
 #include <xrpl/basics/DecayingSample.h>
 #include <xrpl/basics/scope.h>
 #include <xrpl/beast/container/aged_map.h>
@@ -59,15 +58,12 @@ public:
             (reason != InboundLedger::Reason::CONSENSUS))
             return {};

         std::stringstream ss;

         bool isNew = true;
         std::shared_ptr<InboundLedger> inbound;
         {
             ScopedLockType sl(mLock);
             if (stopping_)
             {
                 JLOG(j_.debug()) << "Abort(stopping): " << ss.str();
                 return {};
             }
@@ -86,61 +82,47 @@ public:
                     ++mCounter;
                 }
             }
             ss << " IsNew: " << (isNew ? "true" : "false");

             if (inbound->isFailed())
             {
                 JLOG(j_.debug()) << "Abort(failed): " << ss.str();
                 return {};
             }

             if (!isNew)
                 inbound->update(seq);

             if (!inbound->isComplete())
             {
                 JLOG(j_.debug()) << "InProgress: " << ss.str();
                 return {};
             }

             JLOG(j_.debug()) << "Complete: " << ss.str();
             return inbound->getLedger();
         };
         using namespace std::chrono_literals;
-        return perf::measureDurationAndLog(doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
+        std::shared_ptr<Ledger const> ledger =
+            perf::measureDurationAndLog(doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
+
+        return ledger;
     }

     void
-    acquireAsync(
-        JobType type,
-        std::string const& name,
-        uint256 const& hash,
-        std::uint32_t seq,
-        InboundLedger::Reason reason) override
+    acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) override
     {
-        if (auto check = std::make_shared<CanProcess const>(acquiresMutex_, pendingAcquires_, hash);
-            *check)
+        std::unique_lock lock(acquiresMutex_);
+        try
         {
-            app_.getJobQueue().addJob(type, name, [check, name, hash, seq, reason, this]() {
-                JLOG(j_.debug()) << "JOB acquireAsync " << name << " started ";
-                try
-                {
-                    acquire(hash, seq, reason);
-                }
-                catch (std::exception const& e)
-                {
-                    JLOG(j_.warn()) << "Exception thrown for acquiring new "
-                                       "inbound ledger "
-                                    << hash << ": " << e.what();
-                }
-                catch (...)
-                {
-                    JLOG(j_.warn()) << "Unknown exception thrown for acquiring new "
-                                       "inbound ledger "
-                                    << hash;
-                }
-            });
+            if (pendingAcquires_.contains(hash))
+                return;
+            pendingAcquires_.insert(hash);
+            scope_unlock unlock(lock);
+            acquire(hash, seq, reason);
         }
+        catch (std::exception const& e)
+        {
+            JLOG(j_.warn()) << "Exception thrown for acquiring new inbound ledger " << hash << ": "
+                            << e.what();
+        }
+        catch (...)
+        {
+            JLOG(j_.warn()) << "Unknown exception thrown for acquiring new inbound ledger " << hash;
+        }
+        pendingAcquires_.erase(hash);
     }

     std::shared_ptr<InboundLedger>
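The rewritten `acquireAsync` swaps the `CanProcess` guard for an explicit pending-set check, releasing the mutex (via `scope_unlock`) while the slow `acquire` runs. The shape of that pattern, sketched in Python with invented names (a conceptual analog, not the rippled API):

```python
import threading

_pending: set = set()
_mutex = threading.Lock()

def acquire_async(hash_, do_acquire):
    # Skip if another thread is already acquiring this hash.
    with _mutex:
        if hash_ in _pending:
            return
        _pending.add(hash_)
    try:
        # The lock is NOT held during the slow operation; the C++ code
        # achieves the same with scope_unlock on the unique_lock.
        do_acquire(hash_)
    finally:
        # Always clear the pending entry, even if do_acquire raised.
        with _mutex:
            _pending.discard(hash_)
```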
@@ -923,9 +923,8 @@ LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
         return;
     }

-    JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq << " ("
-                           << to_short_string(ledger->header().hash) << ") with >= " << minVal
-                           << " validations";
+    JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq
+                           << " with >= " << minVal << " validations";

     ledger->setValidated();
     ledger->setFull();
@@ -13,8 +13,7 @@ TimeoutCounter::TimeoutCounter(
     QueueJobParameter&& jobParameter,
     beast::Journal journal)
     : app_(app)
-    , sink_(journal, to_short_string(hash) + " ")
-    , journal_(sink_)
+    , journal_(journal)
     , hash_(hash)
     , timeouts_(0)
     , complete_(false)
@@ -34,7 +33,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
 {
     if (isDone())
         return;
-    JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count() << "ms";
     timer_.expires_after(timerInterval_);
     timer_.async_wait([wptr = pmDowncast()](boost::system::error_code const& ec) {
         if (ec == boost::asio::error::operation_aborted)
@@ -42,10 +40,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl)

         if (auto ptr = wptr.lock())
         {
-            JLOG(ptr->journal_.debug())
-                << "timer: ec: " << ec
-                << " (operation_aborted: " << boost::asio::error::operation_aborted << " - "
-                << (ec == boost::asio::error::operation_aborted ? "aborted" : "other") << ")";
             ScopedLockType sl(ptr->mtx_);
             ptr->queueJob(sl);
         }
@@ -3,7 +3,6 @@
 #include <xrpld/app/main/Application.h>

 #include <xrpl/beast/utility/Journal.h>
-#include <xrpl/beast/utility/WrappedSink.h>
 #include <xrpl/core/Job.h>

 #include <boost/asio/basic_waitable_timer.hpp>
@@ -104,7 +103,6 @@ protected:
     // Used in this class for access to boost::asio::io_context and
     // xrpl::Overlay. Used in subtypes for the kitchen sink.
     Application& app_;
-    beast::WrappedSink sink_;
     beast::Journal journal_;
     mutable std::recursive_mutex mtx_;
@@ -30,10 +30,10 @@
 #include <xrpld/rpc/MPTokenIssuanceID.h>
 #include <xrpld/rpc/ServerHandler.h>

-#include <xrpl/basics/CanProcess.h>
 #include <xrpl/basics/UptimeClock.h>
 #include <xrpl/basics/mulDiv.h>
 #include <xrpl/basics/safe_cast.h>
+#include <xrpl/basics/scope.h>
 #include <xrpl/beast/utility/rngfill.h>
 #include <xrpl/core/HashRouter.h>
 #include <xrpl/core/NetworkIDService.h>
@@ -401,7 +401,7 @@ public:
     isFull() override;

     void
-    setMode(OperatingMode om, char const* reason) override;
+    setMode(OperatingMode om) override;

     bool
     isBlocked() override;
@@ -839,7 +839,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
 inline void
 NetworkOPsImp::setStandAlone()
 {
-    setMode(OperatingMode::FULL, "setStandAlone");
+    setMode(OperatingMode::FULL);
 }

 inline void
@@ -982,7 +982,7 @@ NetworkOPsImp::processHeartbeatTimer()
         {
             if (mMode != OperatingMode::DISCONNECTED)
             {
-                setMode(OperatingMode::DISCONNECTED, "Heartbeat: insufficient peers");
+                setMode(OperatingMode::DISCONNECTED);
                 std::stringstream ss;
                 ss << "Node count (" << numPeers << ") has fallen "
                    << "below required minimum (" << minPeerCount_ << ").";
@@ -1006,7 +1006,7 @@ NetworkOPsImp::processHeartbeatTimer()

         if (mMode == OperatingMode::DISCONNECTED)
         {
-            setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
+            setMode(OperatingMode::CONNECTED);
             JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient.";
             CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers << " peers. ";
         }
@@ -1017,11 +1017,11 @@ NetworkOPsImp::processHeartbeatTimer()
         CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
         if (mMode == OperatingMode::SYNCING)
         {
-            setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
+            setMode(OperatingMode::SYNCING);
         }
         else if (mMode == OperatingMode::CONNECTED)
         {
-            setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
+            setMode(OperatingMode::CONNECTED);
         }
         auto newMode = mMode.load();
         if (origMode != newMode)
@@ -1726,7 +1726,7 @@ void
 NetworkOPsImp::setAmendmentBlocked()
 {
     amendmentBlocked_ = true;
-    setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
+    setMode(OperatingMode::CONNECTED);
 }

 inline bool
@@ -1757,7 +1757,7 @@ void
 NetworkOPsImp::setUNLBlocked()
 {
     unlBlocked_ = true;
-    setMode(OperatingMode::CONNECTED, "setUNLBlocked");
+    setMode(OperatingMode::CONNECTED);
 }

 inline void
@@ -1857,7 +1857,7 @@ NetworkOPsImp::checkLastClosedLedger(Overlay::PeerSequence const& peerList, uint

     if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
     {
-        setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
+        setMode(OperatingMode::CONNECTED);
     }

     if (consensus)
@@ -1945,8 +1945,8 @@ NetworkOPsImp::beginConsensus(
     // this shouldn't happen unless we jump ledgers
     if (mMode == OperatingMode::FULL)
     {
-        JLOG(m_journal.warn()) << "beginConsensus Don't have LCL, going to tracking";
-        setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
+        JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
+        setMode(OperatingMode::TRACKING);
         CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
     }
@@ -2074,7 +2074,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
         // validations we have for LCL. If the ledger is good enough, go to
         // TRACKING - TODO
         if (!needNetworkLedger_)
-            setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
+            setMode(OperatingMode::TRACKING);
     }

     if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) &&
@@ -2087,7 +2087,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
         if (registry_.get().getTimeKeeper().now() <
             (current->header().parentCloseTime + 2 * current->header().closeTimeResolution))
         {
-            setMode(OperatingMode::FULL, "endConsensus: check full");
+            setMode(OperatingMode::FULL);
         }
     }
@@ -2099,7 +2099,7 @@ NetworkOPsImp::consensusViewChange()
 {
     if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
     {
-        setMode(OperatingMode::CONNECTED, "consensusViewChange");
+        setMode(OperatingMode::CONNECTED);
     }
 }
@@ -2403,7 +2403,7 @@ NetworkOPsImp::pubPeerStatus(std::function<Json::Value(void)> const& func)
 }

 void
-NetworkOPsImp::setMode(OperatingMode om, char const* reason)
+NetworkOPsImp::setMode(OperatingMode om)
 {
     using namespace std::chrono_literals;
     if (om == OperatingMode::CONNECTED)
@@ -2423,12 +2423,11 @@ NetworkOPsImp::setMode(OperatingMode om, char const* reason)
     if (mMode == om)
         return;

-    auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
     mMode = om;

     accounting_.mode(om);

-    JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
+    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
     pubServer();
 }
@@ -2437,24 +2436,36 @@ NetworkOPsImp::recvValidation(std::shared_ptr<STValidation> const& val, std::string const& source)
 {
     JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source;

+    std::unique_lock lock(validationsMutex_);
+    BypassAccept bypassAccept = BypassAccept::no;
     try
     {
-        CanProcess const check(validationsMutex_, pendingValidations_, val->getLedgerHash());
-        try
+        if (pendingValidations_.contains(val->getLedgerHash()))
         {
-            BypassAccept bypassAccept = check ? BypassAccept::no : BypassAccept::yes;
-            handleNewValidation(registry_.app(), val, source, bypassAccept, m_journal);
+            bypassAccept = BypassAccept::yes;
         }
-        catch (std::exception const& e)
+        else
         {
-            JLOG(m_journal.warn()) << "Exception thrown for handling new validation "
-                                   << val->getLedgerHash() << ": " << e.what();
-        }
-        catch (...)
-        {
-            JLOG(m_journal.warn())
-                << "Unknown exception thrown for handling new validation " << val->getLedgerHash();
+            pendingValidations_.insert(val->getLedgerHash());
         }
+        scope_unlock unlock(lock);
+        handleNewValidation(registry_.get().getApp(), val, source, bypassAccept, m_journal);
     }
     catch (std::exception const& e)
     {
         JLOG(m_journal.warn()) << "Exception thrown for handling new validation "
                                << val->getLedgerHash() << ": " << e.what();
     }
     catch (...)
     {
         JLOG(m_journal.warn()) << "Unknown exception thrown for handling new validation "
                                << val->getLedgerHash();
     }
+    if (bypassAccept == BypassAccept::no)
+    {
+        pendingValidations_.erase(val->getLedgerHash());
+    }
+    lock.unlock();

     pubValidation(val);
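`recvValidation` follows the same pending-set shape as `acquireAsync`, but instead of skipping duplicate work it records a `BypassAccept` flag and still calls the handler. A conceptual Python sketch (invented names, not the rippled API):

```python
import threading

_pending_validations: set = set()
_lock = threading.Lock()

def recv_validation(ledger_hash, handle_new_validation):
    with _lock:
        # If the hash is already being processed elsewhere, still handle
        # the validation but tell the handler to bypass accept logic.
        bypass = ledger_hash in _pending_validations
        if not bypass:
            _pending_validations.add(ledger_hash)
    try:
        # Lock is released while the handler runs (scope_unlock in C++).
        handle_new_validation(ledger_hash, bypass)
    finally:
        # Only the thread that inserted the hash removes it.
        if not bypass:
            with _lock:
                _pending_validations.discard(ledger_hash)
```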