mirror of https://github.com/Xahau/xahaud.git

Compare commits (4 commits): featRNG2 ... ccache-100
| SHA1 |
|---|
| 9b1ac4b7b3 |
| 10cf8d8a01 |
| 693b42a530 |
| 6e78104e3d |
63  .github/actions/xahau-ga-build/action.yml  (vendored)
@@ -75,17 +75,37 @@ runs:
        SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
        echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT

    - name: Restore ccache directory for main branch
      if: inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true'
      id: ccache-restore
      uses: ./.github/actions/xahau-ga-cache-restore
      with:
        path: ~/.ccache-main
        key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
        restore-keys: |
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
        cache-type: ccache-main

    - name: Restore ccache directory for current branch
      if: inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
      id: ccache-restore-current-branch
      uses: ./.github/actions/xahau-ga-cache-restore
      with:
        path: ~/.ccache-current
        key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
        restore-keys: |
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
        cache-type: ccache-current

    - name: Configure ccache
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: |
        # Create cache directories
        mkdir -p ~/.ccache-cache

        # Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
        mkdir -p ~/.config/ccache
        export CCACHE_CONFIGPATH="$HOME/.config/ccache/ccache.conf"
        echo "CCACHE_CONFIGPATH=$CCACHE_CONFIGPATH" >> $GITHUB_ENV
        mkdir -p ~/.ccache-main ~/.ccache-current

        # Keep config separate from cache_dir so configs aren't swapped when CCACHE_DIR changes between steps
        mkdir -p ~/.config/ccache
@@ -96,9 +116,20 @@ runs:
        ccache --set-config=max_size=${{ inputs.ccache_max_size }}
        ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
        ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}
        ccache --set-config=cache_dir="$HOME/.ccache-cache"
        echo "CCACHE_DIR=$HOME/.ccache-cache" >> $GITHUB_ENV
        echo "📦 using ~/.ccache-cache as ccache cache directory"

        # Determine if we're on the main branch
        if [ "${{ steps.safe-branch.outputs.name }}" = "${{ inputs.main_branch }}" ]; then
          # Main branch: use main branch cache only
          ccache --set-config=cache_dir="$HOME/.ccache-main"
          echo "CCACHE_DIR=$HOME/.ccache-main" >> $GITHUB_ENV
          echo "📦 Main branch: using ~/.ccache-main"
        else
          # Feature branch: use current branch cache with main as secondary (read-only fallback)
          ccache --set-config=cache_dir="$HOME/.ccache-current"
          ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
          echo "CCACHE_DIR=$HOME/.ccache-current" >> $GITHUB_ENV
          echo "📦 Feature branch: using ~/.ccache-current with ~/.ccache-main as secondary"
        fi

        # Print config for verification
        echo "=== ccache configuration ==="
@@ -213,3 +244,17 @@ runs:
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: ccache -s

    - name: Save ccache directory for main branch
      if: success() && inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
      uses: actions/cache/save@v4
      with:
        path: ~/.ccache-main
        key: ${{ steps.ccache-restore.outputs.cache-primary-key }}

    - name: Save ccache directory for current branch
      if: success() && inputs.gha_cache_enabled == 'true' && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
      uses: actions/cache/save@v4
      with:
        path: ~/.ccache-current
        key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}
31  .github/actions/xahau-ga-dependencies/action.yml  (vendored)
@@ -17,6 +17,10 @@ inputs:
    description: 'Cache version for invalidation'
    required: false
    default: '1'
  gha_cache_enabled:
    description: 'Whether to use actions/cache (disable for self-hosted with volume mounts)'
    required: false
    default: 'true'
  main_branch:
    description: 'Main branch name for restore keys'
    required: false
@@ -59,14 +63,18 @@ outputs:
runs:
  using: 'composite'
  steps:
    - name: Configure Conan cache paths
      if: inputs.os == 'Linux'
      shell: bash
      run: |
        mkdir -p /.conan-cache/conan2 /.conan-cache/conan2_download /.conan-cache/conan2_sources
        echo 'core.cache:storage_path=/.conan-cache/conan2' > ~/.conan2/global.conf
        echo 'core.download:download_cache=/.conan-cache/conan2_download' >> ~/.conan2/global.conf
        echo 'core.sources:download_cache=/.conan-cache/conan2_sources' >> ~/.conan2/global.conf
    - name: Restore Conan cache
      if: inputs.gha_cache_enabled == 'true'
      id: cache-restore-conan
      uses: ./.github/actions/xahau-ga-cache-restore
      with:
        path: ~/.conan2
        # Note: compiler-id format is compiler-version-stdlib[-gccversion]
        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-${{ inputs.configuration }}
        restore-keys: |
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
        cache-type: Conan

    - name: Configure Conan cache paths
      if: inputs.gha_cache_enabled == 'false'
@@ -153,3 +161,10 @@ runs:
          --build missing \
          --settings build_type=${{ inputs.configuration }} \
          ..

    - name: Save Conan cache
      if: success() && inputs.gha_cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
      uses: actions/cache/save@v4
      with:
        path: ~/.conan2
        key: ${{ steps.cache-restore-conan.outputs.cache-primary-key }}
71  .github/workflows/xahau-ga-macos.yml  (vendored)
@@ -20,7 +20,7 @@ jobs:
          - Ninja
        configuration:
          - Debug
    runs-on: [self-hosted, macOS]
    runs-on: macos-15
    env:
      build_dir: .build
      # Bump this number to invalidate all caches globally.
@@ -30,29 +30,61 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Add Homebrew to PATH
      - name: Get commit message
        id: get-commit-message
        uses: ./.github/actions/xahau-ga-get-commit-message
        with:
          event-name: ${{ github.event_name }}
          head-commit-message: ${{ github.event.head_commit.message }}
          pr-head-sha: ${{ github.event.pull_request.head.sha }}

      - name: Install Conan
        run: |
          echo "/opt/homebrew/bin" >> "$GITHUB_PATH"
          echo "/opt/homebrew/sbin" >> "$GITHUB_PATH"
          brew install conan
          # Verify Conan 2 is installed
          conan --version

      - name: Install Coreutils
        run: |
          brew install coreutils
          echo "Num proc: $(nproc)"

      # Instead of installing tools globally with brew, use mise to isolate
      # the toolchain per Runner directory.
      - name: Setup toolchain (mise)
        uses: jdx/mise-action@v2
        with:
          install: true
      - name: Install Ninja
        if: matrix.generator == 'Ninja'
        run: brew install ninja

      - name: Install tools via mise
      - name: Install Python
        run: |
          mise install
          mise use cmake@3.23.1 python@3.12 pipx@latest conan@2 ninja@latest ccache@latest
          mise reshim
          echo "$HOME/.local/share/mise/shims" >> "$GITHUB_PATH"
          if which python3 > /dev/null 2>&1; then
            echo "Python 3 executable exists"
            python3 --version
          else
            brew install python@3.12
          fi
          # Create 'python' symlink if it doesn't exist (for tools expecting 'python')
          if ! which python > /dev/null 2>&1; then
            sudo ln -sf $(which python3) /usr/local/bin/python
          fi

      - name: Install CMake
        run: |
          # Install CMake 3.x to match local dev environments
          # With Conan 2 and the policy args passed to CMake, newer versions
          # can have issues with dependencies that require cmake_minimum_required < 3.5
          brew uninstall cmake --ignore-dependencies 2>/dev/null || true

          # Download and install CMake 3.31.7 directly
          curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz
          tar -xzf cmake.tar.gz

          # Move the entire CMake.app to /Applications
          sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/

          echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH
          /Applications/CMake.app/Contents/bin/cmake --version

      - name: Install ccache
        run: brew install ccache

      - name: Check environment
        run: |
@@ -66,14 +98,6 @@ jobs:
          echo "---- Full Environment ----"
          env

      - name: Get commit message
        id: get-commit-message
        uses: ./.github/actions/xahau-ga-get-commit-message
        with:
          event-name: ${{ github.event_name }}
          head-commit-message: ${{ github.event.head_commit.message }}
          pr-head-sha: ${{ github.event.pull_request.head.sha }}

      - name: Detect compiler version
        id: detect-compiler
        run: |
@@ -105,7 +129,6 @@ jobs:
          cache_version: ${{ env.CACHE_VERSION }}
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          stdlib: libcxx
          ccache_max_size: '100G'

      - name: Test
        run: |
4  .github/workflows/xahau-ga-nix.yml  (vendored)
@@ -181,7 +181,8 @@ jobs:
      image: ubuntu:24.04
      volumes:
        - /home/runner/.conan-cache:/.conan-cache
        - /home/runner/.ccache-cache:/github/home/.ccache-cache
        - /home/runner/.ccache-main:/github/home/.ccache-main
        - /home/runner/.ccache-current:/github/home/.ccache-current
    defaults:
      run:
        shell: bash
@@ -324,6 +325,7 @@ jobs:
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          stdlib: ${{ matrix.stdlib }}
          clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
          gha_cache_enabled: 'false' # Disable caching for self hosted runner
          ccache_max_size: '100G'

      - name: Set artifact name
@@ -48,9 +48,13 @@ target_sources (xrpl_core PRIVATE
    src/ripple/beast/net/impl/IPAddressV6.cpp
    src/ripple/beast/net/impl/IPEndpoint.cpp
    src/ripple/beast/utility/src/beast_Journal.cpp
    src/ripple/beast/utility/src/beast_PropertyStream.cpp
    # Enhanced logging - compiles to empty when BEAST_ENHANCED_LOGGING is not defined
    src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
    src/ripple/beast/utility/src/beast_PropertyStream.cpp)

# Conditionally add enhanced logging source when BEAST_ENHANCED_LOGGING is enabled
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
    target_sources(xrpl_core PRIVATE
        src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
endif()

#[===============================[
    core sources
@@ -158,16 +162,12 @@ target_link_libraries (xrpl_core
    date::date
    Ripple::opts)

# date-tz for enhanced logging (always linked, code is #ifdef guarded)
if(TARGET date::date-tz)
    target_link_libraries(xrpl_core PUBLIC date::date-tz)
# Link date-tz library when enhanced logging is enabled
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
    if(TARGET date::date-tz)
        target_link_libraries(xrpl_core PUBLIC date::date-tz)
    endif()
endif()

# BEAST_ENHANCED_LOGGING: enable for Debug builds OR when explicitly requested
# Uses generator expression so it works with multi-config generators (Xcode, VS, Ninja Multi-Config)
target_compile_definitions(xrpl_core PUBLIC
    $<$<OR:$<CONFIG:Debug>,$<BOOL:${BEAST_ENHANCED_LOGGING}>>:BEAST_ENHANCED_LOGGING=1>
)
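On the C++ side a translation unit only has to key off that single definition; the enhanced-logging sources later in this diff follow the same pattern. A minimal sketch (the function and output format below are illustrative assumptions, not the project's API):

```cpp
// Minimal sketch of how code consumes the BEAST_ENHANCED_LOGGING definition
// injected by the CMake logic above.
#include <iostream>

void logLine(char const* msg, char const* file, int line)
{
#ifdef BEAST_ENHANCED_LOGGING
    // Debug (or explicitly enabled) builds: append file:line metadata.
    std::cout << msg << " [" << file << ":" << line << "]\n";
#else
    // Release builds: the location plumbing compiles away entirely.
    std::cout << msg << '\n';
#endif
}
```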
#[=================================[
|
||||
main/core headers installation
|
||||
#]=================================]
|
||||
|
||||
@@ -37,11 +37,20 @@ endif() #git
set(SOURCE_ROOT_PATH "${CMAKE_CURRENT_SOURCE_DIR}/src/")
add_definitions(-DSOURCE_ROOT_PATH="${SOURCE_ROOT_PATH}")

# BEAST_ENHANCED_LOGGING - adds file:line numbers and formatting to logs
# Automatically enabled for Debug builds via generator expression
# Can be explicitly controlled with -DBEAST_ENHANCED_LOGGING=ON/OFF
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages (auto: Debug=ON, Release=OFF)" OFF)
message(STATUS "BEAST_ENHANCED_LOGGING option: ${BEAST_ENHANCED_LOGGING}")
# BEAST_ENHANCED_LOGGING option - adds file:line numbers and formatting to logs
# Default to ON for Debug builds, OFF for Release
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
    option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" ON)
else()
    option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" OFF)
endif()

if(BEAST_ENHANCED_LOGGING)
    add_definitions(-DBEAST_ENHANCED_LOGGING=1)
    message(STATUS "Log line numbers enabled")
else()
    message(STATUS "Log line numbers disabled")
endif()

if(thread_safety_analysis)
    add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
@@ -471,10 +471,6 @@ ManifestCache::applyManifest(Manifest m)
    auto masterKey = m.masterKey;
    map_.emplace(std::move(masterKey), std::move(m));

    // Increment sequence to invalidate cached manifest messages
    seq_++;

    return ManifestDisposition::accepted;
}
@@ -360,8 +360,7 @@ Logs::format(
    if (!partition.empty())
    {
#ifdef BEAST_ENHANCED_LOGGING
        if (beast::detail::should_log_use_colors())
            output += beast::detail::get_log_highlight_color();
        output += beast::detail::get_log_highlight_color();
#endif
        output += partition + ":";
    }
@@ -393,8 +392,7 @@ Logs::format(
    }

#ifdef BEAST_ENHANCED_LOGGING
    if (beast::detail::should_log_use_colors())
        output += "\033[0m";
    output += "\033[0m";
#endif

    output += message;
@@ -41,14 +41,6 @@ get_log_highlight_color();
constexpr const char*
strip_source_root(const char* file)
{
    // Handle relative paths from build/ directory (common with ccache)
    // e.g., "../src/ripple/..." -> "ripple/..."
    if (file && file[0] == '.' && file[1] == '.' && file[2] == '/' &&
        file[3] == 's' && file[4] == 'r' && file[5] == 'c' && file[6] == '/')
    {
        return file + 7;  // skip "../src/"
    }

#ifdef SOURCE_ROOT_PATH
    constexpr const char* sourceRoot = SOURCE_ROOT_PATH;
    constexpr auto strlen_constexpr = [](const char* s) constexpr
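The character-by-character test above is just a constexpr-friendly way of mapping a ccache-style relative path onto a repo-relative one. A standalone sketch of the same idea, with an assumed example path (not the project's implementation):

```cpp
#include <cassert>
#include <cstring>
#include <string_view>

// Strip a leading "../src/" so that, for example,
// "../src/ripple/app/Main.cpp" becomes "ripple/app/Main.cpp".
constexpr char const*
stripBuildRelativePrefix(char const* file)
{
    constexpr std::string_view prefix{"../src/"};
    std::string_view const sv{file};
    return sv.substr(0, prefix.size()) == prefix ? file + prefix.size() : file;
}

int
main()
{
    assert(
        std::strcmp(
            stripBuildRelativePrefix("../src/ripple/app/Main.cpp"),
            "ripple/app/Main.cpp") == 0);
}
```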
@@ -17,8 +17,6 @@
*/
//==============================================================================

#ifdef BEAST_ENHANCED_LOGGING

#include <ripple/beast/utility/EnhancedLogging.h>
#include <cstdlib>
#include <cstring>
@@ -114,5 +112,3 @@ log_write_location_string(std::ostream& os, const char* file, int line)
}  // namespace detail
}  // namespace beast

#endif  // BEAST_ENHANCED_LOGGING
@@ -155,43 +155,14 @@ Journal::ScopedStream::~ScopedStream()
#ifdef BEAST_ENHANCED_LOGGING
    // Add suffix if location is enabled
    if (file_ && detail::should_show_location() && !s.empty())
    if (file_ && detail::should_show_location() && !s.empty() && s != "\n")
    {
        // Single optimized scan from the end
        size_t const lastNonWhitespace = s.find_last_not_of(" \n\r\t");

        // Skip if message is only whitespace (e.g., just "\n" or " \n\n")
        if (lastNonWhitespace != std::string::npos)
        {
            // Count only the trailing newlines (tiny range)
            size_t trailingNewlines = 0;
            for (size_t i = lastNonWhitespace + 1; i < s.length(); ++i)
            {
                if (s[i] == '\n')
                    ++trailingNewlines;
            }

            // Build location string once
            std::ostringstream locStream;
            detail::log_write_location_string(locStream, file_, line_);
            std::string const location = locStream.str();

            // Pre-allocate exact size → zero reallocations
            size_t const finalSize = lastNonWhitespace + 1 + 1 +
                location.length() + trailingNewlines;

            std::string result;
            result.reserve(finalSize);

            // Direct string ops (no ostringstream overhead)
            result.append(s, 0, lastNonWhitespace + 1);
            result.push_back(' ');
            result += location;
            if (trailingNewlines > 0)
                result.append(trailingNewlines, '\n');

            s = std::move(result);  // Move, no copy
        }
        std::ostringstream combined;
        combined << s;
        if (!s.empty() && s.back() != ' ')
            combined << " ";
        detail::log_write_location_string(combined, file_, line_);
        s = combined.str();
    }
#endif
@@ -479,23 +479,10 @@ private:
    to reach consensus. Update our position only on the timer, and in this
    phase.

    If we have consensus, move to the shuffle phase.
    */
    void
    phaseEstablish();

    /** Handle shuffle phase.

        In the shuffle phase, UNLReport nodes exchange entropy to build
        a consensus entropy that is then used as an RNG source for Hooks.

        The entropy is injected as a ttSHUFFLE pseudo-transaction into the
        final ledger.

        If we have consensus, move to the accepted phase.
    */
    void
    phaseShuffle();
    phaseEstablish();
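The diff does not show how the per-node entropy values are combined into the consensus entropy, so the following is only a sketch of one plausible scheme; the type and function names are assumptions for illustration, not code from this branch:

```cpp
#include <array>
#include <cstdint>
#include <vector>

// Hypothetical illustration only: fold every validator's 256-bit entropy
// contribution into one value.  XOR is used here purely because it is
// order-independent; a real design would more likely hash the agreed-upon
// set of contributions (e.g. with sha512Half).
using Hash256 = std::array<std::uint8_t, 32>;

Hash256
combineEntropy(std::vector<Hash256> const& contributions)
{
    Hash256 combined{};
    for (auto const& c : contributions)
        for (std::size_t i = 0; i < combined.size(); ++i)
            combined[i] ^= c[i];
    return combined;
}
```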
    /** Evaluate whether pausing increases likelihood of validation.
     *
@@ -601,10 +588,6 @@ private:
    // Peer proposed positions for the current round
    hash_map<NodeID_t, PeerPosition_t> currPeerPositions_;

    // our and our peers' entropy as per TMShuffle, used in phaseShuffle
    std::optional<uint256> ourEntropy_;
    hash_map<NodeID_t, std::pair<uint256, uint256>> currPeerEntropy_;

    // Recently received peer positions, available when transitioning between
    // ledgers or rounds
    hash_map<NodeID_t, std::deque<PeerPosition_t>> recentPeerPositions_;
@@ -849,10 +832,6 @@ Consensus<Adaptor>::timerEntry(NetClock::time_point const& now)
    {
        phaseEstablish();
    }
    else if (phase_ == ConsensusPhase::shuffle)
    {
        phaseShuffle();
    }
}

template <class Adaptor>
@@ -1312,12 +1291,8 @@ Consensus<Adaptor>::phaseEstablish()
    adaptor_.updateOperatingMode(currPeerPositions_.size());
    prevProposers_ = currPeerPositions_.size();
    prevRoundTime_ = result_->roundTime.read();

    // RHTODO: guard with amendment
    phase_ = ConsensusPhase::shuffle;
    JLOG(j_.debug()) << "transitioned to ConsensusPhase::shuffle";

    /*
    phase_ = ConsensusPhase::accepted;
    JLOG(j_.debug()) << "transitioned to ConsensusPhase::accepted";
    adaptor_.onAccept(
        *result_,
        previousLedger_,
@@ -1325,60 +1300,6 @@ Consensus<Adaptor>::phaseEstablish()
        rawCloseTimes_,
        mode_.get(),
        getJson(true));
    */
}

template <class Adaptor>
void
Consensus<Adaptor>::phaseShuffle()
{
    // can only establish consensus if we already took a stance
    assert(result_);

    using namespace std::chrono;
    ConsensusParms const& parms = adaptor_.parms();

    result_->roundTime.tick(clock_.now());
    result_->proposers = currPeerPositions_.size();

    convergePercent_ = result_->roundTime.read() * 100 /
        std::max<milliseconds>(prevRoundTime_, parms.avMIN_CONSENSUS_TIME);

    // Give everyone a chance to take an initial position
    if (result_->roundTime.read() < parms.ledgerMIN_CONSENSUS)
        return;

    updateOurPositions();

    // Nothing to do if too many laggards or we don't have consensus.
    if (shouldPause() || !haveConsensus())
        return;

    if (!haveCloseTimeConsensus_)
    {
        JLOG(j_.info()) << "We have TX consensus but not CT consensus";
        return;
    }

    JLOG(j_.info()) << "Converge cutoff (" << currPeerPositions_.size()
                    << " participants)";
    adaptor_.updateOperatingMode(currPeerPositions_.size());
    prevProposers_ = currPeerPositions_.size();
    prevRoundTime_ = result_->roundTime.read();

    // RHTODO: guard with amendment
    phase_ = ConsensusPhase::shuffle;
    JLOG(j_.debug()) << "transitioned to ConsensusPhase::shuffle";

    /*
    adaptor_.onAccept(
        *result_,
        previousLedger_,
        closeResolution_,
        rawCloseTimes_,
        mode_.get(),
        getJson(true));
    */
}

template <class Adaptor>
@@ -87,15 +87,15 @@ to_string(ConsensusMode m)
/** Phases of consensus for a single ledger round.

  @code
        "close"             "shuffle"            "accept"
   open ------- > establish -------> shuffle ---------> accepted
     ^               |                                      |
     |---------------|                                      |
     ^                     "startRound"                     |
     |------------------------------------------------------|
        "close"             "accept"
   open ------- > establish ---------> accepted
     ^               |                    |
     |---------------|                    |
     ^                     "startRound"   |
     |------------------------------------|
  @endcode

  The typical transition goes from open to establish to shuffle to accepted and
  The typical transition goes from open to establish to accepted and
  then a call to startRound begins the process anew. However, if a wrong prior
  ledger is detected and recovered during the establish or accept phase,
  consensus will internally go back to open (see Consensus::handleWrongLedger).
@@ -107,9 +107,6 @@ enum class ConsensusPhase {
    //! Establishing consensus by exchanging proposals with our peers
    establish,

    //! Negotiate featureRNG entropy
    shuffle,

    //! We have accepted a new last closed ledger and are waiting on a call
    //! to startRound to begin the next consensus round. No changes
    //! to consensus phase occur while in this phase.
@@ -125,8 +122,6 @@ to_string(ConsensusPhase p)
            return "open";
        case ConsensusPhase::establish:
            return "establish";
        case ConsensusPhase::shuffle:
            return "shuffle";
        case ConsensusPhase::accepted:
            return "accepted";
        default:
@@ -1094,7 +1094,6 @@ trustTransferLockedBalance(
    }
    return tesSUCCESS;
}

}  // namespace ripple

#endif
@@ -484,61 +484,44 @@ OverlayImpl::start()
    m_peerFinder->setConfig(config);
    m_peerFinder->start();

    auto addIps = [this](std::vector<std::string> ips, bool fixed) {
    auto addIps = [&](std::vector<std::string> bootstrapIps) -> void {
        beast::Journal const& j = app_.journal("Overlay");
        for (auto& ip : ips)
        for (auto& ip : bootstrapIps)
        {
            std::size_t pos = ip.find('#');
            if (pos != std::string::npos)
                ip.erase(pos);

            JLOG(j.trace())
                << "Found " << (fixed ? "fixed" : "bootstrap") << " IP: " << ip;
            JLOG(j.trace()) << "Found bootstrap IP: " << ip;
        }

        m_resolver.resolve(
            ips,
            [this, fixed](
                std::string const& name,
            bootstrapIps,
            [&](std::string const& name,
                std::vector<beast::IP::Endpoint> const& addresses) {
                std::vector<std::string> ips;
                ips.reserve(addresses.size());
                beast::Journal const& j = app_.journal("Overlay");
                std::string const base("config: ");

                std::vector<beast::IP::Endpoint> eps;
                eps.reserve(addresses.size());
                for (auto const& addr : addresses)
                {
                    auto ep = addr.port() == 0 ? addr.at_port(DEFAULT_PEER_PORT)
                                               : addr;
                    JLOG(j.trace())
                        << "Parsed " << (fixed ? "fixed" : "bootstrap")
                        << " IP: " << ep;
                    eps.push_back(ep);
                    std::string addrStr = addr.port() == 0
                        ? to_string(addr.at_port(DEFAULT_PEER_PORT))
                        : to_string(addr);
                    JLOG(j.trace()) << "Parsed bootstrap IP: " << addrStr;
                    ips.push_back(addrStr);
                }

                if (eps.empty())
                    return;

                if (fixed)
                {
                    m_peerFinder->addFixedPeer(base + name, eps);
                }
                else
                {
                    std::vector<std::string> strs;
                    strs.reserve(eps.size());
                    for (auto const& ep : eps)
                        strs.push_back(to_string(ep));
                    m_peerFinder->addFallbackStrings(base + name, strs);
                }
                std::string const base("config: ");
                if (!ips.empty())
                    m_peerFinder->addFallbackStrings(base + name, ips);
            });
    };

    if (!app_.config().IPS.empty())
        addIps(app_.config().IPS, false);
        addIps(app_.config().IPS);

    if (!app_.config().IPS_FIXED.empty())
        addIps(app_.config().IPS_FIXED, true);
        addIps(app_.config().IPS_FIXED);

    auto const timer = std::make_shared<Timer>(*this);
    std::lock_guard lock(mutex_);
@@ -1918,100 +1918,6 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
    app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
}

void
PeerImp::onMessage(std::shared_ptr<protocol::TMShuffle> const& m)
{
    protocol::TMShuffle& shf = *m;

    auto const sig = makeSlice(shf.signature());

    // Preliminary check for the validity of the signature: A DER encoded
    // signature can't be longer than 72 bytes.
    if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
        (publicKeyType(makeSlice(shf.nodepubkey())) != KeyType::secp256k1))
    {
        JLOG(p_journal_.warn()) << "Shuffle: malformed";
        fee_ = Resource::feeInvalidSignature;
        return;
    }

    if (!stringIsUint256Sized(shf.nodeentropy()) ||
        !stringIsUint256Sized(shf.consensusentropy()) ||
        !stringIsUint256Sized(shf.previousledger()))
    {
        JLOG(p_journal_.warn()) << "Shuffle: malformed";
        fee_ = Resource::feeInvalidRequest;
        return;
    }

    PublicKey const publicKey{makeSlice(shf.nodepubkey())};
    auto const isTrusted = app_.validators().trusted(publicKey);

    if (!isTrusted)
        return;

    uint256 const prevLedger{shf.previousledger()};
    uint32_t const shuffleSeq{shf.shuffleseq()};
    uint256 const nodeEntropy{shf.nodeentropy()};
    uint256 const consensusEntropy{shf.consensusentropy()};

    uint256 const suppression = sha512Half(std::string("TMShuffle", sig));

    if (auto [added, relayed] =
            app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
        !added)
    {
        // Count unique messages (Slots has its own 'HashRouter'), which a peer
        // receives within IDLED seconds since the message has been relayed.
        if (reduceRelayReady() && relayed &&
            (stopwatch().now() - *relayed) < reduce_relay::IDLED)
            overlay_.updateSlotAndSquelch(
                suppression, publicKey, id_, protocol::mtSHUFFLE);
        JLOG(p_journal_.trace()) << "Shuffle: duplicate";
        return;
    }

    if (!isTrusted)
    {
        if (tracking_.load() == Tracking::diverged)
        {
            JLOG(p_journal_.debug())
                << "Proposal: Dropping untrusted (peer divergence)";
            return;
        }

        if (!cluster() && app_.getFeeTrack().isLoadedLocal())
        {
            JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
            return;
        }
    }

    JLOG(p_journal_.trace())
        << "Proposal: " << (isTrusted ? "trusted" : "untrusted");

    auto proposal = RCLCxPeerPos(
        publicKey,
        sig,
        suppression,
        RCLCxPeerPos::Proposal{
            prevLedger,
            set.proposeseq(),
            proposeHash,
            closeTime,
            app_.timeKeeper().closeTime(),
            calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});

    std::weak_ptr<PeerImp> weak = shared_from_this();
    app_.getJobQueue().addJob(
        isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
        "recvPropose->checkPropose",
        [weak, isTrusted, m, proposal]() {
            if (auto peer = weak.lock())
                peer->checkPropose(isTrusted, m, proposal);
        });
}

void
PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
{
@@ -450,12 +450,3 @@ message TMHaveTransactions
    repeated bytes hashes = 1;
}

message TMShuffle
{
    required bytes nodeEntropy = 1;
    required bytes consensusEntropy = 2;
    required uint32 shuffleSeq = 3;
    required bytes nodePubKey = 4;
    required bytes previousledger = 5;
    required bytes signature = 6;       // signature of above fields
}
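For reference, protoc lower-cases these field names in the generated C++ accessors (nodeentropy(), set_nodeentropy(), and so on), which is the spelling the PeerImp handler above reads. A hedged sketch of how a sending node might populate the message; the parameter names and signing inputs are placeholders, since the diff only shows the receive side:

```cpp
#include <cstdint>
#include <string>

#include "ripple.pb.h"  // generated from the .proto above

// Illustrative only: fill a TMShuffle for broadcast.  What exactly gets
// signed, and with which key, is an assumption; the proto only notes
// "signature of above fields".
protocol::TMShuffle
makeShuffleMessage(
    std::string const& nodeEntropy32,       // 32-byte entropy contribution
    std::string const& consensusEntropy32,  // 32-byte running consensus value
    std::uint32_t shuffleSeq,
    std::string const& nodePubKey,          // serialized secp256k1 public key
    std::string const& previousLedger32,    // 32-byte previous ledger hash
    std::string const& signature)           // DER signature over the fields
{
    protocol::TMShuffle msg;
    msg.set_nodeentropy(nodeEntropy32);
    msg.set_consensusentropy(consensusEntropy32);
    msg.set_shuffleseq(shuffleSeq);
    msg.set_nodepubkey(nodePubKey);
    msg.set_previousledger(previousLedger32);
    msg.set_signature(signature);
    return msg;
}
```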