Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-18 17:45:48 +00:00)

Compare commits: 30 commits, nd-snap-20 ... actions-ca
Commits in this comparison (SHA1):

8b39d0915f, 9ed20a4f1c, 89ffc1969b, 79fdafe638, 2a10013dfc, 6f148a8ac7,
96222baf5e, 74477d2c13, 9378f1a0ad, 6fa6a96e3a, b0fcd36bcd, 1ec31e79c9,
9c8b005406, 687ccf4203, 83f09fd8ab, 15c7ad6f78, 1f12b9ec5a, ad0531ad6c,
e580f7cfc0, 094f011006, 39d1c43901, b3e6a902cb, fa1b93bfd8, 92e3a927fc,
8f7ebf0377, 46cf6785ab, 3c4c9c87c5, 7a790246fb, 1a3d2db8ef, 2fc912d54d
@@ -1,31 +0,0 @@
name: 'Configure ccache'
description: 'Sets up ccache with consistent configuration'

inputs:
  max_size:
    description: 'Maximum cache size'
    required: false
    default: '2G'
  hash_dir:
    description: 'Whether to include directory paths in hash'
    required: false
    default: 'true'
  compiler_check:
    description: 'How to check compiler for changes'
    required: false
    default: 'content'

runs:
  using: 'composite'
  steps:
    - name: Configure ccache
      shell: bash
      run: |
        mkdir -p ~/.ccache
        export CONF_PATH="${CCACHE_CONFIGPATH:-${CCACHE_DIR:-$HOME/.ccache}/ccache.conf}"
        mkdir -p $(dirname "$CONF_PATH")
        echo "max_size = ${{ inputs.max_size }}" > "$CONF_PATH"
        echo "hash_dir = ${{ inputs.hash_dir }}" >> "$CONF_PATH"
        echo "compiler_check = ${{ inputs.compiler_check }}" >> "$CONF_PATH"
        ccache -p # Print config for verification
        ccache -z # Zero statistics before the build
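The deleted action above (referenced elsewhere in this diff as `./.github/actions/xahau-configure-ccache`) wrote a `ccache.conf` by hand; the replacement build action shown below switches to `ccache --set-config`. A minimal sketch of the equivalent commands, assuming ccache's default cache directory `~/.ccache` and the action's default inputs:

```
mkdir -p ~/.ccache
ccache --set-config=max_size=2G
ccache --set-config=hash_dir=true
ccache --set-config=compiler_check=content
ccache -p   # print the effective configuration for verification
ccache -z   # zero statistics before the build
```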
.github/actions/xahau-ga-build/action.yml (vendored, 151 lines changed)
@@ -21,7 +21,7 @@ inputs:
    required: false
    default: ''
  compiler-id:
    description: 'Unique identifier for compiler/version combination used for cache keys'
    description: 'Unique identifier: compiler-version-stdlib[-gccversion] (e.g. clang-14-libstdcxx-gcc11, gcc-13-libstdcxx)'
    required: false
    default: ''
  cache_version:
@@ -36,6 +36,35 @@ inputs:
    description: 'Main branch name for restore keys'
    required: false
    default: 'dev'
  stdlib:
    description: 'C++ standard library to use'
    required: true
    type: choice
    options:
      - libstdcxx
      - libcxx
  clang_gcc_toolchain:
    description: 'GCC version to use for Clang toolchain (e.g. 11, 13)'
    required: false
    default: ''
  ccache_max_size:
    description: 'Maximum ccache size'
    required: false
    default: '2G'
  ccache_hash_dir:
    description: 'Whether to include directory paths in hash'
    required: false
    default: 'true'
  ccache_compiler_check:
    description: 'How to check compiler for changes'
    required: false
    default: 'content'
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 cache storage'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 cache storage'
    required: true

runs:
  using: 'composite'
@@ -51,7 +80,7 @@ runs:
    - name: Restore ccache directory
      if: inputs.ccache_enabled == 'true'
      id: ccache-restore
      uses: actions/cache/restore@v4
      uses: ./.github/actions/xahau-ga-cache-restore
      with:
        path: ~/.ccache
        key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
@@ -59,7 +88,33 @@ runs:
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

    - name: Configure ccache
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: |
        # Use ccache's default cache_dir (~/.ccache) - don't override it
        # This avoids tilde expansion issues when setting it explicitly

        # Create cache directory using ccache's default
        mkdir -p ~/.ccache

        # Configure ccache settings (but NOT cache_dir - use default)
        # This overwrites any cached config to ensure fresh configuration
        ccache --set-config=max_size=${{ inputs.ccache_max_size }}
        ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
        ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}

        # Note: Not setting CCACHE_DIR - let ccache use its default (~/.ccache)

        # Print config for verification
        echo "=== ccache configuration ==="
        ccache -p

        # Zero statistics before the build
        ccache -z

    - name: Configure project
      shell: bash
@@ -75,25 +130,93 @@ runs:
        if [ -n "${{ inputs.cxx }}" ]; then
        export CXX="${{ inputs.cxx }}"
        fi

        # Configure ccache launcher args
        CCACHE_ARGS=""

        # Create wrapper toolchain that overlays ccache on top of Conan's toolchain
        # This enables ccache for the main app build without affecting Conan dependency builds
        if [ "${{ inputs.ccache_enabled }}" = "true" ]; then
        CCACHE_ARGS="-DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
        cat > wrapper_toolchain.cmake <<'EOF'
        # Include Conan's generated toolchain first (sets compiler, flags, etc.)
        # Note: CMAKE_CURRENT_LIST_DIR is the directory containing this wrapper (.build/)
        include(${CMAKE_CURRENT_LIST_DIR}/build/generators/conan_toolchain.cmake)

        # Overlay ccache configuration for main application build
        # This does NOT affect Conan dependency builds (already completed)
        set(CMAKE_C_COMPILER_LAUNCHER ccache CACHE STRING "C compiler launcher" FORCE)
        set(CMAKE_CXX_COMPILER_LAUNCHER ccache CACHE STRING "C++ compiler launcher" FORCE)
        EOF
        TOOLCHAIN_FILE="wrapper_toolchain.cmake"
        echo "✅ Created wrapper toolchain with ccache enabled"
        else
        TOOLCHAIN_FILE="build/generators/conan_toolchain.cmake"
        echo "ℹ️ Using Conan toolchain directly (ccache disabled)"
        fi

        # Configure C++ standard library if specified
        # libstdcxx used for clang-14/16 to work around missing lexicographical_compare_three_way in libc++
        # libcxx can be used with clang-17+ which has full C++20 support
        # Note: -stdlib flag is Clang-specific, GCC always uses libstdc++
        CMAKE_CXX_FLAGS=""
        if [[ "${{ inputs.cxx }}" == clang* ]]; then
        # Only Clang needs the -stdlib flag
        if [ "${{ inputs.stdlib }}" = "libstdcxx" ]; then
        CMAKE_CXX_FLAGS="-stdlib=libstdc++"
        elif [ "${{ inputs.stdlib }}" = "libcxx" ]; then
        CMAKE_CXX_FLAGS="-stdlib=libc++"
        fi
        fi
        # GCC always uses libstdc++ and doesn't need/support the -stdlib flag

        # Configure GCC toolchain for Clang if specified
        if [ -n "${{ inputs.clang_gcc_toolchain }}" ] && [[ "${{ inputs.cxx }}" == clang* ]]; then
        # Extract Clang version from compiler executable name (e.g., clang++-14 -> 14)
        clang_version=$(echo "${{ inputs.cxx }}" | grep -oE '[0-9]+$')

        # Clang 16+ supports --gcc-install-dir (precise path specification)
        # Clang <16 only has --gcc-toolchain (uses discovery heuristics)
        if [ -n "$clang_version" ] && [ "$clang_version" -ge "16" ]; then
        # Clang 16+ uses --gcc-install-dir (canonical, precise)
        CMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS --gcc-install-dir=/usr/lib/gcc/x86_64-linux-gnu/${{ inputs.clang_gcc_toolchain }}"
        else
        # Clang 14-15 uses --gcc-toolchain (deprecated but necessary)
        # Note: This still uses discovery, so we hide newer GCC versions in the workflow
        CMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS --gcc-toolchain=/usr"
        fi
        fi

        # Run CMake configure
        # Note: conanfile.py hardcodes 'build/generators' as the output path.
        # If we're in a 'build' folder, Conan detects this and uses just 'generators/'
        # If we're in '.build' (non-standard), Conan adds the full 'build/generators/'
        # So we get: .build/build/generators/ with our non-standard folder name
        cmake .. \
          -G "${{ inputs.generator }}" \
          $CCACHE_ARGS \
          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
          ${CMAKE_CXX_FLAGS:+-DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS"} \
          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=${TOOLCHAIN_FILE} \
          -DCMAKE_BUILD_TYPE=${{ inputs.configuration }}

    - name: Show ccache config before build
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: |
        echo "=========================================="
        echo "ccache configuration before build"
        echo "=========================================="
        ccache -p
        echo ""

    - name: Build project
      shell: bash
      run: |
        cd ${{ inputs.build_dir }}
        cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc)

        # Check for verbose build flag in commit message
        VERBOSE_FLAG=""
        if echo "${XAHAU_GA_COMMIT_MSG}" | grep -q '\[ci-ga-cmake-verbose\]'; then
        echo "🔊 [ci-ga-cmake-verbose] detected - enabling verbose output"
        VERBOSE_FLAG="-- -v"
        fi

        cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) ${VERBOSE_FLAG}

    - name: Show ccache statistics
      if: inputs.ccache_enabled == 'true'
@@ -101,8 +224,10 @@ runs:
      run: ccache -s

    - name: Save ccache directory
      if: inputs.ccache_enabled == 'true'
      uses: actions/cache/save@v4
      if: success() && inputs.ccache_enabled == 'true'
      uses: ./.github/actions/xahau-ga-cache-save
      with:
        path: ~/.ccache
        key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
        key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
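To make the cache key hierarchy above concrete, here is a hedged sketch of what the primary key and its restore-key fallbacks could expand to for one hypothetical matrix entry (runner.os Linux, cache_version 3, compiler-id clang-14-libstdcxx-gcc11, configuration Debug, branch my-feature); the actual values depend on the workflow inputs:

```
# Primary key (exact match required for cache-hit=true)
Linux-ccache-v3-clang-14-libstdcxx-gcc11-Debug-my-feature

# Restore keys, tried in order, most specific first (a match counts as a stale hit)
Linux-ccache-v3-clang-14-libstdcxx-gcc11-Debug-dev
Linux-ccache-v3-clang-14-libstdcxx-gcc11-Debug-
Linux-ccache-v3-clang-14-libstdcxx-gcc11-
Linux-ccache-v3-
```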
.github/actions/xahau-ga-cache-restore/action.yml (vendored, new file, 291 lines)
@@ -0,0 +1,291 @@
name: 'Xahau Cache Restore (S3)'
bump: 1
description: 'Drop-in replacement for actions/cache/restore using S3 storage'

inputs:
  path:
    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
    required: true
  key:
    description: 'An explicit key for restoring the cache'
    required: true
  restore-keys:
    description: 'An ordered list of prefix-matched keys to use for restoring stale cache if no cache hit occurred for key'
    required: false
    default: ''
  s3-bucket:
    description: 'S3 bucket name for cache storage'
    required: false
    default: 'xahaud-github-actions-cache-niq'
  s3-region:
    description: 'S3 region'
    required: false
    default: 'us-east-1'
  fail-on-cache-miss:
    description: 'Fail the workflow if cache entry is not found'
    required: false
    default: 'false'
  lookup-only:
    description: 'Check if a cache entry exists for the given input(s) without downloading it'
    required: false
    default: 'false'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 access'
    required: true

outputs:
  cache-hit:
    description: 'A boolean value to indicate an exact match was found for the primary key'
    value: ${{ steps.restore-cache.outputs.cache-hit }}
  cache-primary-key:
    description: 'The key that was used to restore the cache (may be from restore-keys)'
    value: ${{ steps.restore-cache.outputs.cache-primary-key }}
  cache-matched-key:
    description: 'The key that was used to restore the cache (exact or prefix match)'
    value: ${{ steps.restore-cache.outputs.cache-matched-key }}

runs:
  using: 'composite'
  steps:
    - name: Restore cache from S3
      id: restore-cache
      shell: bash
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        S3_BUCKET: ${{ inputs.s3-bucket }}
        S3_REGION: ${{ inputs.s3-region }}
        CACHE_KEY: ${{ inputs.key }}
        RESTORE_KEYS: ${{ inputs.restore-keys }}
        TARGET_PATH: ${{ inputs.path }}
        FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
        LOOKUP_ONLY: ${{ inputs.lookup-only }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Restore (S3)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Cache key: ${CACHE_KEY}"
        echo "S3 bucket: s3://${S3_BUCKET}"
        echo ""

        # Normalize target path (expand tilde)
        if [[ "${TARGET_PATH}" == ~* ]]; then
        TARGET_PATH="${HOME}${TARGET_PATH:1}"
        fi

        # Canonicalize path (Linux only - macOS realpath doesn't support -m)
        if [[ "$OSTYPE" == "linux-gnu"* ]]; then
        TARGET_PATH=$(realpath -m "${TARGET_PATH}")
        fi

        echo "Normalized target path: ${TARGET_PATH}"
        echo ""

        # Debug: Show commit message
        echo "=========================================="
        echo "DEBUG: Cache clear tag detection"
        echo "=========================================="
        echo "Raw commit message:"
        echo "${XAHAU_GA_COMMIT_MSG}"
        echo ""
        echo "Searching for: [ci-ga-clear-cache] or [ci-ga-clear-cache:*]"
        echo ""

        # Check for [ci-ga-clear-cache] tag in commit message (with optional search terms)
        # Examples:
        # [ci-ga-clear-cache] - Clear this job's cache
        # [ci-ga-clear-cache:ccache] - Clear only if key contains "ccache"
        # [ci-ga-clear-cache:gcc Debug] - Clear only if key contains both "gcc" AND "Debug"

        # Extract search terms if present (e.g., "ccache" from "[ci-ga-clear-cache:ccache]")
        SEARCH_TERMS=$(echo "${XAHAU_GA_COMMIT_MSG}" | grep -o '\[ci-ga-clear-cache:[^]]*\]' | sed 's/\[ci-ga-clear-cache://;s/\]//' || echo "")

        SHOULD_CLEAR=false

        if [ -n "${SEARCH_TERMS}" ]; then
        # Search terms provided - check if THIS cache key matches ALL terms (AND logic)
        echo "🔍 [ci-ga-clear-cache:${SEARCH_TERMS}] detected"
        echo "Checking if cache key matches search terms..."
        echo " Cache key: ${CACHE_KEY}"
        echo " Search terms: ${SEARCH_TERMS}"
        echo ""

        MATCHES=true
        for term in ${SEARCH_TERMS}; do
        if ! echo "${CACHE_KEY}" | grep -q "${term}"; then
        MATCHES=false
        echo " ✗ Key does not contain '${term}'"
        break
        else
        echo " ✓ Key contains '${term}'"
        fi
        done

        if [ "${MATCHES}" = "true" ]; then
        echo ""
        echo "✅ Cache key matches all search terms - will clear cache"
        SHOULD_CLEAR=true
        else
        echo ""
        echo "⏭️ Cache key doesn't match search terms - skipping cache clear"
        fi
        elif echo "${XAHAU_GA_COMMIT_MSG}" | grep -q '\[ci-ga-clear-cache\]'; then
        # No search terms - always clear this job's cache
        echo "🗑️ [ci-ga-clear-cache] detected in commit message"
        echo "Clearing cache for key: ${CACHE_KEY}"
        SHOULD_CLEAR=true
        fi

        if [ "${SHOULD_CLEAR}" = "true" ]; then
        echo ""

        # Delete base layer
        S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"
        if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
        echo "Deleting base layer: ${S3_BASE_KEY}"
        aws s3 rm "${S3_BASE_KEY}" --region "${S3_REGION}" 2>/dev/null || true
        echo "✓ Base layer deleted"
        else
        echo "ℹ️ No base layer found to delete"
        fi

        echo ""
        echo "✅ Cache cleared successfully"
        echo "Build will proceed from scratch (bootstrap mode)"
        echo ""
        fi

        # Function to try restoring a cache key
        try_restore_key() {
        local key=$1
        local s3_key="s3://${S3_BUCKET}/${key}-base.tar.zst"

        echo "Checking for key: ${key}"

        if aws s3 ls "${s3_key}" --region "${S3_REGION}" >/dev/null 2>&1; then
        echo "✓ Found cache: ${s3_key}"
        return 0
        else
        echo "✗ Not found: ${key}"
        return 1
        fi
        }

        # Try exact match first
        MATCHED_KEY=""
        EXACT_MATCH="false"

        if try_restore_key "${CACHE_KEY}"; then
        MATCHED_KEY="${CACHE_KEY}"
        EXACT_MATCH="true"
        echo ""
        echo "🎯 Exact cache hit for key: ${CACHE_KEY}"
        else
        # Try restore-keys (prefix matching)
        if [ -n "${RESTORE_KEYS}" ]; then
        echo ""
        echo "Primary key not found, trying restore-keys..."

        while IFS= read -r restore_key; do
        [ -z "${restore_key}" ] && continue
        restore_key=$(echo "${restore_key}" | xargs)

        if try_restore_key "${restore_key}"; then
        MATCHED_KEY="${restore_key}"
        EXACT_MATCH="false"
        echo ""
        echo "✓ Cache restored from fallback key: ${restore_key}"
        break
        fi
        done <<< "${RESTORE_KEYS}"
        fi
        fi

        # Check if we found anything
        if [ -z "${MATCHED_KEY}" ]; then
        echo ""
        echo "❌ No cache found for key: ${CACHE_KEY}"

        if [ "${FAIL_ON_MISS}" = "true" ]; then
        echo "fail-on-cache-miss is enabled, failing workflow"
        exit 1
        fi

        # Set outputs for cache miss
        echo "cache-hit=false" >> $GITHUB_OUTPUT
        echo "cache-primary-key=" >> $GITHUB_OUTPUT
        echo "cache-matched-key=" >> $GITHUB_OUTPUT

        # Create empty cache directory
        mkdir -p "${TARGET_PATH}"

        echo ""
        echo "=========================================="
        echo "Cache restore completed (bootstrap mode)"
        echo "Created empty cache directory: ${TARGET_PATH}"
        echo "=========================================="
        exit 0
        fi

        # If lookup-only, we're done
        if [ "${LOOKUP_ONLY}" = "true" ]; then
        echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
        echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
        echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

        echo ""
        echo "=========================================="
        echo "Cache lookup completed (lookup-only mode)"
        echo "Cache exists: ${MATCHED_KEY}"
        echo "=========================================="
        exit 0
        fi

        # Download and extract cache
        S3_KEY="s3://${S3_BUCKET}/${MATCHED_KEY}-base.tar.zst"
        TEMP_TARBALL="/tmp/xahau-cache-restore-$$.tar.zst"

        echo ""
        echo "Downloading cache..."
        aws s3 cp "${S3_KEY}" "${TEMP_TARBALL}" --region "${S3_REGION}" --no-progress

        TARBALL_SIZE=$(du -h "${TEMP_TARBALL}" | cut -f1)
        echo "✓ Downloaded: ${TARBALL_SIZE}"

        # Create parent directory if needed
        mkdir -p "$(dirname "${TARGET_PATH}")"

        # Remove existing target if it exists
        if [ -e "${TARGET_PATH}" ]; then
        echo "Removing existing target: ${TARGET_PATH}"
        rm -rf "${TARGET_PATH}"
        fi

        # Create target directory and extract
        mkdir -p "${TARGET_PATH}"
        echo ""
        echo "Extracting cache..."
        zstd -d -c "${TEMP_TARBALL}" | tar -xf - -C "${TARGET_PATH}"
        echo "✓ Cache extracted to: ${TARGET_PATH}"

        # Cleanup
        rm -f "${TEMP_TARBALL}"

        # Set outputs
        echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
        echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
        echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

        echo ""
        echo "=========================================="
        echo "Cache restore completed successfully"
        echo "Cache hit: ${EXACT_MATCH}"
        echo "Matched key: ${MATCHED_KEY}"
        echo "=========================================="
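As a rough sketch of the storage model behind this action: every cache entry is one immutable S3 object named `<key>-base.tar.zst` in the bucket configured above, so the entries for a job can be inspected with a plain prefix listing (the key prefix below is hypothetical):

```
aws s3 ls "s3://xahaud-github-actions-cache-niq/Linux-conan-v3-" --region us-east-1
# restore = aws s3 cp + zstd -d | tar -x into the target path
# a restore-keys prefix match still restores, but reports cache-hit=false
```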
.github/actions/xahau-ga-cache-save/action.yml (vendored, new file, 110 lines)
@@ -0,0 +1,110 @@
name: 'Xahau Cache Save (S3)'
description: 'Drop-in replacement for actions/cache/save using S3 storage'

inputs:
  path:
    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
    required: true
  key:
    description: 'An explicit key for saving the cache'
    required: true
  s3-bucket:
    description: 'S3 bucket name for cache storage'
    required: false
    default: 'xahaud-github-actions-cache-niq'
  s3-region:
    description: 'S3 region'
    required: false
    default: 'us-east-1'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 access'
    required: true

runs:
  using: 'composite'
  steps:
    - name: Save cache to S3
      shell: bash
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        S3_BUCKET: ${{ inputs.s3-bucket }}
        S3_REGION: ${{ inputs.s3-region }}
        CACHE_KEY: ${{ inputs.key }}
        TARGET_PATH: ${{ inputs.path }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Save (S3)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Cache key: ${CACHE_KEY}"
        echo "S3 bucket: s3://${S3_BUCKET}"
        echo ""

        # Normalize target path (expand tilde and resolve to absolute path)
        if [[ "${TARGET_PATH}" == ~* ]]; then
        TARGET_PATH="${HOME}${TARGET_PATH:1}"
        fi
        echo "Normalized target path: ${TARGET_PATH}"
        echo ""

        # Check if target directory exists
        if [ ! -d "${TARGET_PATH}" ]; then
        echo "⚠️ Target directory does not exist: ${TARGET_PATH}"
        echo "Skipping cache save."
        exit 0
        fi

        # Use static base name (one base per key, immutable)
        S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"

        # Check if base already exists (immutability - first write wins)
        if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
        echo "⚠️ Cache already exists: ${S3_BASE_KEY}"
        echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
        echo ""
        echo "=========================================="
        echo "Cache save completed (already exists)"
        echo "=========================================="
        exit 0
        fi

        # Create tarball
        BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"

        echo "Creating cache tarball..."
        tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

        BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
        echo "✓ Cache tarball created: ${BASE_SIZE}"
        echo ""

        # Upload to S3
        echo "Uploading cache to S3..."
        echo " Key: ${CACHE_KEY}-base.tar.zst"

        aws s3api put-object \
          --bucket "${S3_BUCKET}" \
          --key "${CACHE_KEY}-base.tar.zst" \
          --body "${BASE_TARBALL}" \
          --tagging 'type=base' \
          --region "${S3_REGION}" \
          >/dev/null 2>&1

        echo "✓ Uploaded: ${S3_BASE_KEY}"

        # Cleanup
        rm -f "${BASE_TARBALL}"

        echo ""
        echo "=========================================="
        echo "Cache save completed successfully"
        echo "Cache size: ${BASE_SIZE}"
        echo "Cache key: ${CACHE_KEY}"
        echo "=========================================="
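Because saving is first-write-wins, re-running a job never overwrites an existing `<key>-base.tar.zst`. A hedged sketch of the two refresh mechanisms that appear elsewhere in this diff:

```
# 1. Bump the global CACHE_VERSION env in the workflow, which changes every key.

# 2. Ask the restore action to delete the matching base object via a commit-message tag:
git commit --allow-empty -m "rebuild deps [ci-ga-clear-cache:conan Debug]"
git push
```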
.github/actions/xahau-ga-dependencies/action.yml (vendored, 125 lines changed)
@@ -10,7 +10,7 @@ inputs:
    required: false
    default: '.build'
  compiler-id:
    description: 'Unique identifier for compiler/version combination used for cache keys'
    description: 'Unique identifier: compiler-version-stdlib[-gccversion] (e.g. clang-14-libstdcxx-gcc11, gcc-13-libstdcxx)'
    required: false
    default: ''
  cache_version:
@@ -25,6 +25,41 @@ inputs:
    description: 'Main branch name for restore keys'
    required: false
    default: 'dev'
  os:
    description: 'Operating system (Linux, Macos)'
    required: false
    default: 'Linux'
  arch:
    description: 'Architecture (x86_64, armv8)'
    required: false
    default: 'x86_64'
  compiler:
    description: 'Compiler type (gcc, clang, apple-clang)'
    required: true
  compiler_version:
    description: 'Compiler version (11, 13, 14, etc.)'
    required: true
  cc:
    description: 'C compiler executable (gcc-13, clang-14, etc.), empty for macOS'
    required: false
    default: ''
  cxx:
    description: 'C++ compiler executable (g++-14, clang++-14, etc.), empty for macOS'
    required: false
    default: ''
  stdlib:
    description: 'C++ standard library for Conan configuration (note: also in compiler-id)'
    required: true
    type: choice
    options:
      - libstdcxx
      - libcxx
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 cache storage'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 cache storage'
    required: true

outputs:
  cache-hit:
@@ -34,36 +69,84 @@ outputs:
runs:
  using: 'composite'
  steps:
    - name: Generate safe branch name
      if: inputs.cache_enabled == 'true'
      id: safe-branch
      shell: bash
      run: |
        SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
        echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT

    - name: Restore Conan cache
      if: inputs.cache_enabled == 'true'
      id: cache-restore-conan
      uses: actions/cache/restore@v4
      uses: ./.github/actions/xahau-ga-cache-restore
      with:
        path: |
          ~/.conan
          ~/.conan2
        path: ~/.conan2
        # Note: compiler-id format is compiler-version-stdlib[-gccversion]
        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
        restore-keys: |
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

    - name: Configure Conan
      shell: bash
      run: |
        # Create the default profile directory if it doesn't exist
        mkdir -p ~/.conan2/profiles

        # Determine the correct libcxx based on stdlib parameter
        if [ "${{ inputs.stdlib }}" = "libcxx" ]; then
        LIBCXX="libc++"
        else
        LIBCXX="libstdc++11"
        fi

        # Create profile with our specific settings
        # This overwrites any cached profile to ensure fresh configuration
        cat > ~/.conan2/profiles/default <<EOF
        [settings]
        arch=${{ inputs.arch }}
        build_type=${{ inputs.configuration }}
        compiler=${{ inputs.compiler }}
        compiler.cppstd=20
        compiler.libcxx=${LIBCXX}
        compiler.version=${{ inputs.compiler_version }}
        os=${{ inputs.os }}
        EOF

        # Add buildenv and conf sections for Linux (not needed for macOS)
        if [ "${{ inputs.os }}" = "Linux" ] && [ -n "${{ inputs.cc }}" ]; then
        cat >> ~/.conan2/profiles/default <<EOF

        [buildenv]
        CC=/usr/bin/${{ inputs.cc }}
        CXX=/usr/bin/${{ inputs.cxx }}

        [conf]
        tools.build:compiler_executables={"c": "/usr/bin/${{ inputs.cc }}", "cpp": "/usr/bin/${{ inputs.cxx }}"}
        EOF
        fi

        # Add macOS-specific conf if needed
        if [ "${{ inputs.os }}" = "Macos" ]; then
        cat >> ~/.conan2/profiles/default <<EOF

        [conf]
        # Workaround for gRPC with newer Apple Clang
        tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
        EOF
        fi

        # Display profile for verification
        conan profile show

    - name: Export custom recipes
      shell: bash
      run: |
        conan export external/snappy snappy/1.1.9@
        conan export external/soci soci/4.0.3@
        conan export external/snappy --version 1.1.10 --user xahaud --channel stable
        conan export external/soci --version 4.0.3 --user xahaud --channel stable
        conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable

    - name: Install dependencies
      shell: bash
      env:
        CONAN_REQUEST_TIMEOUT: 180 # Increase timeout to 3 minutes for slow mirrors
      run: |
        # Create build directory
        mkdir -p ${{ inputs.build_dir }}
@@ -77,10 +160,10 @@ runs:
          ..

    - name: Save Conan cache
      if: inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
      uses: actions/cache/save@v4
      if: success() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
      uses: ./.github/actions/xahau-ga-cache-save
      with:
        path: |
          ~/.conan
          ~/.conan2
        key: ${{ steps.cache-restore-conan.outputs.cache-primary-key }}
        path: ~/.conan2
        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
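For illustration, a sketch of the `~/.conan2/profiles/default` this step would generate for one hypothetical Linux matrix entry (gcc-13, Debug, libstdcxx); the real contents follow the action inputs:

```
cat ~/.conan2/profiles/default
# [settings]
# arch=x86_64
# build_type=Debug
# compiler=gcc
# compiler.cppstd=20
# compiler.libcxx=libstdc++11
# compiler.version=13
# os=Linux
#
# [buildenv]
# CC=/usr/bin/gcc-13
# CXX=/usr/bin/g++-13
#
# [conf]
# tools.build:compiler_executables={"c": "/usr/bin/gcc-13", "cpp": "/usr/bin/g++-13"}
```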
.github/actions/xahau-ga-get-commit-message/action.yml (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
name: 'Get Commit Message'
description: 'Gets commit message for both push and pull_request events and sets XAHAU_GA_COMMIT_MSG env var'

inputs:
  event-name:
    description: 'The event name (push or pull_request)'
    required: true
  head-commit-message:
    description: 'The head commit message (for push events)'
    required: false
    default: ''
  pr-head-sha:
    description: 'The PR head SHA (for pull_request events)'
    required: false
    default: ''

runs:
  using: 'composite'
  steps:
    - name: Get commit message and set environment variable
      shell: python
      run: |
        import os
        import subprocess
        import secrets

        event_name = "${{ inputs.event-name }}"
        pr_head_sha = "${{ inputs.pr-head-sha }}"

        print("==========================================")
        print("Setting XAHAU_GA_COMMIT_MSG environment variable")
        print("==========================================")
        print(f"Event: {event_name}")

        if event_name == 'push':
            # For push events, use the input directly
            message = """${{ inputs.head-commit-message }}"""
            print("Source: workflow input (github.event.head_commit.message)")
        elif event_name == 'pull_request':
            # For PR events, fetch the specific SHA
            print(f"Source: git show {pr_head_sha} (fetching PR head commit)")

            # Fetch the PR head commit
            subprocess.run(
                ['git', 'fetch', 'origin', pr_head_sha],
                check=True
            )

            # Get commit message from the fetched SHA
            result = subprocess.run(
                ['git', 'show', '-s', '--format=%B', pr_head_sha],
                capture_output=True,
                text=True,
                check=True
            )
            message = result.stdout.strip()
        else:
            message = ""
            print(f"Warning: Unknown event type: {event_name}")

        print(f"Commit message (first 100 chars): {message[:100]}")

        # Write to GITHUB_ENV using heredoc with random delimiter (prevents injection attacks)
        # See: https://securitylab.github.com/resources/github-actions-untrusted-input/
        delimiter = f"EOF_{secrets.token_hex(16)}"

        with open(os.environ['GITHUB_ENV'], 'a') as f:
            f.write(f'XAHAU_GA_COMMIT_MSG<<{delimiter}\n')
            f.write(message)
            f.write(f'\n{delimiter}\n')

        print(f"✓ XAHAU_GA_COMMIT_MSG set (available to all subsequent steps)")
        print("==========================================")
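Downstream steps read the exported variable from the environment instead of interpolating untrusted event data into shell. A minimal sketch of the consumption pattern used by the cache and build steps in this diff:

```
# Expanded by the shell at run time, not by the workflow template,
# so a crafted commit message cannot inject workflow expressions.
if echo "${XAHAU_GA_COMMIT_MSG}" | grep -q '\[ci-ga-cmake-verbose\]'; then
  echo "verbose build requested"
fi
```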
.github/workflows/build-in-docker.yml (vendored, 12 lines changed)
@@ -32,19 +32,9 @@ jobs:
      clean: true
      fetch-depth: 2 # Only get the last 2 commits, to avoid fetching all history

  checkpatterns:
    runs-on: [self-hosted, vanity]
    needs: checkout
    defaults:
      run:
        working-directory: ${{ needs.checkout.outputs.checkout_path }}
    steps:
      - name: Check for suspicious patterns
        run: /bin/bash suspicious_patterns.sh

  build:
    runs-on: [self-hosted, vanity]
    needs: [checkpatterns, checkout]
    needs: [checkout]
    defaults:
      run:
        working-directory: ${{ needs.checkout.outputs.checkout_path }}
.github/workflows/verify-generated-headers.yml (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
name: Verify Generated Hook Headers

on:
  push:
  pull_request:

jobs:
  verify-generated-headers:
    strategy:
      fail-fast: false
      matrix:
        include:
          - target: hook/error.h
            generator: ./hook/generate_error.sh
          - target: hook/extern.h
            generator: ./hook/generate_extern.sh
          - target: hook/sfcodes.h
            generator: bash ./hook/generate_sfcodes.sh
          - target: hook/tts.h
            generator: ./hook/generate_tts.sh
    runs-on: ubuntu-latest
    name: ${{ matrix.target }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Verify ${{ matrix.target }}
        run: |
          set -euo pipefail
          chmod +x hook/generate_*.sh || true

          tmp=$(mktemp)
          trap 'rm -f "$tmp"' EXIT

          ${{ matrix.generator }} > "$tmp"
          diff -u ${{ matrix.target }} "$tmp"
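The same check can be run locally before pushing; a sketch for one target (hook/sfcodes.h), assuming the generator scripts behave the same as in CI:

```
tmp=$(mktemp)
trap 'rm -f "$tmp"' EXIT
bash ./hook/generate_sfcodes.sh > "$tmp"
diff -u hook/sfcodes.h "$tmp" && echo "hook/sfcodes.h is up to date"
```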
.github/workflows/xahau-ga-macos.yml (vendored, 63 lines changed)
@@ -5,6 +5,8 @@ on:
    branches: ["dev", "candidate", "release"]
  pull_request:
    branches: ["dev", "candidate", "release"]
  schedule:
    - cron: '0 0 * * *'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -28,11 +30,19 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Get commit message
        id: get-commit-message
        uses: ./.github/actions/xahau-ga-get-commit-message
        with:
          event-name: ${{ github.event_name }}
          head-commit-message: ${{ github.event.head_commit.message }}
          pr-head-sha: ${{ github.event.pull_request.head.sha }}

      - name: Install Conan
        run: |
          brew install conan@1
          # Add Conan 1 to the PATH for this job
          echo "$(brew --prefix conan@1)/bin" >> $GITHUB_PATH
          brew install conan
          # Verify Conan 2 is installed
          conan --version

      - name: Install Coreutils
        run: |
@@ -58,23 +68,24 @@ jobs:

      - name: Install CMake
        run: |
          if which cmake > /dev/null 2>&1; then
          echo "cmake executable exists"
          cmake --version
          else
          brew install cmake
          fi
          # Install CMake 3.x to match local dev environments
          # With Conan 2 and the policy args passed to CMake, newer versions
          # can have issues with dependencies that require cmake_minimum_required < 3.5
          brew uninstall cmake --ignore-dependencies 2>/dev/null || true

          # Download and install CMake 3.31.7 directly
          curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz
          tar -xzf cmake.tar.gz

          # Move the entire CMake.app to /Applications
          sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/

          echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH
          /Applications/CMake.app/Contents/bin/cmake --version

      - name: Install ccache
        run: brew install ccache

      - name: Configure ccache
        uses: ./.github/actions/xahau-configure-ccache
        with:
          max_size: 2G
          hash_dir: true
          compiler_check: content

      - name: Check environment
        run: |
          echo "PATH:"
@@ -87,10 +98,12 @@ jobs:
          echo "---- Full Environment ----"
          env

      - name: Configure Conan
      - name: Detect compiler version
        id: detect-compiler
        run: |
          conan profile new default --detect || true # Ignore error if profile exists
          conan profile update settings.compiler.cppstd=20 default
          COMPILER_VERSION=$(clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+')
          echo "compiler_version=${COMPILER_VERSION}" >> $GITHUB_OUTPUT
          echo "Detected Apple Clang version: ${COMPILER_VERSION}"

      - name: Install dependencies
        uses: ./.github/actions/xahau-ga-dependencies
@@ -100,6 +113,13 @@ jobs:
          compiler-id: clang
          cache_version: ${{ env.CACHE_VERSION }}
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          os: Macos
          arch: armv8
          compiler: apple-clang
          compiler_version: ${{ steps.detect-compiler.outputs.compiler_version }}
          stdlib: libcxx
          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

      - name: Build
        uses: ./.github/actions/xahau-ga-build
@@ -110,7 +130,10 @@ jobs:
          compiler-id: clang
          cache_version: ${{ env.CACHE_VERSION }}
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          stdlib: libcxx
          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

      - name: Test
        run: |
          ${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc)
          ${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc)
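The detected Apple Clang major version feeds `compiler_version`, which the dependencies action writes into the Conan profile as `compiler.version`. A sketch of the detection pipeline on its own; the sample output is illustrative only:

```
clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+'
# prints e.g. "17" when clang reports "Apple clang version 17.0.0 ..."
```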
.github/workflows/xahau-ga-nix.yml (vendored, 257 lines changed)
@@ -2,70 +2,242 @@ name: Nix - GA Runner

on:
  push:
    branches: ["dev", "candidate", "release"]
    branches: ["dev", "candidate", "release", "nd-experiment-overlayfs-2025-10-29"]
  pull_request:
    branches: ["dev", "candidate", "release"]
  schedule:
    - cron: '0 0 * * *'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build-job:
  matrix-setup:
    runs-on: ubuntu-latest
    container: python:3-slim
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Generate build matrix
        id: set-matrix
        shell: python
        run: |
          import json
          import os

          # Full matrix with all 6 compiler configurations
          # Each configuration includes all parameters needed by the build job
          full_matrix = [
              {
                  "compiler_id": "gcc-11-libstdcxx",
                  "compiler": "gcc",
                  "cc": "gcc-11",
                  "cxx": "g++-11",
                  "compiler_version": 11,
                  "stdlib": "libstdcxx",
                  "configuration": "Debug"
              },
              {
                  "compiler_id": "gcc-13-libstdcxx",
                  "compiler": "gcc",
                  "cc": "gcc-13",
                  "cxx": "g++-13",
                  "compiler_version": 13,
                  "stdlib": "libstdcxx",
                  "configuration": "Debug"
              },
              {
                  "compiler_id": "clang-14-libstdcxx-gcc11",
                  "compiler": "clang",
                  "cc": "clang-14",
                  "cxx": "clang++-14",
                  "compiler_version": 14,
                  "stdlib": "libstdcxx",
                  "clang_gcc_toolchain": 11,
                  "configuration": "Debug"
              },
              {
                  "compiler_id": "clang-16-libstdcxx-gcc13",
                  "compiler": "clang",
                  "cc": "clang-16",
                  "cxx": "clang++-16",
                  "compiler_version": 16,
                  "stdlib": "libstdcxx",
                  "clang_gcc_toolchain": 13,
                  "configuration": "Debug"
              },
              {
                  "compiler_id": "clang-17-libcxx",
                  "compiler": "clang",
                  "cc": "clang-17",
                  "cxx": "clang++-17",
                  "compiler_version": 17,
                  "stdlib": "libcxx",
                  "configuration": "Debug"
              },
              {
                  # Clang 18 - testing if it's faster than Clang 17 with libc++
                  # Requires patching Conan v1 settings.yml to add version 18
                  "compiler_id": "clang-18-libcxx",
                  "compiler": "clang",
                  "cc": "clang-18",
                  "cxx": "clang++-18",
                  "compiler_version": 18,
                  "stdlib": "libcxx",
                  "configuration": "Debug"
              }
          ]

          # Minimal matrix for PRs and feature branches
          minimal_matrix = [
              full_matrix[1],  # gcc-13 (middle-ground gcc)
              full_matrix[2]   # clang-14 (mature, stable clang)
          ]

          # Determine which matrix to use based on the target branch
          ref = "${{ github.ref }}"
          base_ref = "${{ github.base_ref }}"  # For PRs, this is the target branch
          event_name = "${{ github.event_name }}"
          commit_message = """${{ github.event.head_commit.message }}"""
          pr_title = """${{ github.event.pull_request.title }}"""

          # Debug logging
          print(f"Event: {event_name}")
          print(f"Ref: {ref}")
          print(f"Base ref: {base_ref}")
          print(f"PR title: {pr_title}")
          print(f"Commit message: {commit_message}")

          # Check for override tags in commit message or PR title
          force_full = "[ci-nix-full-matrix]" in commit_message or "[ci-nix-full-matrix]" in pr_title
          print(f"Force full matrix: {force_full}")

          # Check if this is targeting a main branch
          # For PRs: check base_ref (target branch)
          # For pushes: check ref (current branch)
          main_branches = ["refs/heads/dev", "refs/heads/release", "refs/heads/candidate"]

          if force_full:
              # Override: always use full matrix if tag is present
              use_full = True
          elif event_name == "pull_request":
              # For PRs, base_ref is just the branch name (e.g., "dev", not "refs/heads/dev")
              # Check if the PR targets release or candidate (more critical branches)
              use_full = base_ref in ["release", "candidate"]
          else:
              # For pushes, ref is the full reference (e.g., "refs/heads/dev")
              use_full = ref in main_branches

          # Select the appropriate matrix
          if use_full:
              if force_full:
                  print(f"Using FULL matrix (6 configs) - forced by [ci-nix-full-matrix] tag")
              else:
                  print(f"Using FULL matrix (6 configs) - targeting main branch")
              matrix = full_matrix
          else:
              print(f"Using MINIMAL matrix (2 configs) - feature branch/PR")
              matrix = minimal_matrix

          # Output the matrix as JSON
          output = json.dumps({"include": matrix})
          with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
              f.write(f"matrix={output}\n")

  build:
    needs: matrix-setup
    runs-on: ubuntu-latest
    outputs:
      artifact_name: ${{ steps.set-artifact-name.outputs.artifact_name }}
    strategy:
      fail-fast: false
      matrix:
        compiler: [gcc]
        configuration: [Debug]
        include:
          - compiler: gcc
            cc: gcc-11
            cxx: g++-11
            compiler_id: gcc-11
      matrix: ${{ fromJSON(needs.matrix-setup.outputs.matrix) }}
    env:
      build_dir: .build
      # Bump this number to invalidate all caches globally.
      CACHE_VERSION: 1
      CACHE_VERSION: 3
      MAIN_BRANCH_NAME: dev
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Get commit message
        id: get-commit-message
        uses: ./.github/actions/xahau-ga-get-commit-message
        with:
          event-name: ${{ github.event_name }}
          head-commit-message: ${{ github.event.head_commit.message }}
          pr-head-sha: ${{ github.event.pull_request.head.sha }}

      - name: Install build dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y ninja-build ${{ matrix.cc }} ${{ matrix.cxx }} ccache
          # Install specific Conan version needed
          pip install --upgrade "conan<2"

      - name: Configure ccache
        uses: ./.github/actions/xahau-configure-ccache
        with:
          max_size: 2G
          hash_dir: true
          compiler_check: content
          # Install the specific GCC version needed for Clang
          if [ -n "${{ matrix.clang_gcc_toolchain }}" ]; then
          echo "=== Installing GCC ${{ matrix.clang_gcc_toolchain }} for Clang ==="
          sudo apt-get install -y gcc-${{ matrix.clang_gcc_toolchain }} g++-${{ matrix.clang_gcc_toolchain }} libstdc++-${{ matrix.clang_gcc_toolchain }}-dev

      - name: Configure Conan
        run: |
          conan profile new default --detect || true # Ignore error if profile exists
          conan profile update settings.compiler.cppstd=20 default
          conan profile update settings.compiler=${{ matrix.compiler }} default
          conan profile update settings.compiler.libcxx=libstdc++11 default
          conan profile update env.CC=/usr/bin/${{ matrix.cc }} default
          conan profile update env.CXX=/usr/bin/${{ matrix.cxx }} default
          conan profile update conf.tools.build:compiler_executables='{"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}' default

          # Set correct compiler version based on matrix.compiler
          if [ "${{ matrix.compiler }}" = "gcc" ]; then
          conan profile update settings.compiler.version=11 default
          elif [ "${{ matrix.compiler }}" = "clang" ]; then
          conan profile update settings.compiler.version=14 default
          echo "=== GCC versions available after installation ==="
          ls -la /usr/lib/gcc/x86_64-linux-gnu/ | grep -E "^d"
          fi
          # Display profile for verification
          conan profile show default

          # For Clang < 16 with --gcc-toolchain, hide newer GCC versions
          # This is needed because --gcc-toolchain still picks the highest version
          #
          # THE GREAT GCC HIDING TRICK (for Clang < 16):
          # Clang versions before 16 don't have --gcc-install-dir, only --gcc-toolchain
          # which is deprecated and still uses discovery heuristics that ALWAYS pick
          # the highest version number. So we play a sneaky game...
          #
          # We rename newer GCC versions to very low integers (1, 2, 3...) which makes
          # Clang think they're ancient GCC versions. Since 11 > 3 > 2 > 1, Clang will
          # pick GCC 11 over our renamed versions. It's dumb but it works!
          #
          # Example: GCC 12→1, GCC 13→2, GCC 14→3, so Clang picks 11 (highest number)
          if [ -n "${{ matrix.clang_gcc_toolchain }}" ] && [ "${{ matrix.compiler_version }}" -lt "16" ]; then
          echo "=== Hiding GCC versions newer than ${{ matrix.clang_gcc_toolchain }} for Clang < 16 ==="
          target_version=${{ matrix.clang_gcc_toolchain }}
          counter=1 # Start with 1 - these will be seen as "GCC version 1, 2, 3" etc
          for dir in /usr/lib/gcc/x86_64-linux-gnu/*/; do
          if [ -d "$dir" ]; then
          version=$(basename "$dir")
          # Check if version is numeric and greater than target
          if [[ "$version" =~ ^[0-9]+$ ]] && [ "$version" -gt "$target_version" ]; then
          echo "Hiding GCC $version -> renaming to $counter (will be seen as GCC version $counter)"
          # Safety check: ensure target doesn't already exist
          if [ ! -e "/usr/lib/gcc/x86_64-linux-gnu/$counter" ]; then
          sudo mv "$dir" "/usr/lib/gcc/x86_64-linux-gnu/$counter"
          else
          echo "ERROR: Cannot rename GCC $version - /usr/lib/gcc/x86_64-linux-gnu/$counter already exists"
          exit 1
          fi
          counter=$((counter + 1))
          fi
          fi
          done
          fi

          # Verify what Clang will use
          if [ -n "${{ matrix.clang_gcc_toolchain }}" ]; then
          echo "=== Verifying GCC toolchain selection ==="
          echo "Available GCC versions:"
          ls -la /usr/lib/gcc/x86_64-linux-gnu/ | grep -E "^d.*[0-9]+$" || true

          echo ""
          echo "Clang's detected GCC installation:"
          ${{ matrix.cxx }} -v -E -x c++ /dev/null -o /dev/null 2>&1 | grep "Found candidate GCC installation" || true
          fi

          # Install libc++ dev packages if using libc++ (not needed for libstdc++)
          if [ "${{ matrix.stdlib }}" = "libcxx" ]; then
          sudo apt-get install -y libc++-${{ matrix.compiler_version }}-dev libc++abi-${{ matrix.compiler_version }}-dev
          fi

          # Install Conan 2
          pip install --upgrade "conan>=2.0,<3"

      - name: Check environment
        run: |
@@ -87,6 +259,13 @@ jobs:
          compiler-id: ${{ matrix.compiler_id }}
          cache_version: ${{ env.CACHE_VERSION }}
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          compiler: ${{ matrix.compiler }}
          compiler_version: ${{ matrix.compiler_version }}
          cc: ${{ matrix.cc }}
          cxx: ${{ matrix.cxx }}
          stdlib: ${{ matrix.stdlib }}
          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

      - name: Build
        uses: ./.github/actions/xahau-ga-build
@@ -99,6 +278,10 @@ jobs:
          compiler-id: ${{ matrix.compiler_id }}
          cache_version: ${{ env.CACHE_VERSION }}
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          stdlib: ${{ matrix.stdlib }}
          clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

      - name: Set artifact name
        id: set-artifact-name
@@ -120,4 +303,4 @@ jobs:
        else
          echo "Error: rippled executable not found in ${{ env.build_dir }}"
          exit 1
        fi
        fi
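Pulling the control tags together, a hypothetical example of driving these workflows from the commit message (the tags themselves are the ones defined above):

```
# Run the full 6-config matrix on a feature branch and request verbose CMake output
git commit --allow-empty -m "ci probe [ci-nix-full-matrix] [ci-ga-cmake-verbose]"

# Clear only cache entries whose key mentions both clang and Debug, then rebuild
git commit --allow-empty -m "ci reset cache [ci-ga-clear-cache:clang Debug]"
```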
.gitignore (vendored, 5 lines changed)
@@ -24,6 +24,11 @@ bin/project-cache.jam

build/docker

# Ignore release builder files
.env
release-build
cmake-*.tar.gz

# Ignore object files.
*.o
build
.vscode/settings.json (vendored, 2 lines changed)
@@ -8,6 +8,6 @@
    "editor.semanticHighlighting.enabled": true,
    "editor.tabSize": 4,
    "editor.defaultFormatter": "xaver.clang-format",
    "editor.formatOnSave": false
    "editor.formatOnSave": true
  }
}
BUILD.md (72 lines changed)
@@ -33,7 +33,7 @@ git checkout develop
## Minimum Requirements

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [Conan 2.x](https://conan.io/downloads)
- [CMake 3.16](https://cmake.org/download/)

`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
@@ -65,13 +65,24 @@ can't build earlier Boost versions.
1. (Optional) If you've never used Conan, use autodetect to set up a default profile.

   ```
   conan profile new default --detect
   conan profile detect --force
   ```

2. Update the compiler settings.

   For Conan 2, you can edit the profile directly at `~/.conan2/profiles/default`,
   or use the Conan CLI. Ensure C++20 is set:

   ```
   conan profile update settings.compiler.cppstd=20 default
   conan profile show
   ```

   Look for `compiler.cppstd=20` in the output. If it's not set, edit the profile:

   ```
   # Edit ~/.conan2/profiles/default and ensure these settings exist:
   [settings]
   compiler.cppstd=20
   ```

   Linux developers will commonly have a default Conan [profile][] that compiles
@@ -80,7 +91,9 @@ can't build earlier Boost versions.
   then you will need to choose the `libstdc++11` ABI.

   ```
   conan profile update settings.compiler.libcxx=libstdc++11 default
   # In ~/.conan2/profiles/default, ensure:
   [settings]
   compiler.libcxx=libstdc++11
   ```

   On Windows, you should use the x64 native build tools.
@@ -91,7 +104,9 @@ can't build earlier Boost versions.
   architecture.

   ```
   conan profile update settings.arch=x86_64 default
   # In ~/.conan2/profiles/default, ensure:
   [settings]
   arch=x86_64
   ```

3. (Optional) If you have multiple compilers installed on your platform,
@@ -100,16 +115,18 @@ can't build earlier Boost versions.
   in the generated CMake toolchain file.

   ```
   conan profile update 'conf.tools.build:compiler_executables={"c": "<path>", "cpp": "<path>"}' default
   # In ~/.conan2/profiles/default, add under [conf] section:
   [conf]
   tools.build:compiler_executables={"c": "<path>", "cpp": "<path>"}
   ```

   It should choose the compiler for dependencies as well,
   but not all of them have a Conan recipe that respects this setting (yet).
   For the rest, you can set these environment variables:
   For setting environment variables for dependencies:

   ```
   conan profile update env.CC=<path> default
   conan profile update env.CXX=<path> default
   # In ~/.conan2/profiles/default, add under [buildenv] section:
   [buildenv]
   CC=<path>
   CXX=<path>
   ```

4. Export our [Conan recipe for Snappy](./external/snappy).
@@ -117,14 +134,20 @@ can't build earlier Boost versions.
   which allows you to statically link it with GCC, if you want.

   ```
   conan export external/snappy snappy/1.1.9@
   conan export external/snappy --version 1.1.10 --user xahaud --channel stable
   ```

5. Export our [Conan recipe for SOCI](./external/soci).
   It patches their CMake to correctly import its dependencies.

   ```
   conan export external/soci soci/4.0.3@
   conan export external/soci --version 4.0.3 --user xahaud --channel stable
   ```

6. Export our [Conan recipe for WasmEdge](./external/wasmedge).

   ```
   conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
   ```

### Build and Test
@@ -259,23 +282,26 @@ and can be helpful for detecting `#include` omissions.
If you have trouble building dependencies after changing Conan settings,
try removing the Conan cache.

For Conan 2:
```
rm -rf ~/.conan/data
rm -rf ~/.conan2/p
```

Or clear the entire Conan 2 cache:
```
conan cache clean "*"
```


### no std::result_of
### macOS compilation with Apple Clang 17+

If your compiler version is recent enough to have removed `std::result_of` as
part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor
definition to your build.
If you're on macOS with Apple Clang 17 or newer, you need to add a compiler flag to work around a compilation error in gRPC dependencies.

Edit `~/.conan2/profiles/default` and add under the `[conf]` section:

```
conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
[conf]
tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
```
465 BUILD_LEDGER.md
@@ -1,465 +0,0 @@
# Hash Migration Implementation via BuildLedger

## Overview

This document outlines the approach for implementing SHA-512 Half to BLAKE3 hash migration by performing the state map rekeying operation in the ledger building process, bypassing the metadata generation problem inherent in the transaction processing pipeline.

## The Core Problem

When switching from SHA-512 Half to BLAKE3, every object in the state map needs to be rekeyed because the hash (which IS the key in the SHAMap) changes. This would generate metadata showing:
- Every object deleted at its old SHA-512 key
- Every object created at its new BLAKE3 key
- Total metadata size: 2× the entire state size (potentially gigabytes)

## The Solution: Bypass Transaction Processing

Instead of trying to rekey within the transaction processor (which tracks all changes for metadata), perform the rekeying AFTER transaction processing but BEFORE ledger finalization.

## Implementation Location

The key intervention point is in `buildLedgerImpl()` at line 63 of `BuildLedger.cpp`:

```cpp
// BuildLedger.cpp, lines 58-65
{
    OpenView accum(&*built);
    assert(!accum.open());
    applyTxs(accum, built);  // Apply transactions (including pseudo-txns)
    accum.apply(*built);     // Apply accumulated changes to the ledger
}
// <-- INTERVENTION POINT HERE
built->updateSkipList();
```
## Detailed Implementation

### 1. Pseudo-Transaction Role (Simple Flag Setting)

```cpp
// In Change.cpp
TER Change::applyHashMigration()
{
    // The pseudo-transaction just sets a flag
    // The actual migration happens in BuildLedger

    JLOG(j_.warn()) << "Hash migration pseudo transaction triggered at ledger "
                    << view().seq();

    // Create a migration flag object
    auto migrationFlag = std::make_shared<SLE>(
        keylet::hashMigrationFlag(
            hash_options{view().seq(), KEYLET_MIGRATION_FLAG}));

    migrationFlag->setFieldU32(sfLedgerSequence, view().seq());
    migrationFlag->setFieldU8(sfMigrationStatus, 1);  // 1 = pending

    view().insert(migrationFlag);

    return tesSUCCESS;
}
```

### 2. BuildLedger Modification

```cpp
// In BuildLedger.cpp, after line 63
template <class ApplyTxs>
std::shared_ptr<Ledger>
buildLedgerImpl(
    std::shared_ptr<Ledger const> const& parent,
    NetClock::time_point closeTime,
    const bool closeTimeCorrect,
    NetClock::duration closeResolution,
    Application& app,
    beast::Journal j,
    ApplyTxs&& applyTxs)
{
    auto built = std::make_shared<Ledger>(*parent, closeTime);

    if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL))
    {
        built->updateNegativeUNL();
    }

    {
        OpenView accum(&*built);
        assert(!accum.open());
        applyTxs(accum, built);
        accum.apply(*built);
    }

    // NEW: Check for hash migration flag
    if (shouldPerformHashMigration(built, app, j))
    {
        performHashMigration(built, app, j);
    }

    built->updateSkipList();
    // ... rest of function
}

// New helper functions
bool shouldPerformHashMigration(
    std::shared_ptr<Ledger> const& ledger,
    Application& app,
    beast::Journal j)
{
    // Check if we're in the migration window
    constexpr LedgerIndex MIGRATION_START = 20'000'000;
    constexpr LedgerIndex MIGRATION_END = 20'000'010;

    if (ledger->seq() < MIGRATION_START || ledger->seq() >= MIGRATION_END)
        return false;

    // Check for migration flag
    auto const flag = ledger->read(keylet::hashMigrationFlag(
        hash_options{ledger->seq(), KEYLET_MIGRATION_FLAG}));

    if (!flag)
        return false;

    return flag->getFieldU8(sfMigrationStatus) == 1;  // 1 = pending
}

void performHashMigration(
    std::shared_ptr<Ledger> const& ledger,
    Application& app,
    beast::Journal j)
{
    JLOG(j.warn()) << "PERFORMING HASH MIGRATION at ledger " << ledger->seq();

    auto& oldStateMap = ledger->stateMap();

    // Create new state map with BLAKE3 hashing
    SHAMap newStateMap(SHAMapType::STATE, ledger->family());
    newStateMap.setLedgerSeq(ledger->seq());

    // Track statistics
    std::size_t objectCount = 0;
    auto startTime = std::chrono::steady_clock::now();

    // Walk the entire state map and rekey everything
    oldStateMap.visitLeaves([&](SHAMapItem const& item) {
        try {
            // Deserialize the ledger entry
            SerialIter sit(item.slice());
            auto sle = std::make_shared<SLE>(sit, item.key());

            // The new key would be calculated with BLAKE3
            // For now, we'd need the actual BLAKE3 implementation
            // uint256 newKey = calculateBlake3Key(sle);

            // For this example, let's assume we have a function that
            // computes the new key based on the SLE type and contents
            uint256 newKey = computeNewHashKey(sle, ledger->seq());

            // Re-serialize the SLE
            Serializer s;
            sle->add(s);

            // Add to new map with new key
            newStateMap.addGiveItem(
                SHAMapNodeType::tnACCOUNT_STATE,
                make_shamapitem(newKey, s.slice()));

            objectCount++;

            if (objectCount % 10000 == 0) {
                JLOG(j.info()) << "Migration progress: " << objectCount
                               << " objects rekeyed";
            }
        }
        catch (std::exception const& e) {
            JLOG(j.error()) << "Failed to migrate object " << item.key()
                            << ": " << e.what();
            throw;
        }
    });

    auto endTime = std::chrono::steady_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
        endTime - startTime);

    JLOG(j.warn()) << "Hash migration completed: " << objectCount
                   << " objects rekeyed in " << duration.count() << "ms";

    // Swap the state maps
    oldStateMap = std::move(newStateMap);

    // Update the migration flag to completed
    auto flag = ledger->peek(keylet::hashMigrationFlag(
        hash_options{ledger->seq(), KEYLET_MIGRATION_FLAG}));
    if (flag) {
        flag->setFieldU8(sfMigrationStatus, 2);  // 2 = completed
        ledger->rawReplace(flag);
    }
}

uint256 computeNewHashKey(
    std::shared_ptr<SLE const> const& sle,
    LedgerIndex ledgerSeq)
{
    // This would use BLAKE3 instead of SHA-512 Half
    // Implementation depends on the BLAKE3 integration
    // For now, this is a placeholder

    // The actual implementation would:
    // 1. Determine the object type
    // 2. Extract the identifying fields
    // 3. Hash them with BLAKE3
    // 4. Return the new key

    return uint256();  // Placeholder
}
```
## Why This Approach Works

### 1. No Metadata Explosion
- The rekeying happens AFTER the `OpenView` is destroyed
- No change tracking occurs during the rekeying
- Only the migration flag generates metadata (minimal)

### 2. Direct SHAMap Access
- We have direct access to `built->stateMap()`
- Can manipulate the raw data structure without going through ApplyView
- Can create a new SHAMap and swap it in

### 3. Clean Separation of Concerns
- Pseudo-transaction: "Signal that migration should happen"
- BuildLedger: "Actually perform the migration"
- Transaction processor: Unchanged, doesn't need to handle massive rekeying

### 4. Timing is Perfect
- After all transactions are applied
- Before the ledger is finalized
- Before the skip list is updated
- Before the SHAMap is flushed to disk

## Files Referenced in This Analysis

### Core Implementation Files
- `src/ripple/app/ledger/impl/BuildLedger.cpp` - Main implementation location
- `src/ripple/app/ledger/BuildLedger.h` - Header for build functions
- `src/ripple/app/tx/impl/Change.cpp` - Pseudo-transaction handler
- `src/ripple/app/tx/impl/Change.h` - Change transactor header

### Transaction Processing Pipeline (analyzed but bypassed)
- `src/ripple/app/tx/impl/Transactor.cpp` - Base transaction processor
- `src/ripple/app/tx/impl/Transactor.h` - Transactor header
- `src/ripple/app/tx/impl/apply.cpp` - Transaction application
- `src/ripple/app/tx/impl/applySteps.cpp` - Transaction routing
- `src/ripple/app/tx/impl/ApplyContext.h` - Application context

### Ledger and View Classes
- `src/ripple/app/ledger/Ledger.h` - Ledger class definition
- `src/ripple/app/ledger/Ledger.cpp` - Ledger implementation
- `src/ripple/ledger/ApplyView.h` - View interface
- `src/ripple/ledger/ApplyViewImpl.h` - View implementation header
- `src/ripple/ledger/impl/ApplyViewImpl.cpp` - View implementation
- `src/ripple/ledger/impl/ApplyViewBase.cpp` - Base view implementation
- `src/ripple/ledger/detail/ApplyViewBase.h` - Base view header
- `src/ripple/ledger/OpenView.h` - Open ledger view
- `src/ripple/ledger/RawView.h` - Raw view interface

### SHAMap and Data Structures
- `src/ripple/shamap/SHAMap.h` - SHAMap class definition

### Metadata Generation
- `src/ripple/protocol/TxMeta.h` - Transaction metadata header
- `src/ripple/protocol/impl/TxMeta.cpp` - Metadata implementation

### Consensus and Pseudo-Transaction Injection
- `src/ripple/app/consensus/RCLConsensus.cpp` - Consensus implementation

### Supporting Documents
- `PSEUDO_TRANSACTIONS.md` - Documentation on pseudo-transactions
- `HASH_MIGRATION_CONTEXT.md` - Context for hash migration work

## Key Advantages

1. **Architecturally Clean**: Works within existing ledger building framework
2. **No Metadata Issues**: Completely bypasses the metadata generation problem
3. **Atomic Operation**: Either the entire state is rekeyed or none of it is
4. **Fail-Safe**: Can be wrapped in try-catch for error handling
5. **Observable**: Can log progress for large state maps
6. **Testable**: Can be tested independently of transaction processing

## Challenges and Considerations

1. **Performance**: Rekeying millions of objects will take time
   - Solution: This happens during consensus, all nodes do it simultaneously

2. **Memory Usage**: Need to hold both old and new SHAMaps temporarily
   - Solution: Could potentially do in-place updates with careful ordering

3. **Verification**: Need to ensure all nodes get the same result
   - Solution: Deterministic rekeying based on ledger sequence

4. **Rollback**: If migration fails, need to handle gracefully
   - Solution: Keep old map until new map is fully built and verified (see the sketch below)
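The build-then-verify-then-swap idea from the rollback consideration above can be sketched as follows. This is not part of the proposed patch: `rekeyInto()` and `verifyRekeyedMap()` are hypothetical helpers standing in for the rekeying loop shown earlier and for whatever verification (for example, a leaf-count comparison) the implementation settles on.

```cpp
// Sketch only: keep the old state map untouched until the rekeyed map
// has been built and verified, so a failed migration changes nothing.
void
Ledger::migrateToBlake3Safely()
{
    SHAMap newStateMap(SHAMapType::STATE, stateMap_.family());
    newStateMap.setLedgerSeq(seq());

    try
    {
        rekeyInto(newStateMap);              // hypothetical: the visitLeaves loop above
        if (!verifyRekeyedMap(newStateMap))  // hypothetical: e.g. leaf-count comparison
            throw std::runtime_error("rekeyed map failed verification");
    }
    catch (std::exception const& e)
    {
        JLOG(j_.error()) << "BLAKE3 migration aborted; keeping old state map: "
                         << e.what();
        return;  // stateMap_ has not been modified
    }

    // Commit: only now is the old map replaced.
    stateMap_ = std::move(newStateMap);
}
```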
## Conclusion

By performing the hash migration at the ledger building level rather than within the transaction processing pipeline, we can successfully rekey the entire state map without generating massive metadata. This approach leverages the existing architecture's separation between transaction processing and ledger construction, providing a clean and efficient solution to what initially appeared to be an intractable problem.

---

## APPENDIX: Revised Implementation Following Ledger Pattern

After reviewing the existing pattern in `BuildLedger.cpp`, it's clear that special ledger operations are implemented as methods on the `Ledger` class itself (e.g., `built->updateNegativeUNL()`). Following this pattern, the hash migration should be implemented as `Ledger::migrateToBlake3()`.

### Updated BuildLedger.cpp Implementation

```cpp
// In BuildLedger.cpp, following the existing pattern
template <class ApplyTxs>
std::shared_ptr<Ledger>
buildLedgerImpl(
    std::shared_ptr<Ledger const> const& parent,
    NetClock::time_point closeTime,
    const bool closeTimeCorrect,
    NetClock::duration closeResolution,
    Application& app,
    beast::Journal j,
    ApplyTxs&& applyTxs)
{
    auto built = std::make_shared<Ledger>(*parent, closeTime);

    if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL))
    {
        built->updateNegativeUNL();
    }

    {
        OpenView accum(&*built);
        assert(!accum.open());
        applyTxs(accum, built);
        accum.apply(*built);
    }

    // NEW: Check and perform hash migration following the pattern
    if (built->rules().enabled(featureBLAKE3Migration) &&
        built->shouldMigrateToBlake3())
    {
        built->migrateToBlake3();
    }

    built->updateSkipList();
    // ... rest of function
}
```

### Ledger.h Addition

```cpp
// In src/ripple/app/ledger/Ledger.h
class Ledger final : public std::enable_shared_from_this<Ledger>,
                     public DigestAwareReadView,
                     public TxsRawView,
                     public CountedObject<Ledger>
{
public:
    // ... existing methods ...

    /** Update the Negative UNL ledger component. */
    void
    updateNegativeUNL();

    /** Check if hash migration to BLAKE3 should be performed */
    bool
    shouldMigrateToBlake3() const;

    /** Perform hash migration from SHA-512 Half to BLAKE3
     *  This rekeys all objects in the state map with new BLAKE3 hashes.
     *  Must be called after transactions are applied but before the
     *  ledger is finalized.
     */
    void
    migrateToBlake3();

    // ... rest of class ...
};
```

### Ledger.cpp Implementation

```cpp
// In src/ripple/app/ledger/Ledger.cpp

bool
Ledger::shouldMigrateToBlake3() const
{
    // Check if we're in the migration window
    constexpr LedgerIndex MIGRATION_START = 20'000'000;
    constexpr LedgerIndex MIGRATION_END = 20'000'010;

    if (seq() < MIGRATION_START || seq() >= MIGRATION_END)
        return false;

    // Check for migration flag set by pseudo-transaction
    auto const flag = read(keylet::hashMigrationFlag(
        hash_options{seq(), KEYLET_MIGRATION_FLAG}));

    if (!flag)
        return false;

    return flag->getFieldU8(sfMigrationStatus) == 1;  // 1 = pending
}

void
Ledger::migrateToBlake3()
{
    JLOG(j_.warn()) << "Performing BLAKE3 hash migration at ledger " << seq();

    // Create new state map with BLAKE3 hashing
    SHAMap newStateMap(SHAMapType::STATE, stateMap_.family());
    newStateMap.setLedgerSeq(seq());

    std::size_t objectCount = 0;
    auto startTime = std::chrono::steady_clock::now();

    // Walk the entire state map and rekey everything
    stateMap_.visitLeaves([&](SHAMapItem const& item) {
        // Deserialize the ledger entry
        SerialIter sit(item.slice());
        auto sle = std::make_shared<SLE>(sit, item.key());

        // Calculate new BLAKE3-based key
        // This would use the actual BLAKE3 implementation
        uint256 newKey = computeBlake3Key(sle);

        // Re-serialize and add to new map
        Serializer s;
        sle->add(s);

        newStateMap.addGiveItem(
            SHAMapNodeType::tnACCOUNT_STATE,
            make_shamapitem(newKey, s.slice()));

        if (++objectCount % 10000 == 0) {
            JLOG(j_.info()) << "Migration progress: " << objectCount
                            << " objects rekeyed";
        }
    });

    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - startTime);

    JLOG(j_.warn()) << "BLAKE3 migration completed: " << objectCount
                    << " objects rekeyed in " << duration.count() << "ms";

    // Swap the state maps
    stateMap_ = std::move(newStateMap);

    // Update the migration flag to completed
    auto flag = peek(keylet::hashMigrationFlag(
        hash_options{seq(), KEYLET_MIGRATION_FLAG}));
    if (flag) {
        flag->setFieldU8(sfMigrationStatus, 2);  // 2 = completed
        rawReplace(flag);
    }
}
```

This approach follows the established pattern in the codebase where special ledger operations are encapsulated as methods on the `Ledger` class itself, making the code more maintainable and consistent with the existing architecture.
@@ -50,6 +50,12 @@ target_sources (xrpl_core PRIVATE
|
||||
src/ripple/beast/utility/src/beast_Journal.cpp
|
||||
src/ripple/beast/utility/src/beast_PropertyStream.cpp)
|
||||
|
||||
# Conditionally add enhanced logging source when BEAST_ENHANCED_LOGGING is enabled
|
||||
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
|
||||
target_sources(xrpl_core PRIVATE
|
||||
src/ripple/beast/utility/src/beast_EnhancedLogging.cpp)
|
||||
endif()
|
||||
|
||||
#[===============================[
|
||||
core sources
|
||||
#]===============================]
|
||||
@@ -153,9 +159,15 @@ target_link_libraries (xrpl_core
|
||||
Ripple::syslibs
|
||||
secp256k1::secp256k1
|
||||
ed25519::ed25519
|
||||
BLAKE3::blake3
|
||||
date::date
|
||||
Ripple::opts)
|
||||
|
||||
# Link date-tz library when enhanced logging is enabled
|
||||
if(DEFINED BEAST_ENHANCED_LOGGING AND BEAST_ENHANCED_LOGGING)
|
||||
if(TARGET date::date-tz)
|
||||
target_link_libraries(xrpl_core PUBLIC date::date-tz)
|
||||
endif()
|
||||
endif()
|
||||
#[=================================[
|
||||
main/core headers installation
|
||||
#]=================================]
|
||||
@@ -445,6 +457,8 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/app/tx/impl/CreateCheck.cpp
|
||||
src/ripple/app/tx/impl/CreateOffer.cpp
|
||||
src/ripple/app/tx/impl/CreateTicket.cpp
|
||||
src/ripple/app/tx/impl/Cron.cpp
|
||||
src/ripple/app/tx/impl/CronSet.cpp
|
||||
src/ripple/app/tx/impl/DeleteAccount.cpp
|
||||
src/ripple/app/tx/impl/DepositPreauth.cpp
|
||||
src/ripple/app/tx/impl/Escrow.cpp
|
||||
@@ -549,7 +563,6 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/nodestore/backend/CassandraFactory.cpp
|
||||
src/ripple/nodestore/backend/RWDBFactory.cpp
|
||||
src/ripple/nodestore/backend/MemoryFactory.cpp
|
||||
src/ripple/nodestore/backend/FlatmapFactory.cpp
|
||||
src/ripple/nodestore/backend/NuDBFactory.cpp
|
||||
src/ripple/nodestore/backend/NullFactory.cpp
|
||||
src/ripple/nodestore/backend/RocksDBFactory.cpp
|
||||
@@ -638,7 +651,6 @@ target_sources (rippled PRIVATE
|
||||
src/ripple/rpc/handlers/LogLevel.cpp
|
||||
src/ripple/rpc/handlers/LogRotate.cpp
|
||||
src/ripple/rpc/handlers/Manifest.cpp
|
||||
src/ripple/rpc/handlers/MapStats.cpp
|
||||
src/ripple/rpc/handlers/NFTOffers.cpp
|
||||
src/ripple/rpc/handlers/NodeToShard.cpp
|
||||
src/ripple/rpc/handlers/NoRippleCheck.cpp
|
||||
@@ -724,6 +736,7 @@ if (tests)
|
||||
src/test/app/BaseFee_test.cpp
|
||||
src/test/app/Check_test.cpp
|
||||
src/test/app/ClaimReward_test.cpp
|
||||
src/test/app/Cron_test.cpp
|
||||
src/test/app/Clawback_test.cpp
|
||||
src/test/app/CrossingLimits_test.cpp
|
||||
src/test/app/DeliverMin_test.cpp
|
||||
@@ -888,6 +901,7 @@ if (tests)
|
||||
src/test/jtx/impl/amount.cpp
|
||||
src/test/jtx/impl/balance.cpp
|
||||
src/test/jtx/impl/check.cpp
|
||||
src/test/jtx/impl/cron.cpp
|
||||
src/test/jtx/impl/delivermin.cpp
|
||||
src/test/jtx/impl/deposit.cpp
|
||||
src/test/jtx/impl/envconfig.cpp
|
||||
@@ -951,6 +965,7 @@ if (tests)
|
||||
src/test/nodestore/Basics_test.cpp
|
||||
src/test/nodestore/DatabaseShard_test.cpp
|
||||
src/test/nodestore/Database_test.cpp
|
||||
src/test/nodestore/NuDBFactory_test.cpp
|
||||
src/test/nodestore/Timing_test.cpp
|
||||
src/test/nodestore/import_test.cpp
|
||||
src/test/nodestore/varint_test.cpp
|
||||
@@ -975,7 +990,6 @@ if (tests)
|
||||
test sources:
|
||||
subdir: protocol
|
||||
#]===============================]
|
||||
src/test/protocol/blake3_test.cpp
|
||||
src/test/protocol/BuildInfo_test.cpp
|
||||
src/test/protocol/InnerObjectFormats_test.cpp
|
||||
src/test/protocol/Issue_test.cpp
|
||||
@@ -998,6 +1012,11 @@ if (tests)
|
||||
subdir: resource
|
||||
#]===============================]
|
||||
src/test/resource/Logic_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: rdb
|
||||
#]===============================]
|
||||
src/test/rdb/RelationalDatabase_test.cpp
|
||||
#[===============================[
|
||||
test sources:
|
||||
subdir: rpc
|
||||
@@ -1071,8 +1090,10 @@ target_link_libraries (rippled
    Ripple::opts
    Ripple::libs
    Ripple::xrpl_core
    BLAKE3::blake3
    # Workaround for a Conan 1.x bug...
    # Workaround for a Conan 1.x bug that prevents static linking of libstdc++
    # when a dependency (snappy) modifies system_libs. See the comment in
    # external/snappy/conanfile.py for a full explanation.
    # This is likely not strictly necessary, but listed explicitly as a good practice.
    m
  )
exclude_if_included (rippled)
|
||||
@@ -1,33 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH prefix path..this is where we will download
|
||||
and build any ExternalProjects, and they will hopefully
|
||||
survive across build directory deletion (manual cleans)
|
||||
#]===================================================================]
|
||||
|
||||
string (REGEX REPLACE "[ \\/%]+" "_" gen_for_path ${CMAKE_GENERATOR})
|
||||
string (TOLOWER ${gen_for_path} gen_for_path)
|
||||
# HACK: trying to shorten paths for windows CI (which hits 260 MAXPATH easily)
|
||||
# @see: https://issues.jenkins-ci.org/browse/JENKINS-38706?focusedCommentId=339847
|
||||
string (REPLACE "visual_studio" "vs" gen_for_path ${gen_for_path})
|
||||
if (NOT DEFINED NIH_CACHE_ROOT)
|
||||
if (DEFINED ENV{NIH_CACHE_ROOT})
|
||||
set (NIH_CACHE_ROOT $ENV{NIH_CACHE_ROOT})
|
||||
else ()
|
||||
set (NIH_CACHE_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/.nih_c")
|
||||
endif ()
|
||||
endif ()
|
||||
set (nih_cache_path
|
||||
"${NIH_CACHE_ROOT}/${gen_for_path}/${CMAKE_CXX_COMPILER_ID}_${CMAKE_CXX_COMPILER_VERSION}")
|
||||
if (NOT is_multiconfig)
|
||||
set (nih_cache_path "${nih_cache_path}/${CMAKE_BUILD_TYPE}")
|
||||
endif ()
|
||||
file(TO_CMAKE_PATH "${nih_cache_path}" nih_cache_path)
|
||||
message (STATUS "NIH-EP cache path: ${nih_cache_path}")
|
||||
## two convenience variables:
|
||||
set (ep_lib_prefix ${CMAKE_STATIC_LIBRARY_PREFIX})
|
||||
set (ep_lib_suffix ${CMAKE_STATIC_LIBRARY_SUFFIX})
|
||||
|
||||
# this is a setting for FetchContent and needs to be
|
||||
# a cache variable
|
||||
# https://cmake.org/cmake/help/latest/module/FetchContent.html#populating-the-content
|
||||
set (FETCHCONTENT_BASE_DIR ${nih_cache_path} CACHE STRING "" FORCE)
|
||||
@@ -1,52 +0,0 @@
|
||||
find_package(Boost 1.83 REQUIRED
|
||||
COMPONENTS
|
||||
chrono
|
||||
container
|
||||
context
|
||||
coroutine
|
||||
date_time
|
||||
filesystem
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
thread
|
||||
)
|
||||
|
||||
add_library(ripple_boost INTERFACE)
|
||||
add_library(Ripple::boost ALIAS ripple_boost)
|
||||
if(XCODE)
|
||||
target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
|
||||
else()
|
||||
target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
endif()
|
||||
|
||||
target_link_libraries(ripple_boost
|
||||
INTERFACE
|
||||
Boost::boost
|
||||
Boost::chrono
|
||||
Boost::container
|
||||
Boost::coroutine
|
||||
Boost::date_time
|
||||
Boost::filesystem
|
||||
Boost::program_options
|
||||
Boost::regex
|
||||
Boost::system
|
||||
Boost::iostreams
|
||||
Boost::thread)
|
||||
if(Boost_COMPILER)
|
||||
target_link_libraries(ripple_boost INTERFACE Boost::disable_autolinking)
|
||||
endif()
|
||||
if(san AND is_clang)
|
||||
# TODO: gcc does not support -fsanitize-blacklist...can we do something else
|
||||
# for gcc ?
|
||||
if(NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
|
||||
get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif()
|
||||
message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
|
||||
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
|
||||
target_compile_options(opts
|
||||
INTERFACE
|
||||
# ignore boost headers for sanitizing
|
||||
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
|
||||
endif()
|
||||
@@ -1,22 +0,0 @@
|
||||
find_package(Protobuf 3.8)
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
set(ccbd ${CMAKE_CURRENT_BINARY_DIR})
|
||||
set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS src/ripple/proto/ripple.proto)
|
||||
set(CMAKE_CURRENT_BINARY_DIR ${ccbd})
|
||||
|
||||
add_library(pbufs STATIC ${PROTO_SRCS} ${PROTO_HDRS})
|
||||
target_include_directories(pbufs SYSTEM PUBLIC
|
||||
${CMAKE_BINARY_DIR}/proto_gen
|
||||
${CMAKE_BINARY_DIR}/proto_gen/src/ripple/proto
|
||||
)
|
||||
target_link_libraries(pbufs protobuf::libprotobuf)
|
||||
target_compile_options(pbufs
|
||||
PUBLIC
|
||||
$<$<BOOL:${XCODE}>:
|
||||
--system-header-prefix="google/protobuf"
|
||||
-Wno-deprecated-dynamic-exception-spec
|
||||
>
|
||||
)
|
||||
add_library(Ripple::pbufs ALIAS pbufs)
|
||||
@@ -1,62 +0,0 @@
|
||||
find_package(gRPC 1.23)
|
||||
|
||||
#[=================================[
|
||||
generate protobuf sources for
|
||||
grpc defs and bundle into a
|
||||
static lib
|
||||
#]=================================]
|
||||
set(GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc")
|
||||
file(MAKE_DIRECTORY ${GRPC_GEN_DIR})
|
||||
set(GRPC_PROTO_SRCS)
|
||||
set(GRPC_PROTO_HDRS)
|
||||
set(GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org")
|
||||
file(GLOB_RECURSE GRPC_DEFINITION_FILES LIST_DIRECTORIES false "${GRPC_PROTO_ROOT}/*.proto")
|
||||
foreach(file ${GRPC_DEFINITION_FILES})
|
||||
get_filename_component(_abs_file ${file} ABSOLUTE)
|
||||
get_filename_component(_abs_dir ${_abs_file} DIRECTORY)
|
||||
get_filename_component(_basename ${file} NAME_WE)
|
||||
get_filename_component(_proto_inc ${GRPC_PROTO_ROOT} DIRECTORY) # updir one level
|
||||
file(RELATIVE_PATH _rel_root_file ${_proto_inc} ${_abs_file})
|
||||
get_filename_component(_rel_root_dir ${_rel_root_file} DIRECTORY)
|
||||
file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir})
|
||||
|
||||
set(src_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.cc")
|
||||
set(src_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.cc")
|
||||
set(hdr_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.h")
|
||||
set(hdr_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.h")
|
||||
add_custom_command(
|
||||
OUTPUT ${src_1} ${src_2} ${hdr_1} ${hdr_2}
|
||||
COMMAND protobuf::protoc
|
||||
ARGS --grpc_out=${GRPC_GEN_DIR}
|
||||
--cpp_out=${GRPC_GEN_DIR}
|
||||
--plugin=protoc-gen-grpc=$<TARGET_FILE:gRPC::grpc_cpp_plugin>
|
||||
-I ${_proto_inc} -I ${_rel_dir}
|
||||
${_abs_file}
|
||||
DEPENDS ${_abs_file} protobuf::protoc gRPC::grpc_cpp_plugin
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "Running gRPC C++ protocol buffer compiler on ${file}"
|
||||
VERBATIM)
|
||||
set_source_files_properties(${src_1} ${src_2} ${hdr_1} ${hdr_2} PROPERTIES GENERATED TRUE)
|
||||
list(APPEND GRPC_PROTO_SRCS ${src_1} ${src_2})
|
||||
list(APPEND GRPC_PROTO_HDRS ${hdr_1} ${hdr_2})
|
||||
endforeach()
|
||||
|
||||
add_library(grpc_pbufs STATIC ${GRPC_PROTO_SRCS} ${GRPC_PROTO_HDRS})
|
||||
#target_include_directories(grpc_pbufs PRIVATE src)
|
||||
target_include_directories(grpc_pbufs SYSTEM PUBLIC ${GRPC_GEN_DIR})
|
||||
target_link_libraries(grpc_pbufs
|
||||
"gRPC::grpc++"
|
||||
# libgrpc is missing references.
|
||||
absl::random_random
|
||||
)
|
||||
target_compile_options(grpc_pbufs
|
||||
PRIVATE
|
||||
$<$<BOOL:${MSVC}>:-wd4065>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:-Wno-deprecated-declarations>
|
||||
PUBLIC
|
||||
$<$<BOOL:${MSVC}>:-wd4996>
|
||||
$<$<BOOL:${XCODE}>:
|
||||
--system-header-prefix="google/protobuf"
|
||||
-Wno-deprecated-dynamic-exception-spec
|
||||
>)
|
||||
add_library(Ripple::grpc_pbufs ALIAS grpc_pbufs)
|
||||
@@ -1,51 +1,3 @@
|
||||
#[===================================================================[
|
||||
NIH dep: boost
|
||||
#]===================================================================]
|
||||
if((NOT DEFINED BOOST_ROOT) AND(DEFINED ENV{BOOST_ROOT}))
|
||||
set(BOOST_ROOT $ENV{BOOST_ROOT})
|
||||
endif()
|
||||
if((NOT DEFINED BOOST_LIBRARYDIR) AND(DEFINED ENV{BOOST_LIBRARYDIR}))
|
||||
set(BOOST_LIBRARYDIR $ENV{BOOST_LIBRARYDIR})
|
||||
endif()
|
||||
file(TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT)
|
||||
if(WIN32 OR CYGWIN)
|
||||
# Workaround for MSVC having two boost versions - x86 and x64 on same PC in stage folders
|
||||
if((NOT DEFINED BOOST_LIBRARYDIR) AND (DEFINED BOOST_ROOT))
|
||||
if(IS_DIRECTORY ${BOOST_ROOT}/stage64/lib)
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage64/lib)
|
||||
elseif(IS_DIRECTORY ${BOOST_ROOT}/stage/lib)
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage/lib)
|
||||
elseif(IS_DIRECTORY ${BOOST_ROOT}/lib)
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT}/lib)
|
||||
else()
|
||||
message(WARNING "Did not find expected boost library dir. "
|
||||
"Defaulting to ${BOOST_ROOT}")
|
||||
set(BOOST_LIBRARYDIR ${BOOST_ROOT})
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
message(STATUS "BOOST_ROOT: ${BOOST_ROOT}")
|
||||
message(STATUS "BOOST_LIBRARYDIR: ${BOOST_LIBRARYDIR}")
|
||||
|
||||
# uncomment the following as needed to debug FindBoost issues:
|
||||
#set(Boost_DEBUG ON)
|
||||
|
||||
#[=========================================================[
|
||||
boost dynamic libraries don't trivially support @rpath
|
||||
linking right now (cmake's default), so just force
|
||||
static linking for macos, or if requested on linux by flag
|
||||
#]=========================================================]
|
||||
if(static)
|
||||
set(Boost_USE_STATIC_LIBS ON)
|
||||
endif()
|
||||
set(Boost_USE_MULTITHREADED ON)
|
||||
if(static AND NOT APPLE)
|
||||
set(Boost_USE_STATIC_RUNTIME ON)
|
||||
else()
|
||||
set(Boost_USE_STATIC_RUNTIME OFF)
|
||||
endif()
|
||||
# TBD:
|
||||
# Boost_USE_DEBUG_RUNTIME: When ON, uses Boost libraries linked against the
|
||||
find_package(Boost 1.86 REQUIRED
|
||||
COMPONENTS
|
||||
chrono
|
||||
@@ -57,12 +9,12 @@ find_package(Boost 1.86 REQUIRED
|
||||
program_options
|
||||
regex
|
||||
system
|
||||
iostreams
|
||||
thread)
|
||||
thread
|
||||
)
|
||||
|
||||
add_library(ripple_boost INTERFACE)
|
||||
add_library(Ripple::boost ALIAS ripple_boost)
|
||||
if(is_xcode)
|
||||
if(XCODE)
|
||||
target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
|
||||
target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
|
||||
else()
|
||||
@@ -77,10 +29,10 @@ target_link_libraries(ripple_boost
|
||||
Boost::coroutine
|
||||
Boost::date_time
|
||||
Boost::filesystem
|
||||
Boost::iostreams
|
||||
Boost::program_options
|
||||
Boost::regex
|
||||
Boost::system
|
||||
Boost::iostreams
|
||||
Boost::thread)
|
||||
if(Boost_COMPILER)
|
||||
target_link_libraries(ripple_boost INTERFACE Boost::disable_autolinking)
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: ed25519-donna
|
||||
#]===================================================================]
|
||||
|
||||
add_library (ed25519-donna STATIC
|
||||
src/ed25519-donna/ed25519.c)
|
||||
target_include_directories (ed25519-donna
|
||||
PUBLIC
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
|
||||
$<INSTALL_INTERFACE:include>
|
||||
PRIVATE
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/ed25519-donna)
|
||||
#[=========================================================[
|
||||
NOTE for macos:
|
||||
https://github.com/floodyberry/ed25519-donna/issues/29
|
||||
our source for ed25519-donna-portable.h has been
|
||||
patched to workaround this.
|
||||
#]=========================================================]
|
||||
target_link_libraries (ed25519-donna PUBLIC OpenSSL::SSL)
|
||||
add_library (NIH::ed25519-donna ALIAS ed25519-donna)
|
||||
target_link_libraries (ripple_libs INTERFACE NIH::ed25519-donna)
|
||||
#[===========================[
|
||||
headers installation
|
||||
#]===========================]
|
||||
install (
|
||||
FILES
|
||||
src/ed25519-donna/ed25519.h
|
||||
DESTINATION include/ed25519-donna)
|
||||
File diff suppressed because it is too large
@@ -1,47 +0,0 @@
|
||||
# - Try to find jemalloc
|
||||
# Once done this will define
|
||||
# JEMALLOC_FOUND - System has jemalloc
|
||||
# JEMALLOC_INCLUDE_DIRS - The jemalloc include directories
|
||||
# JEMALLOC_LIBRARIES - The libraries needed to use jemalloc
|
||||
|
||||
if(NOT USE_BUNDLED_JEMALLOC)
|
||||
find_package(PkgConfig)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_check_modules(PC_JEMALLOC QUIET jemalloc)
|
||||
endif()
|
||||
else()
|
||||
set(PC_JEMALLOC_INCLUDEDIR)
|
||||
set(PC_JEMALLOC_INCLUDE_DIRS)
|
||||
set(PC_JEMALLOC_LIBDIR)
|
||||
set(PC_JEMALLOC_LIBRARY_DIRS)
|
||||
set(LIMIT_SEARCH NO_DEFAULT_PATH)
|
||||
endif()
|
||||
|
||||
set(JEMALLOC_DEFINITIONS ${PC_JEMALLOC_CFLAGS_OTHER})
|
||||
|
||||
find_path(JEMALLOC_INCLUDE_DIR jemalloc/jemalloc.h
|
||||
PATHS ${PC_JEMALLOC_INCLUDEDIR} ${PC_JEMALLOC_INCLUDE_DIRS}
|
||||
${LIMIT_SEARCH})
|
||||
|
||||
# If we're asked to use static linkage, add libjemalloc.a as a preferred library name.
|
||||
if(JEMALLOC_USE_STATIC)
|
||||
list(APPEND JEMALLOC_NAMES
|
||||
"${CMAKE_STATIC_LIBRARY_PREFIX}jemalloc${CMAKE_STATIC_LIBRARY_SUFFIX}")
|
||||
endif()
|
||||
|
||||
list(APPEND JEMALLOC_NAMES jemalloc)
|
||||
|
||||
find_library(JEMALLOC_LIBRARY NAMES ${JEMALLOC_NAMES}
|
||||
HINTS ${PC_JEMALLOC_LIBDIR} ${PC_JEMALLOC_LIBRARY_DIRS}
|
||||
${LIMIT_SEARCH})
|
||||
|
||||
set(JEMALLOC_LIBRARIES ${JEMALLOC_LIBRARY})
|
||||
set(JEMALLOC_INCLUDE_DIRS ${JEMALLOC_INCLUDE_DIR})
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
# handle the QUIETLY and REQUIRED arguments and set JEMALLOC_FOUND to TRUE
|
||||
# if all listed variables are TRUE
|
||||
find_package_handle_standard_args(JeMalloc DEFAULT_MSG
|
||||
JEMALLOC_LIBRARY JEMALLOC_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(JEMALLOC_INCLUDE_DIR JEMALLOC_LIBRARY)
|
||||
@@ -1,22 +0,0 @@
|
||||
find_package (PkgConfig REQUIRED)
|
||||
pkg_search_module (libarchive_PC QUIET libarchive>=3.4.3)
|
||||
|
||||
if(static)
|
||||
set(LIBARCHIVE_LIB libarchive.a)
|
||||
else()
|
||||
set(LIBARCHIVE_LIB archive)
|
||||
endif()
|
||||
|
||||
find_library (archive
|
||||
NAMES ${LIBARCHIVE_LIB}
|
||||
HINTS
|
||||
${libarchive_PC_LIBDIR}
|
||||
${libarchive_PC_LIBRARY_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_path (LIBARCHIVE_INCLUDE_DIR
|
||||
NAMES archive.h
|
||||
HINTS
|
||||
${libarchive_PC_INCLUDEDIR}
|
||||
${libarchive_PC_INCLUDEDIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
@@ -1,24 +0,0 @@
|
||||
find_package (PkgConfig)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_search_module (lz4_PC QUIET liblz4>=1.9)
|
||||
endif ()
|
||||
|
||||
if(static)
|
||||
set(LZ4_LIB liblz4.a)
|
||||
else()
|
||||
set(LZ4_LIB lz4.so)
|
||||
endif()
|
||||
|
||||
find_library (lz4
|
||||
NAMES ${LZ4_LIB}
|
||||
HINTS
|
||||
${lz4_PC_LIBDIR}
|
||||
${lz4_PC_LIBRARY_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_path (LZ4_INCLUDE_DIR
|
||||
NAMES lz4.h
|
||||
HINTS
|
||||
${lz4_PC_INCLUDEDIR}
|
||||
${lz4_PC_INCLUDEDIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
@@ -1,24 +0,0 @@
|
||||
find_package (PkgConfig)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_search_module (secp256k1_PC QUIET libsecp256k1)
|
||||
endif ()
|
||||
|
||||
if(static)
|
||||
set(SECP256K1_LIB libsecp256k1.a)
|
||||
else()
|
||||
set(SECP256K1_LIB secp256k1)
|
||||
endif()
|
||||
|
||||
find_library(secp256k1
|
||||
NAMES ${SECP256K1_LIB}
|
||||
HINTS
|
||||
${secp256k1_PC_LIBDIR}
|
||||
${secp256k1_PC_LIBRARY_PATHS}
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_path (SECP256K1_INCLUDE_DIR
|
||||
NAMES secp256k1.h
|
||||
HINTS
|
||||
${secp256k1_PC_INCLUDEDIR}
|
||||
${secp256k1_PC_INCLUDEDIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
@@ -1,24 +0,0 @@
|
||||
find_package (PkgConfig)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_search_module (snappy_PC QUIET snappy>=1.1.7)
|
||||
endif ()
|
||||
|
||||
if(static)
|
||||
set(SNAPPY_LIB libsnappy.a)
|
||||
else()
|
||||
set(SNAPPY_LIB libsnappy.so)
|
||||
endif()
|
||||
|
||||
find_library (snappy
|
||||
NAMES ${SNAPPY_LIB}
|
||||
HINTS
|
||||
${snappy_PC_LIBDIR}
|
||||
${snappy_PC_LIBRARY_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_path (SNAPPY_INCLUDE_DIR
|
||||
NAMES snappy.h
|
||||
HINTS
|
||||
${snappy_PC_INCLUDEDIR}
|
||||
${snappy_PC_INCLUDEDIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
@@ -1,19 +0,0 @@
|
||||
find_package (PkgConfig)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
# TBD - currently no soci pkgconfig
|
||||
#pkg_search_module (soci_PC QUIET libsoci_core>=3.2)
|
||||
endif ()
|
||||
|
||||
if(static)
|
||||
set(SOCI_LIB libsoci.a)
|
||||
else()
|
||||
set(SOCI_LIB libsoci_core.so)
|
||||
endif()
|
||||
|
||||
find_library (soci
|
||||
NAMES ${SOCI_LIB})
|
||||
|
||||
find_path (SOCI_INCLUDE_DIR
|
||||
NAMES soci/soci.h)
|
||||
|
||||
message("SOCI FOUND AT: ${SOCI_LIB}")
|
||||
@@ -1,24 +0,0 @@
|
||||
find_package (PkgConfig)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_search_module (sqlite_PC QUIET sqlite3>=3.26.0)
|
||||
endif ()
|
||||
|
||||
if(static)
|
||||
set(SQLITE_LIB libsqlite3.a)
|
||||
else()
|
||||
set(SQLITE_LIB sqlite3.so)
|
||||
endif()
|
||||
|
||||
find_library (sqlite3
|
||||
NAMES ${SQLITE_LIB}
|
||||
HINTS
|
||||
${sqlite_PC_LIBDIR}
|
||||
${sqlite_PC_LIBRARY_DIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_path (SQLITE_INCLUDE_DIR
|
||||
NAMES sqlite3.h
|
||||
HINTS
|
||||
${sqlite_PC_INCLUDEDIR}
|
||||
${sqlite_PC_INCLUDEDIRS}
|
||||
NO_DEFAULT_PATH)
|
||||
@@ -1,163 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: libarchive
|
||||
#]===================================================================]
|
||||
|
||||
option (local_libarchive "use local build of libarchive." OFF)
|
||||
add_library (archive_lib UNKNOWN IMPORTED GLOBAL)
|
||||
|
||||
if (NOT local_libarchive)
|
||||
if (NOT WIN32)
|
||||
find_package(libarchive_pc REQUIRED)
|
||||
endif ()
|
||||
if (archive)
|
||||
message (STATUS "Found libarchive using pkg-config. Using ${archive}.")
|
||||
set_target_properties (archive_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${archive}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${archive}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${LIBARCHIVE_INCLUDE_DIR})
|
||||
# pkg-config can return extra info for static lib linking
|
||||
# this is probably needed/useful generally, but apply
|
||||
# to APPLE for now (mostly for homebrew)
|
||||
if (APPLE AND static AND libarchive_PC_STATIC_LIBRARIES)
|
||||
message(STATUS "NOTE: libarchive static libs: ${libarchive_PC_STATIC_LIBRARIES}")
|
||||
# also, APPLE seems to need iconv...maybe linux does too (TBD)
|
||||
target_link_libraries (archive_lib
|
||||
INTERFACE iconv ${libarchive_PC_STATIC_LIBRARIES})
|
||||
endif ()
|
||||
else ()
|
||||
## now try searching using the minimal find module that cmake provides
|
||||
find_package(LibArchive 3.4.3 QUIET)
|
||||
if (LibArchive_FOUND)
|
||||
if (static)
|
||||
# find module doesn't find static libs currently, so we re-search
|
||||
get_filename_component(_loc ${LibArchive_LIBRARY} DIRECTORY)
|
||||
find_library(_la_static
|
||||
NAMES libarchive.a archive_static.lib archive.lib
|
||||
PATHS ${_loc})
|
||||
if (_la_static)
|
||||
set (_la_lib ${_la_static})
|
||||
else ()
|
||||
message (WARNING "unable to find libarchive static lib - switching to local build")
|
||||
set (local_libarchive ON CACHE BOOL "" FORCE)
|
||||
endif ()
|
||||
else ()
|
||||
set (_la_lib ${LibArchive_LIBRARY})
|
||||
endif ()
|
||||
if (NOT local_libarchive)
|
||||
message (STATUS "Found libarchive using module/config. Using ${_la_lib}.")
|
||||
set_target_properties (archive_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${_la_lib}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${_la_lib}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${LibArchive_INCLUDE_DIRS})
|
||||
endif ()
|
||||
else ()
|
||||
set (local_libarchive ON CACHE BOOL "" FORCE)
|
||||
endif ()
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (local_libarchive)
|
||||
set (lib_post "")
|
||||
if (MSVC)
|
||||
set (lib_post "_static")
|
||||
endif ()
|
||||
ExternalProject_Add (libarchive
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/libarchive/libarchive.git
|
||||
GIT_TAG v3.4.3
|
||||
CMAKE_ARGS
|
||||
# passing the compiler seems to be needed for windows CI, sadly
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DENABLE_LZ4=ON
|
||||
-ULZ4_*
|
||||
-DLZ4_INCLUDE_DIR=$<JOIN:$<TARGET_PROPERTY:lz4_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
# because we are building a static lib, this lz4 library doesn't
|
||||
# actually matter since you can't generally link static libs to other static
|
||||
# libs. The include files are needed, but the library itself is not (until
|
||||
# we link our application, at which point we use the lz4 we built above).
|
||||
# nonetheless, we need to provide a library to libarchive else it will
|
||||
# NOT include lz4 support when configuring
|
||||
-DLZ4_LIBRARY=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:lz4_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:lz4_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-DENABLE_WERROR=OFF
|
||||
-DENABLE_TAR=OFF
|
||||
-DENABLE_TAR_SHARED=OFF
|
||||
-DENABLE_INSTALL=ON
|
||||
-DENABLE_NETTLE=OFF
|
||||
-DENABLE_OPENSSL=OFF
|
||||
-DENABLE_LZO=OFF
|
||||
-DENABLE_LZMA=OFF
|
||||
-DENABLE_ZLIB=OFF
|
||||
-DENABLE_BZip2=OFF
|
||||
-DENABLE_LIBXML2=OFF
|
||||
-DENABLE_EXPAT=OFF
|
||||
-DENABLE_PCREPOSIX=OFF
|
||||
-DENABLE_LibGCC=OFF
|
||||
-DENABLE_CNG=OFF
|
||||
-DENABLE_CPIO=OFF
|
||||
-DENABLE_CPIO_SHARED=OFF
|
||||
-DENABLE_CAT=OFF
|
||||
-DENABLE_CAT_SHARED=OFF
|
||||
-DENABLE_XATTR=OFF
|
||||
-DENABLE_ACL=OFF
|
||||
-DENABLE_ICONV=OFF
|
||||
-DENABLE_TEST=OFF
|
||||
-DENABLE_COVERAGE=OFF
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP"
|
||||
"-DCMAKE_C_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_C_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
LIST_SEPARATOR ::
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--target archive_static
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/libarchive/$<CONFIG>/${ep_lib_prefix}archive${lib_post}$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/libarchive
|
||||
>
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
DEPENDS lz4_lib
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/libarchive/${ep_lib_prefix}archive${lib_post}${ep_lib_suffix}
|
||||
<BINARY_DIR>/libarchive/${ep_lib_prefix}archive${lib_post}_d${ep_lib_suffix}
|
||||
)
|
||||
ExternalProject_Get_Property (libarchive BINARY_DIR)
|
||||
ExternalProject_Get_Property (libarchive SOURCE_DIR)
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (libarchive)
|
||||
endif ()
|
||||
file (MAKE_DIRECTORY ${SOURCE_DIR}/libarchive)
|
||||
set_target_properties (archive_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/libarchive/${ep_lib_prefix}archive${lib_post}_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/libarchive/${ep_lib_prefix}archive${lib_post}${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/libarchive
|
||||
INTERFACE_COMPILE_DEFINITIONS
|
||||
LIBARCHIVE_STATIC)
|
||||
endif()
|
||||
|
||||
add_dependencies (archive_lib libarchive)
|
||||
target_link_libraries (archive_lib INTERFACE lz4_lib)
|
||||
target_link_libraries (ripple_libs INTERFACE archive_lib)
|
||||
exclude_if_included (libarchive)
|
||||
exclude_if_included (archive_lib)
|
||||
@@ -1,79 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: lz4
|
||||
#]===================================================================]
|
||||
|
||||
add_library (lz4_lib STATIC IMPORTED GLOBAL)
|
||||
|
||||
if (NOT WIN32)
|
||||
find_package(lz4)
|
||||
endif()
|
||||
|
||||
if(lz4)
|
||||
set_target_properties (lz4_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${lz4}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${lz4}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${LZ4_INCLUDE_DIR})
|
||||
|
||||
else()
|
||||
ExternalProject_Add (lz4
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/lz4/lz4.git
|
||||
GIT_TAG v1.9.2
|
||||
SOURCE_SUBDIR contrib/cmake_unofficial
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DBUILD_STATIC_LIBS=ON
|
||||
-DBUILD_SHARED_LIBS=OFF
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP"
|
||||
"-DCMAKE_C_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_C_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--target lz4_static
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}lz4$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>
|
||||
>
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/${ep_lib_prefix}lz4${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}lz4_d${ep_lib_suffix}
|
||||
)
|
||||
ExternalProject_Get_Property (lz4 BINARY_DIR)
|
||||
ExternalProject_Get_Property (lz4 SOURCE_DIR)
|
||||
|
||||
file (MAKE_DIRECTORY ${SOURCE_DIR}/lz4)
|
||||
set_target_properties (lz4_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/${ep_lib_prefix}lz4_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/${ep_lib_prefix}lz4${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/lib)
|
||||
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (lz4)
|
||||
endif ()
|
||||
add_dependencies (lz4_lib lz4)
|
||||
target_link_libraries (ripple_libs INTERFACE lz4_lib)
|
||||
exclude_if_included (lz4)
|
||||
endif()
|
||||
|
||||
exclude_if_included (lz4_lib)
|
||||
@@ -1,31 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: nudb
|
||||
|
||||
NuDB is header-only, thus is an INTERFACE lib in CMake.
|
||||
TODO: move the library definition into NuDB repo and add
|
||||
proper targets and export/install
|
||||
#]===================================================================]
|
||||
|
||||
if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build
|
||||
add_library (nudb INTERFACE)
|
||||
FetchContent_Declare(
|
||||
nudb_src
|
||||
GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git
|
||||
GIT_TAG 2.0.5
|
||||
)
|
||||
FetchContent_GetProperties(nudb_src)
|
||||
if(NOT nudb_src_POPULATED)
|
||||
message (STATUS "Pausing to download NuDB...")
|
||||
FetchContent_Populate(nudb_src)
|
||||
endif()
|
||||
|
||||
file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR)
|
||||
# specify as system includes so as to avoid warnings
|
||||
target_include_directories (nudb SYSTEM INTERFACE ${nudb_src_SOURCE_DIR}/include)
|
||||
target_link_libraries (nudb
|
||||
INTERFACE
|
||||
Boost::thread
|
||||
Boost::system)
|
||||
add_library (NIH::nudb ALIAS nudb)
|
||||
target_link_libraries (ripple_libs INTERFACE NIH::nudb)
|
||||
endif ()
|
||||
@@ -1,48 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: openssl
|
||||
#]===================================================================]
|
||||
|
||||
#[===============================================[
|
||||
OPENSSL_ROOT_DIR is the only variable that
|
||||
FindOpenSSL honors for locating, so convert any
|
||||
OPENSSL_ROOT vars to this
|
||||
#]===============================================]
|
||||
if (NOT DEFINED OPENSSL_ROOT_DIR)
|
||||
if (DEFINED ENV{OPENSSL_ROOT})
|
||||
set (OPENSSL_ROOT_DIR $ENV{OPENSSL_ROOT})
|
||||
elseif (HOMEBREW)
|
||||
execute_process (COMMAND ${HOMEBREW} --prefix openssl
|
||||
OUTPUT_VARIABLE OPENSSL_ROOT_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
endif ()
|
||||
file (TO_CMAKE_PATH "${OPENSSL_ROOT_DIR}" OPENSSL_ROOT_DIR)
|
||||
endif ()
|
||||
|
||||
if (static)
|
||||
set (OPENSSL_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
set (OPENSSL_MSVC_STATIC_RT ON)
|
||||
find_package (OpenSSL 1.1.1 REQUIRED)
|
||||
target_link_libraries (ripple_libs
|
||||
INTERFACE
|
||||
OpenSSL::SSL
|
||||
OpenSSL::Crypto)
|
||||
# disable SSLv2...this can also be done when building/configuring OpenSSL
|
||||
set_target_properties(OpenSSL::SSL PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2)
|
||||
#[=========================================================[
|
||||
https://gitlab.kitware.com/cmake/cmake/issues/16885
|
||||
depending on how openssl is built, it might depend
|
||||
on zlib. In fact, the openssl find package should
|
||||
figure this out for us, but it does not currently...
|
||||
so let's add zlib ourselves to the lib list
|
||||
TODO: investigate linking to static zlib for static
|
||||
build option
|
||||
#]=========================================================]
|
||||
find_package (ZLIB)
|
||||
set (has_zlib FALSE)
|
||||
if (TARGET ZLIB::ZLIB)
|
||||
set_target_properties(OpenSSL::Crypto PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
|
||||
set (has_zlib TRUE)
|
||||
endif ()
|
||||
@@ -1,70 +0,0 @@
|
||||
if(reporting)
|
||||
find_package(PostgreSQL)
|
||||
if(NOT PostgreSQL_FOUND)
|
||||
message("find_package did not find postgres")
|
||||
find_library(postgres NAMES pq libpq libpq-dev pq-dev postgresql-devel)
|
||||
find_path(libpq-fe NAMES libpq-fe.h PATH_SUFFIXES postgresql pgsql include)
|
||||
|
||||
if(NOT libpq-fe_FOUND OR NOT postgres_FOUND)
|
||||
message("No system installed Postgres found. Will build")
|
||||
add_library(postgres SHARED IMPORTED GLOBAL)
|
||||
add_library(pgport SHARED IMPORTED GLOBAL)
|
||||
add_library(pgcommon SHARED IMPORTED GLOBAL)
|
||||
ExternalProject_Add(postgres_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/postgres/postgres.git
|
||||
GIT_TAG REL_14_5
|
||||
CONFIGURE_COMMAND ./configure --without-readline > /dev/null
|
||||
BUILD_COMMAND ${CMAKE_COMMAND} -E env --unset=MAKELEVEL make
|
||||
UPDATE_COMMAND ""
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/src/interfaces/libpq/${ep_lib_prefix}pq.a
|
||||
<BINARY_DIR>/src/common/${ep_lib_prefix}pgcommon.a
|
||||
<BINARY_DIR>/src/port/${ep_lib_prefix}pgport.a
|
||||
LOG_BUILD TRUE
|
||||
)
|
||||
ExternalProject_Get_Property (postgres_src SOURCE_DIR)
|
||||
ExternalProject_Get_Property (postgres_src BINARY_DIR)
|
||||
|
||||
set (postgres_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${postgres_src_SOURCE_DIR})
|
||||
list(APPEND INCLUDE_DIRS
|
||||
${SOURCE_DIR}/src/include
|
||||
${SOURCE_DIR}/src/interfaces/libpq
|
||||
)
|
||||
set_target_properties(postgres PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/src/interfaces/libpq/${ep_lib_prefix}pq.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${INCLUDE_DIRS}"
|
||||
)
|
||||
set_target_properties(pgcommon PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/src/common/${ep_lib_prefix}pgcommon.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${INCLUDE_DIRS}"
|
||||
)
|
||||
set_target_properties(pgport PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/src/port/${ep_lib_prefix}pgport.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${INCLUDE_DIRS}"
|
||||
)
|
||||
add_dependencies(postgres postgres_src)
|
||||
add_dependencies(pgcommon postgres_src)
|
||||
add_dependencies(pgport postgres_src)
|
||||
file(TO_CMAKE_PATH "${postgres_src_SOURCE_DIR}" postgres_src_SOURCE_DIR)
|
||||
target_link_libraries(ripple_libs INTERFACE postgres pgcommon pgport)
|
||||
else()
|
||||
message("Found system installed Postgres via find_libary")
|
||||
target_include_directories(ripple_libs INTERFACE ${libpq-fe})
|
||||
target_link_libraries(ripple_libs INTERFACE ${postgres})
|
||||
endif()
|
||||
else()
|
||||
message("Found system installed Postgres via find_package")
|
||||
target_include_directories(ripple_libs INTERFACE ${PostgreSQL_INCLUDE_DIRS})
|
||||
target_link_libraries(ripple_libs INTERFACE ${PostgreSQL_LIBRARIES})
|
||||
endif()
|
||||
endif()
|
||||
@@ -1,155 +1,22 @@
|
||||
#[===================================================================[
|
||||
import protobuf (lib and compiler) and create a lib
|
||||
from our proto message definitions. If the system protobuf
|
||||
is not found, fallback on EP to download and build a version
|
||||
from official source.
|
||||
#]===================================================================]
|
||||
find_package(Protobuf 3.8)
|
||||
|
||||
if (static)
|
||||
set (Protobuf_USE_STATIC_LIBS ON)
|
||||
endif ()
|
||||
find_package (Protobuf 3.8)
|
||||
if (is_multiconfig)
|
||||
set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARIES})
|
||||
else ()
|
||||
string(TOUPPER ${CMAKE_BUILD_TYPE} upper_cmake_build_type)
|
||||
set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARY_${upper_cmake_build_type}})
|
||||
endif ()
|
||||
if (local_protobuf OR NOT (Protobuf_FOUND AND Protobuf_PROTOC_EXECUTABLE AND protobuf_protoc_lib))
|
||||
include (GNUInstallDirs)
|
||||
message (STATUS "using local protobuf build.")
|
||||
set(protobuf_reqs Protobuf_PROTOC_EXECUTABLE protobuf_protoc_lib)
|
||||
foreach(lib ${protobuf_reqs})
|
||||
if(NOT ${lib})
|
||||
message(STATUS "Couldn't find ${lib}")
|
||||
endif()
|
||||
endforeach()
|
||||
if (WIN32)
|
||||
# protobuf prepends lib even on windows
|
||||
set (pbuf_lib_pre "lib")
|
||||
else ()
|
||||
set (pbuf_lib_pre ${ep_lib_prefix})
|
||||
endif ()
|
||||
# for the external project build of protobuf, we currently ignore the
|
||||
# static option and always build static libs here. This is consistent
|
||||
# with our other EP builds. Dynamic libs in an EP would add complexity
|
||||
# because we'd need to get them into the runtime path, and probably
|
||||
# install them.
|
||||
ExternalProject_Add (protobuf_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/protocolbuffers/protobuf.git
|
||||
GIT_TAG v3.8.0
|
||||
SOURCE_SUBDIR cmake
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
-DCMAKE_INSTALL_PREFIX=<BINARY_DIR>/_installed_
|
||||
-Dprotobuf_BUILD_TESTS=OFF
|
||||
-Dprotobuf_BUILD_EXAMPLES=OFF
|
||||
-Dprotobuf_BUILD_PROTOC_BINARIES=ON
|
||||
-Dprotobuf_MSVC_STATIC_RUNTIME=ON
|
||||
-DBUILD_SHARED_LIBS=OFF
|
||||
-Dprotobuf_BUILD_SHARED_LIBS=OFF
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
-Dprotobuf_DEBUG_POSTFIX=_d
|
||||
-Dprotobuf_WITH_ZLIB=$<IF:$<BOOL:${has_zlib}>,ON,OFF>
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
$<$<BOOL:${unity}>:-DCMAKE_UNITY_BUILD=ON>
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf${ep_lib_suffix}
|
||||
<BINARY_DIR>/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc${ep_lib_suffix}
|
||||
<BINARY_DIR>/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/_installed_/bin/protoc${CMAKE_EXECUTABLE_SUFFIX}
|
||||
)
|
||||
ExternalProject_Get_Property (protobuf_src BINARY_DIR)
|
||||
ExternalProject_Get_Property (protobuf_src SOURCE_DIR)
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (protobuf_src)
|
||||
endif ()
|
||||
exclude_if_included (protobuf_src)
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
set(ccbd ${CMAKE_CURRENT_BINARY_DIR})
|
||||
set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS src/ripple/proto/ripple.proto)
|
||||
set(CMAKE_CURRENT_BINARY_DIR ${ccbd})
|
||||
|
||||
if (NOT TARGET protobuf::libprotobuf)
|
||||
add_library (protobuf::libprotobuf STATIC IMPORTED GLOBAL)
|
||||
endif ()
|
||||
file (MAKE_DIRECTORY ${BINARY_DIR}/_installed_/include)
|
||||
set_target_properties (protobuf::libprotobuf PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${BINARY_DIR}/_installed_/include)
|
||||
add_dependencies (protobuf::libprotobuf protobuf_src)
|
||||
exclude_if_included (protobuf::libprotobuf)
|
||||
|
||||
if (NOT TARGET protobuf::libprotoc)
|
||||
add_library (protobuf::libprotoc STATIC IMPORTED GLOBAL)
|
||||
endif ()
|
||||
set_target_properties (protobuf::libprotoc PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${BINARY_DIR}/_installed_/include)
|
||||
add_dependencies (protobuf::libprotoc protobuf_src)
|
||||
exclude_if_included (protobuf::libprotoc)
|
||||
|
||||
if (NOT TARGET protobuf::protoc)
|
||||
add_executable (protobuf::protoc IMPORTED)
|
||||
exclude_if_included (protobuf::protoc)
|
||||
endif ()
|
||||
set_target_properties (protobuf::protoc PROPERTIES
|
||||
IMPORTED_LOCATION "${BINARY_DIR}/_installed_/bin/protoc${CMAKE_EXECUTABLE_SUFFIX}")
|
||||
add_dependencies (protobuf::protoc protobuf_src)
|
||||
else ()
|
||||
if (NOT TARGET protobuf::protoc)
|
||||
if (EXISTS "${Protobuf_PROTOC_EXECUTABLE}")
|
||||
add_executable (protobuf::protoc IMPORTED)
|
||||
set_target_properties (protobuf::protoc PROPERTIES
|
||||
IMPORTED_LOCATION "${Protobuf_PROTOC_EXECUTABLE}")
|
||||
else ()
|
||||
message (FATAL_ERROR "Protobuf import failed")
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
file (MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
set (save_CBD ${CMAKE_CURRENT_BINARY_DIR})
|
||||
set (CMAKE_CURRENT_BINARY_DIR ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
protobuf_generate_cpp (
|
||||
PROTO_SRCS
|
||||
PROTO_HDRS
|
||||
src/ripple/proto/ripple.proto)
|
||||
set (CMAKE_CURRENT_BINARY_DIR ${save_CBD})
|
||||
|
||||
add_library (pbufs STATIC ${PROTO_SRCS} ${PROTO_HDRS})
|
||||
|
||||
target_include_directories (pbufs PRIVATE src)
|
||||
target_include_directories (pbufs
|
||||
SYSTEM PUBLIC ${CMAKE_BINARY_DIR}/proto_gen)
|
||||
target_link_libraries (pbufs protobuf::libprotobuf)
|
||||
target_compile_options (pbufs
|
||||
add_library(pbufs STATIC ${PROTO_SRCS} ${PROTO_HDRS})
|
||||
target_include_directories(pbufs SYSTEM PUBLIC
|
||||
${CMAKE_BINARY_DIR}/proto_gen
|
||||
${CMAKE_BINARY_DIR}/proto_gen/src/ripple/proto
|
||||
)
|
||||
target_link_libraries(pbufs protobuf::libprotobuf)
|
||||
target_compile_options(pbufs
|
||||
PUBLIC
|
||||
$<$<BOOL:${is_xcode}>:
|
||||
$<$<BOOL:${XCODE}>:
|
||||
--system-header-prefix="google/protobuf"
|
||||
-Wno-deprecated-dynamic-exception-spec
|
||||
>)
|
||||
add_library (Ripple::pbufs ALIAS pbufs)
|
||||
target_link_libraries (ripple_libs INTERFACE Ripple::pbufs)
|
||||
exclude_if_included (pbufs)
|
||||
>
|
||||
)
|
||||
add_library(Ripple::pbufs ALIAS pbufs)
|
||||
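One detail worth calling out from the protobuf block above is the temporary redirection of CMAKE_CURRENT_BINARY_DIR so that protobuf_generate_cpp drops its generated sources into a dedicated proto_gen directory instead of the current binary dir. A stripped-down sketch of that technique (the .proto path is the repo's; the saved-variable and target names are illustrative):

find_package(Protobuf REQUIRED)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/proto_gen)
set(_saved_bin_dir ${CMAKE_CURRENT_BINARY_DIR})
set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_BINARY_DIR}/proto_gen)
# protobuf_generate_cpp writes relative to CMAKE_CURRENT_BINARY_DIR,
# so pointing that variable elsewhere relocates the generated files.
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS src/ripple/proto/ripple.proto)
set(CMAKE_CURRENT_BINARY_DIR ${_saved_bin_dir})
add_library(pbufs_example STATIC ${PROTO_SRCS} ${PROTO_HDRS})
target_include_directories(pbufs_example SYSTEM PUBLIC ${CMAKE_BINARY_DIR}/proto_gen)
target_link_libraries(pbufs_example protobuf::libprotobuf)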
@@ -1,177 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: rocksdb
|
||||
#]===================================================================]
|
||||
|
||||
add_library (rocksdb_lib UNKNOWN IMPORTED GLOBAL)
|
||||
set_target_properties (rocksdb_lib
|
||||
PROPERTIES INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1)
|
||||
|
||||
option (local_rocksdb "use local build of rocksdb." OFF)
|
||||
if (NOT local_rocksdb)
|
||||
find_package (RocksDB 6.27 QUIET CONFIG)
|
||||
if (TARGET RocksDB::rocksdb)
|
||||
message (STATUS "Found RocksDB using config.")
|
||||
get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION_DEBUG)
|
||||
if (_rockslib_l)
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_DEBUG ${_rockslib_l})
|
||||
endif ()
|
||||
get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION_RELEASE)
|
||||
if (_rockslib_l)
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_RELEASE ${_rockslib_l})
|
||||
endif ()
|
||||
get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION)
|
||||
if (_rockslib_l)
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION ${_rockslib_l})
|
||||
endif ()
|
||||
get_target_property (_rockslib_i RocksDB::rocksdb INTERFACE_INCLUDE_DIRECTORIES)
|
||||
if (_rockslib_i)
|
||||
set_target_properties (rocksdb_lib PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${_rockslib_i})
|
||||
endif ()
|
||||
target_link_libraries (ripple_libs INTERFACE RocksDB::rocksdb)
|
||||
else ()
|
||||
# using a find module with rocksdb is difficult because
|
||||
# you have no idea how it was configured (transitive dependencies).
|
||||
# the code below will generally find rocksdb using the module, but
|
||||
# will then result in linker errors for static linkage since the
|
||||
# transitive dependencies are unknown. force local build here for now, but leave the code as
|
||||
# a placeholder for future investigation.
|
||||
if (static)
|
||||
set (local_rocksdb ON CACHE BOOL "" FORCE)
|
||||
# TBD if there is some way to extract transitive deps..then:
|
||||
#set (RocksDB_USE_STATIC ON)
|
||||
else ()
|
||||
find_package (RocksDB 6.27 MODULE)
|
||||
if (ROCKSDB_FOUND)
|
||||
if (RocksDB_LIBRARY_DEBUG)
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_DEBUG ${RocksDB_LIBRARY_DEBUG})
|
||||
endif ()
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_RELEASE ${RocksDB_LIBRARIES})
|
||||
set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION ${RocksDB_LIBRARIES})
|
||||
set_target_properties (rocksdb_lib PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${RocksDB_INCLUDE_DIRS})
|
||||
else ()
|
||||
set (local_rocksdb ON CACHE BOOL "" FORCE)
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if (local_rocksdb)
|
||||
message (STATUS "Using local build of RocksDB.")
|
||||
ExternalProject_Add (rocksdb
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
|
||||
GIT_TAG v6.27.3
|
||||
PATCH_COMMAND
|
||||
# only used by windows build
|
||||
${CMAKE_COMMAND} -E copy_if_different
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc
|
||||
<SOURCE_DIR>/thirdparty.inc
|
||||
COMMAND
|
||||
# fixup their build version file to keep the values
|
||||
# from changing always
|
||||
${CMAKE_COMMAND} -E copy_if_different
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocksdb_build_version.cc.in
|
||||
<SOURCE_DIR>/util/build_version.cc.in
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
$<$<BOOL:${unity}>:-DCMAKE_UNITY_BUILD=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DBUILD_SHARED_LIBS=OFF
|
||||
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
|
||||
-DWITH_JEMALLOC=$<IF:$<BOOL:${jemalloc}>,ON,OFF>
|
||||
-DWITH_SNAPPY=ON
|
||||
-DWITH_LZ4=ON
|
||||
-DWITH_ZLIB=OFF
|
||||
-DUSE_RTTI=ON
|
||||
-DWITH_ZSTD=OFF
|
||||
-DWITH_GFLAGS=OFF
|
||||
-DWITH_BZ2=OFF
|
||||
-ULZ4_*
|
||||
-Ulz4_*
|
||||
-Dlz4_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:lz4_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-Dlz4_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:lz4_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:lz4_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-Dlz4_FOUND=ON
|
||||
-USNAPPY_*
|
||||
-Usnappy_*
|
||||
-USnappy_*
|
||||
-Dsnappy_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:snappy_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-Dsnappy_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-Dsnappy_FOUND=ON
|
||||
-DSnappy_INCLUDE_DIRS=$<JOIN:$<TARGET_PROPERTY:snappy_lib,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-DSnappy_LIBRARIES=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:snappy_lib,IMPORTED_LOCATION_RELEASE>>
|
||||
-DSnappy_FOUND=ON
|
||||
-DWITH_MD_LIBRARY=OFF
|
||||
-DWITH_RUNTIME_DEBUG=$<IF:$<CONFIG:Debug>,ON,OFF>
|
||||
-DFAIL_ON_WARNINGS=OFF
|
||||
-DWITH_ASAN=OFF
|
||||
-DWITH_TSAN=OFF
|
||||
-DWITH_UBSAN=OFF
|
||||
-DWITH_NUMA=OFF
|
||||
-DWITH_TBB=OFF
|
||||
-DWITH_WINDOWS_UTF8_FILENAMES=OFF
|
||||
-DWITH_XPRESS=OFF
|
||||
-DPORTABLE=ON
|
||||
-DFORCE_SSE42=OFF
|
||||
-DDISABLE_STALL_NOTIF=OFF
|
||||
-DOPTDBG=ON
|
||||
-DROCKSDB_LITE=OFF
|
||||
-DWITH_FALLOCATE=ON
|
||||
-DWITH_LIBRADOS=OFF
|
||||
-DWITH_JNI=OFF
|
||||
-DROCKSDB_INSTALL_ON_WINDOWS=OFF
|
||||
-DWITH_TESTS=OFF
|
||||
-DWITH_TOOLS=OFF
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -MP /DNDEBUG"
|
||||
>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:
|
||||
"-DCMAKE_CXX_FLAGS=-DNDEBUG"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}rocksdb$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>
|
||||
>
|
||||
LIST_SEPARATOR ::
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
DEPENDS snappy_lib lz4_lib
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/${ep_lib_prefix}rocksdb${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}rocksdb_d${ep_lib_suffix}
|
||||
)
|
||||
ExternalProject_Get_Property (rocksdb BINARY_DIR)
|
||||
ExternalProject_Get_Property (rocksdb SOURCE_DIR)
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (rocksdb)
|
||||
endif ()
|
||||
file (MAKE_DIRECTORY ${SOURCE_DIR}/include)
|
||||
set_target_properties (rocksdb_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/${ep_lib_prefix}rocksdb_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/${ep_lib_prefix}rocksdb${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/include)
|
||||
add_dependencies (rocksdb_lib rocksdb)
|
||||
exclude_if_included (rocksdb)
|
||||
endif ()
|
||||
|
||||
target_link_libraries (rocksdb_lib
|
||||
INTERFACE
|
||||
snappy_lib
|
||||
lz4_lib
|
||||
$<$<BOOL:${MSVC}>:rpcrt4>)
|
||||
exclude_if_included (rocksdb_lib)
|
||||
target_link_libraries (ripple_libs INTERFACE rocksdb_lib)
|
||||
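The comment in the RocksDB block above notes that a find-module cannot recover how the library was configured, so its transitive dependencies are unknown and static linkage forces the local build. When the transitive dependencies are known, they can be attached to the imported target explicitly, which is what the block already does for snappy and lz4 at the end. A hedged sketch of that idea (paths are illustrative):

add_library(rocksdb_example UNKNOWN IMPORTED GLOBAL)
set_target_properties(rocksdb_example PROPERTIES
  IMPORTED_LOCATION /usr/local/lib/librocksdb.a        # illustrative path
  INTERFACE_INCLUDE_DIRECTORIES /usr/local/include)    # illustrative path
# Static archives do not record what they link against, so the consumer
# must restate the transitive dependencies on the imported target itself.
target_link_libraries(rocksdb_example INTERFACE snappy_lib lz4_lib)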
@@ -1,58 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: secp256k1
|
||||
#]===================================================================]
|
||||
|
||||
add_library (secp256k1_lib STATIC IMPORTED GLOBAL)
|
||||
|
||||
if (NOT WIN32)
|
||||
find_package(secp256k1)
|
||||
endif()
|
||||
|
||||
if(secp256k1)
|
||||
set_target_properties (secp256k1_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${secp256k1}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${secp256k1}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SECP256K1_INCLUDE_DIR})
|
||||
|
||||
add_library (secp256k1 ALIAS secp256k1_lib)
|
||||
add_library (NIH::secp256k1 ALIAS secp256k1_lib)
|
||||
|
||||
else()
|
||||
set(INSTALL_SECP256K1 true)
|
||||
|
||||
add_library (secp256k1 STATIC
|
||||
src/secp256k1/src/secp256k1.c)
|
||||
target_compile_definitions (secp256k1
|
||||
PRIVATE
|
||||
USE_NUM_NONE
|
||||
USE_FIELD_10X26
|
||||
USE_FIELD_INV_BUILTIN
|
||||
USE_SCALAR_8X32
|
||||
USE_SCALAR_INV_BUILTIN)
|
||||
target_include_directories (secp256k1
|
||||
PUBLIC
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
|
||||
$<INSTALL_INTERFACE:include>
|
||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src/secp256k1)
|
||||
target_compile_options (secp256k1
|
||||
PRIVATE
|
||||
$<$<BOOL:${MSVC}>:-wd4319>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:
|
||||
-Wno-deprecated-declarations
|
||||
-Wno-unused-function
|
||||
>
|
||||
$<$<BOOL:${is_gcc}>:-Wno-nonnull-compare>)
|
||||
target_link_libraries (ripple_libs INTERFACE NIH::secp256k1)
|
||||
#[===========================[
|
||||
headers installation
|
||||
#]===========================]
|
||||
install (
|
||||
FILES
|
||||
src/secp256k1/include/secp256k1.h
|
||||
DESTINATION include/secp256k1/include)
|
||||
|
||||
add_library (NIH::secp256k1 ALIAS secp256k1)
|
||||
endif()
|
||||
@@ -1,77 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: snappy
|
||||
#]===================================================================]
|
||||
|
||||
add_library (snappy_lib STATIC IMPORTED GLOBAL)
|
||||
|
||||
if (NOT WIN32)
|
||||
find_package(snappy)
|
||||
endif()
|
||||
|
||||
if(snappy)
|
||||
set_target_properties (snappy_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${snappy}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${snappy}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SNAPPY_INCLUDE_DIR})
|
||||
|
||||
else()
|
||||
ExternalProject_Add (snappy
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/google/snappy.git
|
||||
GIT_TAG 1.1.7
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DBUILD_SHARED_LIBS=OFF
|
||||
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
|
||||
-DSNAPPY_BUILD_TESTS=OFF
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP"
|
||||
"-DCMAKE_CXX_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_CXX_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}snappy$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>
|
||||
>
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E copy_if_different <BINARY_DIR>/config.h <BINARY_DIR>/snappy-stubs-public.h <SOURCE_DIR>
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/${ep_lib_prefix}snappy${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}snappy_d${ep_lib_suffix}
|
||||
)
|
||||
ExternalProject_Get_Property (snappy BINARY_DIR)
|
||||
ExternalProject_Get_Property (snappy SOURCE_DIR)
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (snappy)
|
||||
endif ()
|
||||
file (MAKE_DIRECTORY ${SOURCE_DIR}/snappy)
|
||||
set_target_properties (snappy_lib PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/${ep_lib_prefix}snappy_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/${ep_lib_prefix}snappy${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR})
|
||||
endif()
|
||||
|
||||
add_dependencies (snappy_lib snappy)
|
||||
target_link_libraries (ripple_libs INTERFACE snappy_lib)
|
||||
exclude_if_included (snappy)
|
||||
exclude_if_included (snappy_lib)
|
||||
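Several of these ExternalProject blocks (snappy, rocksdb, sqlite, soci) append an extra copy step for multi-config generators, where the built library lands in a per-configuration subdirectory rather than the path the IMPORTED target expects. The shape of that pattern, reduced to its essentials (repository and library names are illustrative; is_multiconfig is assumed to be set elsewhere, as it is in these scripts):

include(ExternalProject)
ExternalProject_Add(dep_src
  GIT_REPOSITORY https://example.com/dep.git   # placeholder URL
  GIT_TAG v1.0.0
  INSTALL_COMMAND ""
  BUILD_COMMAND
    ${CMAKE_COMMAND} --build . --config $<CONFIG>
    # On Visual Studio/Xcode the output goes to <BINARY_DIR>/<CONFIG>/,
    # so copy it up to the location named in BUILD_BYPRODUCTS.
    $<$<BOOL:${is_multiconfig}>:
      COMMAND ${CMAKE_COMMAND} -E copy
        <BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}dep$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
        <BINARY_DIR>
    >
  BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}dep${ep_lib_suffix}
)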
@@ -1,165 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: soci
|
||||
#]===================================================================]
|
||||
|
||||
foreach (_comp core empty sqlite3)
|
||||
add_library ("soci_${_comp}" STATIC IMPORTED GLOBAL)
|
||||
endforeach ()
|
||||
|
||||
if (NOT WIN32)
|
||||
find_package(soci)
|
||||
endif()
|
||||
|
||||
if (soci)
|
||||
foreach (_comp core empty sqlite3)
|
||||
set_target_properties ("soci_${_comp}" PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${soci}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${soci}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOCI_INCLUDE_DIR})
|
||||
endforeach ()
|
||||
|
||||
else()
|
||||
set (soci_lib_pre ${ep_lib_prefix})
|
||||
set (soci_lib_post "")
|
||||
if (WIN32)
|
||||
# for some reason soci on windows still prepends lib (non-standard)
|
||||
set (soci_lib_pre lib)
|
||||
# this version in the name might change if/when we change versions of soci
|
||||
set (soci_lib_post "_4_0")
|
||||
endif ()
|
||||
get_target_property (_boost_incs Boost::date_time INTERFACE_INCLUDE_DIRECTORIES)
|
||||
get_target_property (_boost_dt Boost::date_time IMPORTED_LOCATION)
|
||||
if (NOT _boost_dt)
|
||||
get_target_property (_boost_dt Boost::date_time IMPORTED_LOCATION_RELEASE)
|
||||
endif ()
|
||||
if (NOT _boost_dt)
|
||||
get_target_property (_boost_dt Boost::date_time IMPORTED_LOCATION_DEBUG)
|
||||
endif ()
|
||||
|
||||
ExternalProject_Add (soci
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/SOCI/soci.git
|
||||
GIT_TAG 04e1870294918d20761736743bb6136314c42dd5
|
||||
# We had an issue with soci integer range checking for boost::optional
|
||||
# and needed to remove the exception that SOCI throws in this case.
|
||||
# This is *probably* a bug in SOCI, but has never been investigated more
|
||||
# nor reported to the maintainers.
|
||||
# This cmake script comments out the lines in question.
|
||||
# This patch process is likely fragile and should be reviewed carefully
|
||||
# whenever we update the GIT_TAG above.
|
||||
PATCH_COMMAND
|
||||
${CMAKE_COMMAND} -D RIPPLED_SOURCE=${CMAKE_CURRENT_SOURCE_DIR}
|
||||
-P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/soci_patch.cmake
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
$<$<BOOL:${CMAKE_TOOLCHAIN_FILE}>:-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}>
|
||||
$<$<BOOL:${VCPKG_TARGET_TRIPLET}>:-DVCPKG_TARGET_TRIPLET=${VCPKG_TARGET_TRIPLET}>
|
||||
$<$<BOOL:${unity}>:-DCMAKE_UNITY_BUILD=ON>
|
||||
-DCMAKE_PREFIX_PATH=${CMAKE_BINARY_DIR}/sqlite3
|
||||
-DCMAKE_MODULE_PATH=${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake
|
||||
-DCMAKE_INCLUDE_PATH=$<JOIN:$<TARGET_PROPERTY:sqlite,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-DCMAKE_LIBRARY_PATH=${sqlite_BINARY_DIR}
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DSOCI_CXX_C11=ON
|
||||
-DSOCI_STATIC=ON
|
||||
-DSOCI_LIBDIR=lib
|
||||
-DSOCI_SHARED=OFF
|
||||
-DSOCI_TESTS=OFF
|
||||
# hacks to workaround the fact that soci doesn't currently use
|
||||
# boost imported targets in its cmake. If they switch to
|
||||
# proper imported targets, this next line can be removed
|
||||
# (as well as the get_property above that sets _boost_incs)
|
||||
-DBoost_INCLUDE_DIRS=$<JOIN:${_boost_incs},::>
|
||||
-DBoost_INCLUDE_DIR=$<JOIN:${_boost_incs},::>
|
||||
-DBOOST_ROOT=${BOOST_ROOT}
|
||||
-DWITH_BOOST=ON
|
||||
-DBoost_FOUND=ON
|
||||
-DBoost_NO_BOOST_CMAKE=ON
|
||||
-DBoost_DATE_TIME_FOUND=ON
|
||||
-DSOCI_HAVE_BOOST=ON
|
||||
-DSOCI_HAVE_BOOST_DATE_TIME=ON
|
||||
-DBoost_DATE_TIME_LIBRARY=${_boost_dt}
|
||||
-DSOCI_DB2=OFF
|
||||
-DSOCI_FIREBIRD=OFF
|
||||
-DSOCI_MYSQL=OFF
|
||||
-DSOCI_ODBC=OFF
|
||||
-DSOCI_ORACLE=OFF
|
||||
-DSOCI_POSTGRESQL=OFF
|
||||
-DSOCI_SQLITE3=ON
|
||||
-DSQLITE3_INCLUDE_DIR=$<JOIN:$<TARGET_PROPERTY:sqlite,INTERFACE_INCLUDE_DIRECTORIES>,::>
|
||||
-DSQLITE3_LIBRARY=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:sqlite,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:sqlite,IMPORTED_LOCATION_RELEASE>>
|
||||
$<$<BOOL:${APPLE}>:-DCMAKE_FIND_FRAMEWORK=LAST>
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP"
|
||||
"-DCMAKE_CXX_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_CXX_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:
|
||||
"-DCMAKE_CXX_FLAGS=-Wno-deprecated-declarations"
|
||||
>
|
||||
# SEE: https://github.com/SOCI/soci/issues/640
|
||||
$<$<AND:$<BOOL:${is_gcc}>,$<VERSION_GREATER_EQUAL:${CMAKE_CXX_COMPILER_VERSION},8>>:
|
||||
"-DCMAKE_CXX_FLAGS=-Wno-deprecated-declarations -Wno-error=format-overflow -Wno-format-overflow -Wno-error=format-truncation"
|
||||
>
|
||||
LIST_SEPARATOR ::
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/lib/$<CONFIG>/${soci_lib_pre}soci_core${soci_lib_post}$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/$<CONFIG>/${soci_lib_pre}soci_empty${soci_lib_post}$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/$<CONFIG>/${soci_lib_pre}soci_sqlite3${soci_lib_post}$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib
|
||||
>
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
DEPENDS sqlite
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/lib/${soci_lib_pre}soci_core${soci_lib_post}${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/${soci_lib_pre}soci_core${soci_lib_post}_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/${soci_lib_pre}soci_empty${soci_lib_post}${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/${soci_lib_pre}soci_empty${soci_lib_post}_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/${soci_lib_pre}soci_sqlite3${soci_lib_post}${ep_lib_suffix}
|
||||
<BINARY_DIR>/lib/${soci_lib_pre}soci_sqlite3${soci_lib_post}_d${ep_lib_suffix}
|
||||
)
|
||||
ExternalProject_Get_Property (soci BINARY_DIR)
|
||||
ExternalProject_Get_Property (soci SOURCE_DIR)
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (soci)
|
||||
endif ()
|
||||
file (MAKE_DIRECTORY ${SOURCE_DIR}/include)
|
||||
file (MAKE_DIRECTORY ${BINARY_DIR}/include)
|
||||
foreach (_comp core empty sqlite3)
|
||||
set_target_properties ("soci_${_comp}" PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/lib/${soci_lib_pre}soci_${_comp}${soci_lib_post}_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/lib/${soci_lib_pre}soci_${_comp}${soci_lib_post}${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${SOURCE_DIR}/include;${BINARY_DIR}/include")
|
||||
add_dependencies ("soci_${_comp}" soci) # something has to depend on the ExternalProject to trigger it
|
||||
target_link_libraries (ripple_libs INTERFACE "soci_${_comp}")
|
||||
if (NOT _comp STREQUAL "core")
|
||||
target_link_libraries ("soci_${_comp}" INTERFACE soci_core)
|
||||
endif ()
|
||||
endforeach ()
|
||||
endif()
|
||||
|
||||
foreach (_comp core empty sqlite3)
|
||||
exclude_if_included ("soci_${_comp}")
|
||||
endforeach ()
|
||||
|
||||
|
||||
exclude_if_included (soci)
|
||||
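The SOCI block above patches the fetched sources with a helper script (Builds/CMake/soci_patch.cmake) that comments out the integer range-check lines, and warns that the patch is fragile across GIT_TAG bumps. The script itself is not part of this diff; a generic sketch of how such a CMake -P patch script is typically written (the file path and matched string are hypothetical, not the repo's):

# Minimal patch script, run via: cmake -D PATCH_ROOT=<soci source dir> -P patch_example.cmake
set(_target_file "${PATCH_ROOT}/include/soci/some_header.h")   # hypothetical path
file(READ "${_target_file}" _contents)
string(REPLACE
  "throw soci_error(\"Out of range.\");"      # hypothetical original line
  "// throw soci_error(\"Out of range.\");"   # same line, commented out by the patch
  _contents "${_contents}")
file(WRITE "${_target_file}" "${_contents}")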
@@ -1,93 +0,0 @@
|
||||
#[===================================================================[
|
||||
NIH dep: sqlite
|
||||
#]===================================================================]
|
||||
|
||||
add_library (sqlite STATIC IMPORTED GLOBAL)
|
||||
|
||||
if (NOT WIN32)
|
||||
find_package(sqlite)
|
||||
endif()
|
||||
|
||||
|
||||
if(sqlite3)
|
||||
set_target_properties (sqlite PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${sqlite3}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${sqlite3}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SQLITE_INCLUDE_DIR})
|
||||
|
||||
else()
|
||||
ExternalProject_Add (sqlite3
|
||||
PREFIX ${nih_cache_path}
|
||||
# sqlite doesn't use git, but it provides versioned tarballs
|
||||
URL https://www.sqlite.org/2018/sqlite-amalgamation-3260000.zip
|
||||
http://www.sqlite.org/2018/sqlite-amalgamation-3260000.zip
|
||||
https://www2.sqlite.org/2018/sqlite-amalgamation-3260000.zip
|
||||
http://www2.sqlite.org/2018/sqlite-amalgamation-3260000.zip
|
||||
# ^^^ version is apparent in the URL: 3260000 => 3.26.0
|
||||
URL_HASH SHA256=de5dcab133aa339a4cf9e97c40aa6062570086d6085d8f9ad7bc6ddf8a52096e
|
||||
# Don't need to worry about MITM attacks too much because the download
|
||||
# is checked against a strong hash
|
||||
TLS_VERIFY false
|
||||
# we wrote a very simple CMake file to build sqlite
|
||||
# so that's what we copy here so that we can build with
|
||||
# CMake. sqlite doesn't generally provide a build system
|
||||
# for the single amalgamation source file.
|
||||
PATCH_COMMAND
|
||||
${CMAKE_COMMAND} -E copy_if_different
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/CMake_sqlite3.txt
|
||||
<SOURCE_DIR>/CMakeLists.txt
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP"
|
||||
"-DCMAKE_C_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_C_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}sqlite3$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>
|
||||
>
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/${ep_lib_prefix}sqlite3${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}sqlite3_d${ep_lib_suffix}
|
||||
)
|
||||
ExternalProject_Get_Property (sqlite3 BINARY_DIR)
|
||||
ExternalProject_Get_Property (sqlite3 SOURCE_DIR)
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (sqlite3)
|
||||
endif ()
|
||||
|
||||
set_target_properties (sqlite PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/${ep_lib_prefix}sqlite3_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/${ep_lib_prefix}sqlite3${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR})
|
||||
|
||||
add_dependencies (sqlite sqlite3)
|
||||
exclude_if_included (sqlite3)
|
||||
endif()
|
||||
|
||||
target_link_libraries (sqlite INTERFACE $<$<NOT:$<BOOL:${MSVC}>>:dl>)
|
||||
target_link_libraries (ripple_libs INTERFACE sqlite)
|
||||
exclude_if_included (sqlite)
|
||||
set(sqlite_BINARY_DIR ${BINARY_DIR})
|
||||
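The comment above mentions a small, repo-provided CMake file (Builds/CMake/CMake_sqlite3.txt) that is copied over the amalgamation so the single source file can be built like any other CMake project. That file is not reproduced in this diff; a minimal sketch of what building the amalgamation typically requires (this is an assumption, not the repo's actual file):

cmake_minimum_required(VERSION 3.11)
project(sqlite3 C)
# The amalgamation is a single translation unit: sqlite3.c plus sqlite3.h.
add_library(sqlite3 STATIC sqlite3.c)
target_include_directories(sqlite3 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
# dlopen and pthreads are needed on most POSIX platforms for extensions and threading.
if(NOT MSVC)
  find_package(Threads REQUIRED)
  target_link_libraries(sqlite3 PUBLIC Threads::Threads ${CMAKE_DL_LIBS})
endif()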
@@ -1,84 +1 @@
|
||||
#[===================================================================[
|
||||
NIH dep: wasmedge: web assembly runtime for hooks.
|
||||
#]===================================================================]
|
||||
|
||||
find_package(Curses)
|
||||
if(CURSES_FOUND)
|
||||
include_directories(${CURSES_INCLUDE_DIR})
|
||||
target_link_libraries(ripple_libs INTERFACE ${CURSES_LIBRARY})
|
||||
else()
|
||||
message(WARNING "CURSES library not found... (only important for mac builds)")
|
||||
endif()
|
||||
|
||||
|
||||
find_package(LLVM REQUIRED CONFIG)
|
||||
message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
|
||||
message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
|
||||
ExternalProject_Add (wasmedge_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/WasmEdge/WasmEdge.git
|
||||
GIT_TAG 0.11.2
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
-DWASMEDGE_BUILD_SHARED_LIB=OFF
|
||||
-DWASMEDGE_BUILD_STATIC_LIB=ON
|
||||
-DWASMEDGE_BUILD_AOT_RUNTIME=ON
|
||||
-DWASMEDGE_FORCE_DISABLE_LTO=ON
|
||||
-DWASMEDGE_LINK_LLVM_STATIC=ON
|
||||
-DWASMEDGE_LINK_TOOLS_STATIC=ON
|
||||
-DWASMEDGE_BUILD_PLUGINS=OFF
|
||||
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
|
||||
-DLLVM_DIR=${LLVM_DIR}
|
||||
-DLLVM_LIBRARY_DIR=${LLVM_LIBRARY_DIR}
|
||||
-DLLVM_ENABLE_TERMINFO=OFF
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP -march=native"
|
||||
"-DCMAKE_C_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_C_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
LOG_CONFIGURE ON
LOG_BUILD ON
COMMAND
|
||||
pwd
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
$<$<VERSION_GREATER_EQUAL:${CMAKE_VERSION},3.12>:--parallel ${ep_procs}>
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/lib/api/libwasmedge.a
|
||||
)
|
||||
add_library (wasmedge STATIC IMPORTED GLOBAL)
|
||||
ExternalProject_Get_Property (wasmedge_src BINARY_DIR)
|
||||
ExternalProject_Get_Property (wasmedge_src SOURCE_DIR)
|
||||
set (wasmedge_src_BINARY_DIR "${BINARY_DIR}")
|
||||
add_dependencies (wasmedge wasmedge_src)
|
||||
execute_process(
|
||||
COMMAND
|
||||
mkdir -p "${wasmedge_src_BINARY_DIR}/include/api"
|
||||
)
|
||||
set_target_properties (wasmedge PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
"${wasmedge_src_BINARY_DIR}/lib/api/libwasmedge.a"
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
"${wasmedge_src_BINARY_DIR}/lib/api/libwasmedge.a"
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${wasmedge_src_BINARY_DIR}/include/api/"
|
||||
)
|
||||
target_link_libraries (ripple_libs INTERFACE wasmedge)
|
||||
#RH NOTE: some compilers / versions of some libraries need these, most don't
|
||||
|
||||
find_library(XAR_LIBRARY NAMES xar)
|
||||
if(XAR_LIBRARY)
|
||||
target_link_libraries(ripple_libs INTERFACE ${XAR_LIBRARY})
|
||||
else()
|
||||
message(WARNING "xar library not found... (only important for mac builds)")
|
||||
endif()
|
||||
add_library (wasmedge::wasmedge ALIAS wasmedge)
|
||||
find_package(wasmedge REQUIRED)
|
||||
|
||||
@@ -1,167 +0,0 @@
|
||||
if(reporting)
|
||||
find_library(cassandra NAMES cassandra)
|
||||
if(NOT cassandra)
|
||||
|
||||
message("System installed Cassandra cpp driver not found. Will build")
|
||||
|
||||
find_library(zlib NAMES zlib1g-dev zlib-devel zlib z)
|
||||
if(NOT zlib)
|
||||
message("zlib not found. will build")
|
||||
add_library(zlib STATIC IMPORTED GLOBAL)
|
||||
ExternalProject_Add(zlib_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/madler/zlib.git
|
||||
GIT_TAG v1.2.12
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}z.a
|
||||
LOG_BUILD TRUE
|
||||
LOG_CONFIGURE TRUE
|
||||
)
|
||||
|
||||
|
||||
ExternalProject_Get_Property (zlib_src SOURCE_DIR)
|
||||
ExternalProject_Get_Property (zlib_src BINARY_DIR)
|
||||
set (zlib_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include)
|
||||
|
||||
set_target_properties (zlib PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/${ep_lib_prefix}z.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/include)
|
||||
add_dependencies(zlib zlib_src)
|
||||
|
||||
file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR)
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
|
||||
find_library(krb5 NAMES krb5-dev libkrb5-dev)
|
||||
|
||||
if(NOT krb5)
|
||||
message("krb5 not found. will build")
|
||||
add_library(krb5 STATIC IMPORTED GLOBAL)
|
||||
ExternalProject_Add(krb5_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/krb5/krb5.git
|
||||
GIT_TAG krb5-1.20-final
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared > /dev/null
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <SOURCE_DIR>/lib/${ep_lib_prefix}krb5.a
|
||||
LOG_BUILD TRUE
|
||||
)
|
||||
|
||||
ExternalProject_Get_Property (krb5_src SOURCE_DIR)
|
||||
ExternalProject_Get_Property (krb5_src BINARY_DIR)
|
||||
set (krb5_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include)
|
||||
|
||||
set_target_properties (krb5 PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/lib/${ep_lib_prefix}krb5.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/include)
|
||||
add_dependencies(krb5 krb5_src)
|
||||
|
||||
file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR)
|
||||
endif()
|
||||
|
||||
|
||||
find_library(libuv1 NAMES uv1 libuv1 libuv1-dev libuv1:amd64)
|
||||
|
||||
|
||||
if(NOT libuv1)
|
||||
message("libuv1 not found, will build")
|
||||
add_library(libuv1 STATIC IMPORTED GLOBAL)
|
||||
ExternalProject_Add(libuv_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
||||
GIT_TAG v1.44.2
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}uv_a.a
|
||||
LOG_BUILD TRUE
|
||||
LOG_CONFIGURE TRUE
|
||||
)
|
||||
|
||||
ExternalProject_Get_Property (libuv_src SOURCE_DIR)
|
||||
ExternalProject_Get_Property (libuv_src BINARY_DIR)
|
||||
set (libuv_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include)
|
||||
|
||||
set_target_properties (libuv1 PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/${ep_lib_prefix}uv_a.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/include)
|
||||
add_dependencies(libuv1 libuv_src)
|
||||
|
||||
file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
|
||||
endif()
|
||||
|
||||
add_library (cassandra STATIC IMPORTED GLOBAL)
|
||||
ExternalProject_Add(cassandra_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
|
||||
GIT_TAG 2.16.2
|
||||
CMAKE_ARGS
|
||||
-DLIBUV_ROOT_DIR=${BINARY_DIR}
|
||||
-DLIBUV_LIBRARY=${BINARY_DIR}/libuv_a.a
|
||||
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
|
||||
-DCASS_BUILD_STATIC=ON
|
||||
-DCASS_BUILD_SHARED=OFF
|
||||
-DOPENSSL_ROOT_DIR=/opt/local/openssl
|
||||
INSTALL_COMMAND ""
|
||||
BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}cassandra_static.a
|
||||
LOG_BUILD TRUE
|
||||
LOG_CONFIGURE TRUE
|
||||
)
|
||||
|
||||
ExternalProject_Get_Property (cassandra_src SOURCE_DIR)
|
||||
ExternalProject_Get_Property (cassandra_src BINARY_DIR)
|
||||
set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}")
|
||||
file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include)
|
||||
|
||||
set_target_properties (cassandra PROPERTIES
|
||||
IMPORTED_LOCATION
|
||||
${BINARY_DIR}/${ep_lib_prefix}cassandra_static.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${SOURCE_DIR}/include)
|
||||
add_dependencies(cassandra cassandra_src)
|
||||
|
||||
if(NOT libuv1)
|
||||
ExternalProject_Add_StepDependencies(cassandra_src build libuv1)
|
||||
target_link_libraries(cassandra INTERFACE libuv1)
|
||||
else()
|
||||
target_link_libraries(cassandra INTERFACE ${libuv1})
|
||||
endif()
|
||||
if(NOT krb5)
|
||||
|
||||
ExternalProject_Add_StepDependencies(cassandra_src build krb5)
|
||||
target_link_libraries(cassandra INTERFACE krb5)
|
||||
else()
|
||||
target_link_libraries(cassandra INTERFACE ${krb5})
|
||||
endif()
|
||||
|
||||
if(NOT zlib)
|
||||
ExternalProject_Add_StepDependencies(cassandra_src build zlib)
|
||||
target_link_libraries(cassandra INTERFACE zlib)
|
||||
else()
|
||||
target_link_libraries(cassandra INTERFACE ${zlib})
|
||||
endif()
|
||||
|
||||
file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
|
||||
target_link_libraries(ripple_libs INTERFACE cassandra)
|
||||
else()
|
||||
message("Found system installed cassandra cpp driver")
|
||||
|
||||
find_path(cassandra_includes NAMES cassandra.h REQUIRED)
|
||||
target_link_libraries (ripple_libs INTERFACE ${cassandra})
|
||||
target_include_directories(ripple_libs INTERFACE ${cassandra_includes})
|
||||
endif()
|
||||
|
||||
exclude_if_included (cassandra)
|
||||
endif()
|
||||
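The cassandra block above uses ExternalProject_Add_StepDependencies to make sure a locally built libuv/krb5/zlib finishes before the driver's build step runs, while still linking the system library directly when find_library did locate one. A reduced sketch of that sequencing pattern (target and repository names are illustrative; local_dep stands in for a locally built dependency target):

include(ExternalProject)
add_library(consumer STATIC IMPORTED GLOBAL)
ExternalProject_Add(consumer_src
  GIT_REPOSITORY https://example.com/consumer.git   # placeholder URL
  GIT_TAG v1.0.0
  INSTALL_COMMAND ""
  BUILD_BYPRODUCTS <BINARY_DIR>/${ep_lib_prefix}consumer.a
)
add_dependencies(consumer consumer_src)
if(NOT system_dep)   # 'system_dep' would be the find_library() result
  # Local build: the consumer's build *step* must wait for the dependency target.
  ExternalProject_Add_StepDependencies(consumer_src build local_dep)
  target_link_libraries(consumer INTERFACE local_dep)
else()
  # System library found: just link the discovered path.
  target_link_libraries(consumer INTERFACE ${system_dep})
endif()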
@@ -1,18 +0,0 @@
#[===================================================================[
NIH dep: date

the main library is header-only, thus is an INTERFACE lib in CMake.

NOTE: this has been accepted into c++20 so can likely be replaced
when we update to that standard
#]===================================================================]

find_package (date QUIET)
if (NOT TARGET date::date)
FetchContent_Declare(
hh_date_src
GIT_REPOSITORY https://github.com/HowardHinnant/date.git
GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829
)
FetchContent_MakeAvailable(hh_date_src)
endif ()
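As the comment notes, date is header-only, so when the system package is absent the sources are pulled with FetchContent and consumed as an interface target. A compact sketch of the same prefer-system-else-fetch idiom for any header-only dependency (package name and URL are illustrative, and it assumes the fetched project defines the namespaced target shown):

find_package(some_header_lib QUIET)            # illustrative package name
if(NOT TARGET some_header_lib::some_header_lib)
  include(FetchContent)
  FetchContent_Declare(some_header_lib_src
    GIT_REPOSITORY https://example.com/some_header_lib.git   # placeholder URL
    GIT_TAG v1.0.0)
  FetchContent_MakeAvailable(some_header_lib_src)
endif()
# Either way, the same target name is linked afterwards.
target_link_libraries(ripple_libs INTERFACE some_header_lib::some_header_lib)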
@@ -1,324 +1,15 @@
|
||||
|
||||
# currently linking to unsecure versions...if we switch, we'll
|
||||
# need to add ssl as a link dependency to the grpc targets
|
||||
option (use_secure_grpc "use TLS version of grpc libs." OFF)
|
||||
if (use_secure_grpc)
|
||||
set (grpc_suffix "")
|
||||
else ()
|
||||
set (grpc_suffix "_unsecure")
|
||||
endif ()
|
||||
|
||||
find_package (gRPC 1.23 CONFIG QUIET)
|
||||
if (TARGET gRPC::gpr AND NOT local_grpc)
|
||||
get_target_property (_grpc_l gRPC::gpr IMPORTED_LOCATION_DEBUG)
|
||||
if (NOT _grpc_l)
|
||||
get_target_property (_grpc_l gRPC::gpr IMPORTED_LOCATION_RELEASE)
|
||||
endif ()
|
||||
if (NOT _grpc_l)
|
||||
get_target_property (_grpc_l gRPC::gpr IMPORTED_LOCATION)
|
||||
endif ()
|
||||
message (STATUS "Found cmake config for gRPC. Using ${_grpc_l}.")
|
||||
else ()
|
||||
find_package (PkgConfig QUIET)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_check_modules (grpc QUIET "grpc${grpc_suffix}>=1.25" "grpc++${grpc_suffix}" gpr)
|
||||
endif ()
|
||||
|
||||
if (grpc_FOUND)
|
||||
message (STATUS "Found gRPC using pkg-config. Using ${grpc_gpr_PREFIX}.")
|
||||
endif ()
|
||||
|
||||
add_executable (gRPC::grpc_cpp_plugin IMPORTED)
|
||||
exclude_if_included (gRPC::grpc_cpp_plugin)
|
||||
|
||||
if (grpc_FOUND AND NOT local_grpc)
|
||||
# use installed grpc (via pkg-config)
|
||||
macro (add_imported_grpc libname_)
|
||||
if (static)
|
||||
set (_search "${CMAKE_STATIC_LIBRARY_PREFIX}${libname_}${CMAKE_STATIC_LIBRARY_SUFFIX}")
|
||||
else ()
|
||||
set (_search "${CMAKE_SHARED_LIBRARY_PREFIX}${libname_}${CMAKE_SHARED_LIBRARY_SUFFIX}")
|
||||
endif()
|
||||
find_library(_found_${libname_}
|
||||
NAMES ${_search}
|
||||
HINTS ${grpc_LIBRARY_DIRS})
|
||||
if (_found_${libname_})
|
||||
message (STATUS "importing ${libname_} as ${_found_${libname_}}")
|
||||
else ()
|
||||
message (FATAL_ERROR "using pkg-config for grpc, can't find ${_search}")
|
||||
endif ()
|
||||
add_library ("gRPC::${libname_}" STATIC IMPORTED GLOBAL)
|
||||
set_target_properties ("gRPC::${libname_}" PROPERTIES IMPORTED_LOCATION ${_found_${libname_}})
|
||||
if (grpc_INCLUDE_DIRS)
|
||||
set_target_properties ("gRPC::${libname_}" PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${grpc_INCLUDE_DIRS})
|
||||
endif ()
|
||||
target_link_libraries (ripple_libs INTERFACE "gRPC::${libname_}")
|
||||
exclude_if_included ("gRPC::${libname_}")
|
||||
endmacro ()
|
||||
|
||||
set_target_properties (gRPC::grpc_cpp_plugin PROPERTIES
|
||||
IMPORTED_LOCATION "${grpc_gpr_PREFIX}/bin/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX}")
|
||||
|
||||
pkg_check_modules (cares QUIET libcares)
|
||||
if (cares_FOUND)
|
||||
if (static)
|
||||
set (_search "${CMAKE_STATIC_LIBRARY_PREFIX}cares${CMAKE_STATIC_LIBRARY_SUFFIX}")
|
||||
set (_prefix cares_STATIC)
|
||||
set (_static STATIC)
|
||||
else ()
|
||||
set (_search "${CMAKE_SHARED_LIBRARY_PREFIX}cares${CMAKE_SHARED_LIBRARY_SUFFIX}")
|
||||
set (_prefix cares)
|
||||
set (_static)
|
||||
endif()
|
||||
find_library(_location NAMES ${_search} HINTS ${cares_LIBRARY_DIRS})
|
||||
if (NOT _location)
|
||||
message (FATAL_ERROR "using pkg-config for grpc, can't find c-ares")
|
||||
endif ()
|
||||
if(${_location} MATCHES "\\.a$")
|
||||
add_library(c-ares::cares STATIC IMPORTED GLOBAL)
|
||||
else()
|
||||
add_library(c-ares::cares SHARED IMPORTED GLOBAL)
|
||||
endif()
|
||||
set_target_properties (c-ares::cares PROPERTIES
|
||||
IMPORTED_LOCATION ${_location}
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${${_prefix}_INCLUDE_DIRS}"
|
||||
INTERFACE_LINK_OPTIONS "${${_prefix}_LDFLAGS}"
|
||||
)
|
||||
exclude_if_included (c-ares::cares)
|
||||
else ()
|
||||
message (FATAL_ERROR "using pkg-config for grpc, can't find c-ares")
|
||||
endif ()
|
||||
else ()
|
||||
#[===========================[
|
||||
c-ares (grpc requires)
|
||||
#]===========================]
|
||||
ExternalProject_Add (c-ares_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/c-ares/c-ares.git
|
||||
GIT_TAG cares-1_15_0
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DCMAKE_INSTALL_PREFIX=<BINARY_DIR>/_installed_
|
||||
-DCARES_SHARED=OFF
|
||||
-DCARES_STATIC=ON
|
||||
-DCARES_STATIC_PIC=ON
|
||||
-DCARES_INSTALL=ON
|
||||
-DCARES_MSVC_STATIC_RUNTIME=ON
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/_installed_/lib/${ep_lib_prefix}cares${ep_lib_suffix}
|
||||
<BINARY_DIR>/_installed_/lib/${ep_lib_prefix}cares_d${ep_lib_suffix}
|
||||
)
|
||||
exclude_if_included (c-ares_src)
|
||||
ExternalProject_Get_Property (c-ares_src BINARY_DIR)
|
||||
set (cares_binary_dir "${BINARY_DIR}")
|
||||
|
||||
add_library (c-ares::cares STATIC IMPORTED GLOBAL)
|
||||
file (MAKE_DIRECTORY ${BINARY_DIR}/_installed_/include)
|
||||
set_target_properties (c-ares::cares PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}cares_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}cares${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${BINARY_DIR}/_installed_/include)
|
||||
add_dependencies (c-ares::cares c-ares_src)
|
||||
exclude_if_included (c-ares::cares)
|
||||
|
||||
if (NOT has_zlib)
|
||||
#[===========================[
|
||||
zlib (grpc requires)
|
||||
#]===========================]
|
||||
if (MSVC)
|
||||
set (zlib_debug_postfix "d") # zlib cmake sets this internally for MSVC, so we really don't have a choice
|
||||
set (zlib_base "zlibstatic")
|
||||
else ()
|
||||
set (zlib_debug_postfix "_d")
|
||||
set (zlib_base "z")
|
||||
endif ()
|
||||
ExternalProject_Add (zlib_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/madler/zlib.git
|
||||
GIT_TAG v1.2.11
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=${zlib_debug_postfix}
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DCMAKE_INSTALL_PREFIX=<BINARY_DIR>/_installed_
|
||||
-DBUILD_SHARED_LIBS=OFF
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP"
|
||||
"-DCMAKE_C_FLAGS_DEBUG=-MTd"
|
||||
"-DCMAKE_C_FLAGS_RELEASE=-MT"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . --config $<CONFIG> --target install
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/_installed_/lib/${ep_lib_prefix}${zlib_base}${ep_lib_suffix}
|
||||
<BINARY_DIR>/_installed_/lib/${ep_lib_prefix}${zlib_base}${zlib_debug_postfix}${ep_lib_suffix}
|
||||
)
|
||||
exclude_if_included (zlib_src)
|
||||
ExternalProject_Get_Property (zlib_src BINARY_DIR)
|
||||
set (zlib_binary_dir "${BINARY_DIR}")
|
||||
|
||||
add_library (ZLIB::ZLIB STATIC IMPORTED GLOBAL)
|
||||
file (MAKE_DIRECTORY ${BINARY_DIR}/_installed_/include)
|
||||
set_target_properties (ZLIB::ZLIB PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}${zlib_base}${zlib_debug_postfix}${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}${zlib_base}${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${BINARY_DIR}/_installed_/include)
|
||||
add_dependencies (ZLIB::ZLIB zlib_src)
|
||||
exclude_if_included (ZLIB::ZLIB)
|
||||
endif ()
|
||||
|
||||
#[===========================[
|
||||
grpc
|
||||
#]===========================]
|
||||
ExternalProject_Add (grpc_src
|
||||
PREFIX ${nih_cache_path}
|
||||
GIT_REPOSITORY https://github.com/grpc/grpc.git
|
||||
GIT_TAG v1.25.0
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
-DCMAKE_CXX_STANDARD=17
|
||||
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:-DCMAKE_VERBOSE_MAKEFILE=ON>
|
||||
$<$<BOOL:${CMAKE_TOOLCHAIN_FILE}>:-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}>
|
||||
$<$<BOOL:${VCPKG_TARGET_TRIPLET}>:-DVCPKG_TARGET_TRIPLET=${VCPKG_TARGET_TRIPLET}>
|
||||
$<$<BOOL:${unity}>:-DCMAKE_UNITY_BUILD=ON>
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
$<$<NOT:$<BOOL:${is_multiconfig}>>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}>
|
||||
-DgRPC_BUILD_TESTS=OFF
|
||||
-DgRPC_BENCHMARK_PROVIDER=""
|
||||
-DgRPC_BUILD_CSHARP_EXT=OFF
|
||||
-DgRPC_MSVC_STATIC_RUNTIME=ON
|
||||
-DgRPC_INSTALL=OFF
|
||||
-DgRPC_CARES_PROVIDER=package
|
||||
-Dc-ares_DIR=${cares_binary_dir}/_installed_/lib/cmake/c-ares
|
||||
-DgRPC_SSL_PROVIDER=package
|
||||
-DOPENSSL_ROOT_DIR=${OPENSSL_ROOT_DIR}
|
||||
-DgRPC_PROTOBUF_PROVIDER=package
|
||||
-DProtobuf_USE_STATIC_LIBS=$<IF:$<AND:$<BOOL:${Protobuf_FOUND}>,$<NOT:$<BOOL:${static}>>>,OFF,ON>
|
||||
-DProtobuf_INCLUDE_DIR=$<JOIN:$<TARGET_PROPERTY:protobuf::libprotobuf,INTERFACE_INCLUDE_DIRECTORIES>,:_:>
|
||||
-DProtobuf_LIBRARY=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:protobuf::libprotobuf,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:protobuf::libprotobuf,IMPORTED_LOCATION_RELEASE>>
|
||||
-DProtobuf_PROTOC_LIBRARY=$<IF:$<CONFIG:Debug>,$<TARGET_PROPERTY:protobuf::libprotoc,IMPORTED_LOCATION_DEBUG>,$<TARGET_PROPERTY:protobuf::libprotoc,IMPORTED_LOCATION_RELEASE>>
|
||||
-DProtobuf_PROTOC_EXECUTABLE=$<TARGET_PROPERTY:protobuf::protoc,IMPORTED_LOCATION>
|
||||
-DgRPC_ZLIB_PROVIDER=package
|
||||
$<$<NOT:$<BOOL:${has_zlib}>>:-DZLIB_ROOT=${zlib_binary_dir}/_installed_>
|
||||
$<$<BOOL:${MSVC}>:
|
||||
"-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP"
|
||||
"-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP"
|
||||
>
|
||||
LOG_BUILD ON
|
||||
LOG_CONFIGURE ON
|
||||
BUILD_COMMAND
|
||||
${CMAKE_COMMAND}
|
||||
--build .
|
||||
--config $<CONFIG>
|
||||
--parallel ${ep_procs}
|
||||
$<$<BOOL:${is_multiconfig}>:
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E copy
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}grpc${grpc_suffix}$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}grpc++${grpc_suffix}$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}address_sorting$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/$<CONFIG>/${ep_lib_prefix}gpr$<$<CONFIG:Debug>:_d>${ep_lib_suffix}
|
||||
<BINARY_DIR>/$<CONFIG>/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX}
|
||||
<BINARY_DIR>
|
||||
>
|
||||
LIST_SEPARATOR :_:
|
||||
TEST_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
DEPENDS c-ares_src
|
||||
BUILD_BYPRODUCTS
|
||||
<BINARY_DIR>/${ep_lib_prefix}grpc${grpc_suffix}${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}grpc${grpc_suffix}_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}grpc++${grpc_suffix}${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}grpc++${grpc_suffix}_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}address_sorting${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}address_sorting_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}gpr${ep_lib_suffix}
|
||||
<BINARY_DIR>/${ep_lib_prefix}gpr_d${ep_lib_suffix}
|
||||
<BINARY_DIR>/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX}
|
||||
)
|
||||
if (TARGET protobuf_src)
|
||||
ExternalProject_Add_StepDependencies(grpc_src build protobuf_src)
|
||||
endif ()
|
||||
exclude_if_included (grpc_src)
|
||||
ExternalProject_Get_Property (grpc_src BINARY_DIR)
|
||||
ExternalProject_Get_Property (grpc_src SOURCE_DIR)
|
||||
set (grpc_binary_dir "${BINARY_DIR}")
|
||||
set (grpc_source_dir "${SOURCE_DIR}")
|
||||
if (CMAKE_VERBOSE_MAKEFILE)
|
||||
print_ep_logs (grpc_src)
|
||||
endif ()
|
||||
file (MAKE_DIRECTORY ${SOURCE_DIR}/include)
|
||||
|
||||
macro (add_imported_grpc libname_)
|
||||
add_library ("gRPC::${libname_}" STATIC IMPORTED GLOBAL)
|
||||
set_target_properties ("gRPC::${libname_}" PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG
|
||||
${grpc_binary_dir}/${ep_lib_prefix}${libname_}_d${ep_lib_suffix}
|
||||
IMPORTED_LOCATION_RELEASE
|
||||
${grpc_binary_dir}/${ep_lib_prefix}${libname_}${ep_lib_suffix}
|
||||
INTERFACE_INCLUDE_DIRECTORIES
|
||||
${grpc_source_dir}/include)
|
||||
add_dependencies ("gRPC::${libname_}" grpc_src)
|
||||
target_link_libraries (ripple_libs INTERFACE "gRPC::${libname_}")
|
||||
exclude_if_included ("gRPC::${libname_}")
|
||||
endmacro ()
|
||||
|
||||
set_target_properties (gRPC::grpc_cpp_plugin PROPERTIES
|
||||
IMPORTED_LOCATION "${grpc_binary_dir}/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX}")
|
||||
add_dependencies (gRPC::grpc_cpp_plugin grpc_src)
|
||||
endif ()
|
||||
|
||||
add_imported_grpc (gpr)
|
||||
add_imported_grpc ("grpc${grpc_suffix}")
|
||||
add_imported_grpc ("grpc++${grpc_suffix}")
|
||||
add_imported_grpc (address_sorting)
|
||||
|
||||
target_link_libraries ("gRPC::grpc${grpc_suffix}" INTERFACE c-ares::cares gRPC::gpr gRPC::address_sorting ZLIB::ZLIB)
|
||||
target_link_libraries ("gRPC::grpc++${grpc_suffix}" INTERFACE "gRPC::grpc${grpc_suffix}" gRPC::gpr)
|
||||
endif ()
|
||||
find_package(gRPC 1.23)
|
||||
|
||||
#[=================================[
|
||||
generate protobuf sources for
|
||||
grpc defs and bundle into a
|
||||
static lib
|
||||
#]=================================]
|
||||
set (GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc")
|
||||
file (MAKE_DIRECTORY ${GRPC_GEN_DIR})
|
||||
set (GRPC_PROTO_SRCS)
|
||||
set (GRPC_PROTO_HDRS)
|
||||
set (GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org")
|
||||
set(GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc")
|
||||
file(MAKE_DIRECTORY ${GRPC_GEN_DIR})
|
||||
set(GRPC_PROTO_SRCS)
|
||||
set(GRPC_PROTO_HDRS)
|
||||
set(GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org")
|
||||
file(GLOB_RECURSE GRPC_DEFINITION_FILES LIST_DIRECTORIES false "${GRPC_PROTO_ROOT}/*.proto")
|
||||
foreach(file ${GRPC_DEFINITION_FILES})
|
||||
get_filename_component(_abs_file ${file} ABSOLUTE)
|
||||
@@ -329,10 +20,10 @@ foreach(file ${GRPC_DEFINITION_FILES})
|
||||
get_filename_component(_rel_root_dir ${_rel_root_file} DIRECTORY)
|
||||
file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir})
|
||||
|
||||
set (src_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.cc")
|
||||
set (src_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.cc")
|
||||
set (hdr_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.h")
|
||||
set (hdr_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.h")
|
||||
set(src_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.cc")
|
||||
set(src_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.cc")
|
||||
set(hdr_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.h")
|
||||
set(hdr_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.h")
|
||||
add_custom_command(
|
||||
OUTPUT ${src_1} ${src_2} ${hdr_1} ${hdr_2}
|
||||
COMMAND protobuf::protoc
|
||||
@@ -350,20 +41,22 @@ foreach(file ${GRPC_DEFINITION_FILES})
|
||||
list(APPEND GRPC_PROTO_HDRS ${hdr_1} ${hdr_2})
|
||||
endforeach()
|
||||
|
||||
add_library (grpc_pbufs STATIC ${GRPC_PROTO_SRCS} ${GRPC_PROTO_HDRS})
|
||||
#target_include_directories (grpc_pbufs PRIVATE src)
|
||||
target_include_directories (grpc_pbufs SYSTEM PUBLIC ${GRPC_GEN_DIR})
|
||||
target_link_libraries (grpc_pbufs protobuf::libprotobuf "gRPC::grpc++${grpc_suffix}")
|
||||
target_compile_options (grpc_pbufs
|
||||
add_library(grpc_pbufs STATIC ${GRPC_PROTO_SRCS} ${GRPC_PROTO_HDRS})
|
||||
#target_include_directories(grpc_pbufs PRIVATE src)
|
||||
target_include_directories(grpc_pbufs SYSTEM PUBLIC ${GRPC_GEN_DIR})
|
||||
target_link_libraries(grpc_pbufs
|
||||
"gRPC::grpc++"
|
||||
# libgrpc is missing references.
|
||||
absl::random_random
|
||||
)
|
||||
target_compile_options(grpc_pbufs
|
||||
PRIVATE
|
||||
$<$<BOOL:${MSVC}>:-wd4065>
|
||||
$<$<NOT:$<BOOL:${MSVC}>>:-Wno-deprecated-declarations>
|
||||
PUBLIC
|
||||
$<$<BOOL:${MSVC}>:-wd4996>
|
||||
$<$<BOOL:${is_xcode}>:
|
||||
$<$<BOOL:${XCODE}>:
|
||||
--system-header-prefix="google/protobuf"
|
||||
-Wno-deprecated-dynamic-exception-spec
|
||||
>)
|
||||
add_library (Ripple::grpc_pbufs ALIAS grpc_pbufs)
|
||||
target_link_libraries (ripple_libs INTERFACE Ripple::grpc_pbufs)
|
||||
exclude_if_included (grpc_pbufs)
|
||||
add_library(Ripple::grpc_pbufs ALIAS grpc_pbufs)
|
||||
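When gRPC comes from pkg-config rather than a CMake package, the add_imported_grpc macro above has to turn a bare library path into an imported target, choosing the static or shared file name by prefix and suffix. A trimmed sketch of that lookup for a single library (the example target name is illustrative; grpc_LIBRARY_DIRS and grpc_INCLUDE_DIRS come from pkg_check_modules, and static is the repo's existing option):

if(static)
  set(_search "${CMAKE_STATIC_LIBRARY_PREFIX}gpr${CMAKE_STATIC_LIBRARY_SUFFIX}")
else()
  set(_search "${CMAKE_SHARED_LIBRARY_PREFIX}gpr${CMAKE_SHARED_LIBRARY_SUFFIX}")
endif()
find_library(_found_gpr NAMES ${_search} HINTS ${grpc_LIBRARY_DIRS})
if(NOT _found_gpr)
  message(FATAL_ERROR "pkg-config reported grpc, but ${_search} was not found")
endif()
add_library(gRPC::gpr_example STATIC IMPORTED GLOBAL)   # example target name
set_target_properties(gRPC::gpr_example PROPERTIES
  IMPORTED_LOCATION ${_found_gpr}
  INTERFACE_INCLUDE_DIRECTORIES "${grpc_INCLUDE_DIRS}")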
@@ -186,6 +186,10 @@ test.protocol > ripple.crypto
test.protocol > ripple.json
test.protocol > ripple.protocol
test.protocol > test.toplevel
test.rdb > ripple.app
test.rdb > ripple.core
test.rdb > test.jtx
test.rdb > test.toplevel
test.resource > ripple.basics
test.resource > ripple.beast
test.resource > ripple.resource
157
CMakeLists.txt
@@ -33,6 +33,25 @@ if(Git_FOUND)
endif()
endif() #git

# make SOURCE_ROOT_PATH define available for logging
set(SOURCE_ROOT_PATH "${CMAKE_CURRENT_SOURCE_DIR}/src/")
add_definitions(-DSOURCE_ROOT_PATH="${SOURCE_ROOT_PATH}")

# BEAST_ENHANCED_LOGGING option - adds file:line numbers and formatting to logs
# Default to ON for Debug builds, OFF for Release
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" ON)
else()
option(BEAST_ENHANCED_LOGGING "Include file and line numbers in log messages" OFF)
endif()

if(BEAST_ENHANCED_LOGGING)
add_definitions(-DBEAST_ENHANCED_LOGGING=1)
message(STATUS "Log line numbers enabled")
else()
message(STATUS "Log line numbers disabled")
endif()

if(thread_safety_analysis)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
add_compile_options("-stdlib=libc++")
@@ -50,11 +69,6 @@ if(CMAKE_TOOLCHAIN_FILE)
endif()
endif()

if (NOT USE_CONAN)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps")
endif()

include (CheckCXXCompilerFlag)
include (FetchContent)
include (ExternalProject)
@@ -67,9 +81,7 @@ endif ()
include(RippledSanity)
include(RippledVersion)
include(RippledSettings)
if (NOT USE_CONAN)
include(RippledNIH)
endif()

# this check has to remain in the top-level cmake
# because of the early return statement
if (packages_only)
@@ -81,87 +93,64 @@ endif ()
include(RippledCompiler)
include(RippledInterface)

###
if (NOT USE_CONAN)
add_subdirectory(src/secp256k1)
add_subdirectory(src/ed25519-donna)
include(deps/Boost)
include(deps/OpenSSL)
# include(deps/Secp256k1)
# include(deps/Ed25519-donna)
include(deps/Lz4)
include(deps/Libarchive)
include(deps/Sqlite)
include(deps/Soci)
include(deps/Snappy)
include(deps/Rocksdb)
include(deps/Nudb)
include(deps/date)
include(deps/Protobuf)
include(deps/gRPC)
include(deps/cassandra)
include(deps/Postgres)
include(deps/WasmEdge)
else()
include(conan/Boost)
find_package(OpenSSL 1.1.1 REQUIRED)
set_target_properties(OpenSSL::SSL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
include(deps/Boost)
find_package(OpenSSL 1.1.1 REQUIRED)
set_target_properties(OpenSSL::SSL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
add_subdirectory(src/secp256k1)
add_subdirectory(src/ed25519-donna)
find_package(lz4 REQUIRED)
# Target names with :: are not allowed in a generator expression.
# We need to pull the include directories and imported location properties
# from separate targets.
find_package(LibArchive REQUIRED)
find_package(SOCI REQUIRED)
find_package(SQLite3 REQUIRED)
find_package(Snappy REQUIRED)
# find_package(wasmedge REQUIRED)
option(rocksdb "Enable RocksDB" ON)
if(rocksdb)
find_package(RocksDB REQUIRED)
set_target_properties(RocksDB::rocksdb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1
)
add_subdirectory(src/secp256k1)
|
||||
add_subdirectory(src/ed25519-donna)
|
||||
find_package(lz4 REQUIRED)
|
||||
# Target names with :: are not allowed in a generator expression.
|
||||
# We need to pull the include directories and imported location properties
|
||||
# from separate targets.
|
||||
find_package(LibArchive REQUIRED)
|
||||
find_package(SOCI REQUIRED)
|
||||
find_package(SQLite3 REQUIRED)
|
||||
find_package(Snappy REQUIRED)
|
||||
find_package(wasmedge REQUIRED)
|
||||
option(rocksdb "Enable RocksDB" ON)
|
||||
if(rocksdb)
|
||||
find_package(RocksDB REQUIRED)
|
||||
set_target_properties(RocksDB::rocksdb PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1
|
||||
)
|
||||
target_link_libraries(ripple_libs INTERFACE RocksDB::rocksdb)
|
||||
endif()
|
||||
find_package(nudb REQUIRED)
|
||||
find_package(date REQUIRED)
|
||||
find_package(BLAKE3 REQUIRED)
|
||||
include(conan/Protobuf)
|
||||
include(conan/gRPC)
|
||||
if(TARGET nudb::core)
|
||||
set(nudb nudb::core)
|
||||
elseif(TARGET NuDB::nudb)
|
||||
set(nudb NuDB::nudb)
|
||||
else()
|
||||
message(FATAL_ERROR "unknown nudb target")
|
||||
endif()
|
||||
target_link_libraries(ripple_libs INTERFACE ${nudb})
|
||||
target_link_libraries(ripple_libs INTERFACE RocksDB::rocksdb)
|
||||
endif()
|
||||
find_package(nudb REQUIRED)
|
||||
find_package(date REQUIRED)
|
||||
include(deps/Protobuf)
|
||||
include(deps/gRPC)
|
||||
include(deps/WasmEdge)
|
||||
if(TARGET nudb::core)
|
||||
set(nudb nudb::core)
|
||||
elseif(TARGET NuDB::nudb)
|
||||
set(nudb NuDB::nudb)
|
||||
else()
|
||||
message(FATAL_ERROR "unknown nudb target")
|
||||
endif()
|
||||
target_link_libraries(ripple_libs INTERFACE ${nudb})
|
||||
|
||||
if(reporting)
|
||||
find_package(cassandra-cpp-driver REQUIRED)
|
||||
find_package(PostgreSQL REQUIRED)
|
||||
target_link_libraries(ripple_libs INTERFACE
|
||||
cassandra-cpp-driver::cassandra-cpp-driver
|
||||
PostgreSQL::PostgreSQL
|
||||
)
|
||||
endif()
|
||||
if(reporting)
|
||||
find_package(cassandra-cpp-driver REQUIRED)
|
||||
find_package(PostgreSQL REQUIRED)
|
||||
target_link_libraries(ripple_libs INTERFACE
|
||||
ed25519::ed25519
|
||||
LibArchive::LibArchive
|
||||
lz4::lz4
|
||||
OpenSSL::Crypto
|
||||
OpenSSL::SSL
|
||||
Ripple::grpc_pbufs
|
||||
Ripple::pbufs
|
||||
secp256k1::secp256k1
|
||||
soci::soci
|
||||
SQLite::SQLite3
|
||||
cassandra-cpp-driver::cassandra-cpp-driver
|
||||
PostgreSQL::PostgreSQL
|
||||
)
|
||||
endif()
|
||||
target_link_libraries(ripple_libs INTERFACE
|
||||
ed25519::ed25519
|
||||
LibArchive::LibArchive
|
||||
lz4::lz4
|
||||
OpenSSL::Crypto
|
||||
OpenSSL::SSL
|
||||
Ripple::grpc_pbufs
|
||||
Ripple::pbufs
|
||||
secp256k1::secp256k1
|
||||
soci::soci
|
||||
SQLite::SQLite3
|
||||
)
|
||||
|
||||
###
|
||||
|
||||
|
||||
@@ -176,10 +176,11 @@ existing maintainer without a vote.
|
||||
|
||||
## Current Maintainers
|
||||
|
||||
* [Richard Holland](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation)
|
||||
* [Denis Angell](https://github.com/dangell7) (XRPL Labs + XRP Ledger Foundation)
|
||||
* [Wietse Wind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation)
|
||||
* [Richard Holland](https://github.com/RichardAH) (XRPL Labs + INFTF)
|
||||
* [Denis Angell](https://github.com/dangell7) (XRPL Labs + INFTF)
|
||||
* [Wietse Wind](https://github.com/WietseWind) (XRPL Labs + INFTF)
|
||||
* [tequ](https://github.com/tequdev) (Independent + INFTF)
|
||||
|
||||
|
||||
[1]: https://docs.github.com/en/get-started/quickstart/contributing-to-projects
|
||||
[2]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits
|
||||
[2]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits
|
||||
|
||||
@@ -1,217 +0,0 @@
# Auto-Disable Strategy for Hash Migration

## Core Concept
Instead of trying to fix entries with hardcoded old keys, **automatically disable them** during migration. If an entry contains old keys, it's broken anyway - make that explicit.

## The Algorithm

### Phase 1: Build Complete Old Key Set
```cpp
std::unordered_set<uint256> all_old_keys;

// Collect ALL SHA-512 keys from current state
stateMap_.visitLeaves([&](SHAMapItem const& item) {
    all_old_keys.insert(item.key());

    // Also collect keys from reference fields
    SerialIter sit(item.slice());
    auto sle = std::make_shared<SLE>(sit, item.key());

    // Collect from vector fields
    if (sle->isFieldPresent(sfIndexes)) {
        for (auto& key : sle->getFieldV256(sfIndexes)) {
            all_old_keys.insert(key);
        }
    }
    // ... check all other reference fields
});
```

### Phase 2: Scan and Disable

#### Hook Definitions (WASM Code)
```cpp
bool scanWASMForKeys(Blob const& wasm_code, std::unordered_set<uint256> const& keys) {
    // Scan for 32-byte sequences matching known keys
    for (size_t i = 0; i + 32 <= wasm_code.size(); i++) {  // avoids size_t underflow on blobs shorter than 32 bytes
        uint256 potential_key = extract32Bytes(wasm_code, i);
        if (keys.count(potential_key)) {
            return true; // Found hardcoded key!
        }
    }
    return false;
}

// During migration
for (auto& hookDef : allHookDefinitions) {
    if (scanWASMForKeys(hookDef->getFieldBlob(sfCreateCode), all_old_keys)) {
        hookDef->setFieldU32(sfFlags, hookDef->getFlags() | HOOK_DISABLED_OLD_KEYS);
        disabled_hooks.push_back(hookDef->key());
    }
}
```

#### Hook State (Arbitrary Data)
```cpp
for (auto& hookState : allHookStates) {
    auto data = hookState->getFieldBlob(sfHookStateData);
    if (containsAnyKey(data, all_old_keys)) {
        hookState->setFieldU32(sfFlags, STATE_INVALID_OLD_KEYS);
        disabled_states.push_back(hookState->key());
    }
}
```
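
`containsAnyKey` is assumed above rather than defined; a minimal sketch, mirroring the WASM scan:

```cpp
// Hypothetical helper: true if any 32-byte window of `data` equals a known old key.
bool containsAnyKey(Blob const& data, std::unordered_set<uint256> const& keys) {
    if (data.size() < 32)
        return false;
    for (size_t i = 0; i + 32 <= data.size(); ++i) {
        uint256 candidate;
        std::memcpy(candidate.data(), data.data() + i, 32);  // copy the 32-byte window
        if (keys.count(candidate))
            return true;
    }
    return false;
}
```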

#### Other Vulnerable Entry Types
```cpp
void disableEntriesWithOldKeys(SLE& sle) {
    switch (sle.getType()) {
        case ltHOOK:
            if (hasOldKeys(sle)) {
                sle.setFlag(HOOK_DISABLED_MIGRATION);
            }
            break;

        case ltESCROW:
            // Check if destination/condition contains old keys
            if (containsOldKeyReferences(sle)) {
                sle.setFlag(ESCROW_FROZEN_MIGRATION);
            }
            break;

        case ltPAYCHAN:
            // Payment channels with old key references
            if (hasOldKeyInFields(sle)) {
                sle.setFlag(PAYCHAN_SUSPENDED_MIGRATION);
            }
            break;

        case ltHOOK_STATE:
            // Already handled above
            break;
    }
}
```

## Flag Definitions

```cpp
// New flags for migration-disabled entries
constexpr uint32_t HOOK_DISABLED_OLD_KEYS = 0x00100000;
constexpr uint32_t STATE_INVALID_OLD_KEYS = 0x00200000;
constexpr uint32_t ESCROW_FROZEN_MIGRATION = 0x00400000;
constexpr uint32_t PAYCHAN_SUSPENDED_MIGRATION = 0x00800000;
constexpr uint32_t ENTRY_BROKEN_MIGRATION = 0x01000000;
```

## Execution Prevention

```cpp
// In transaction processing
TER HookExecutor::executeHook(Hook const& hook) {
    if (hook.isFieldPresent(sfFlags)) {
        if (hook.getFlags() & HOOK_DISABLED_OLD_KEYS) {
            return tecHOOK_DISABLED_MIGRATION;
        }
    }
    // Normal execution
}

TER processEscrow(Escrow const& escrow) {
    if (escrow.getFlags() & ESCROW_FROZEN_MIGRATION) {
        return tecESCROW_FROZEN_MIGRATION;
    }
    // Normal processing
}
```

## Re-enabling Process

### For Hooks
Developer must submit a new SetHook transaction with updated WASM:
```cpp
TER SetHook::doApply() {
    // If hook was disabled for migration
    if (oldHook->getFlags() & HOOK_DISABLED_OLD_KEYS) {
        // Verify new WASM doesn't contain old keys
        if (scanWASMForKeys(newWasm, all_old_keys)) {
            return tecSTILL_CONTAINS_OLD_KEYS;
        }
        // Clear the disabled flag
        newHook->clearFlag(HOOK_DISABLED_OLD_KEYS);
    }
}
```

### For Hook State
Must be cleared and rebuilt:
```cpp
TER HookStateModify::doApply() {
    if (state->getFlags() & STATE_INVALID_OLD_KEYS) {
        // Can only delete, not modify
        if (operation != DELETE) {
            return tecSTATE_REQUIRES_REBUILD;
        }
    }
}
```

## Migration Report

```json
{
  "migration_ledger": 20000000,
  "entries_scanned": 620000,
  "entries_disabled": {
    "hooks": 12,
    "hook_definitions": 3,
    "hook_states": 1847,
    "escrows": 5,
    "payment_channels": 2
  },
  "disabled_by_reason": {
    "wasm_contains_keys": 3,
    "state_contains_keys": 1847,
    "reference_old_keys": 19
  },
  "action_required": [
    {
      "type": "HOOK_DEFINITION",
      "key": "0xABCD...",
      "owner": "rXXX...",
      "reason": "WASM contains 3 hardcoded SHA-512 keys",
      "action": "Recompile hook with new keys or remove hardcoding"
    }
  ]
}
```

## Benefits

1. **Safety**: Broken things explicitly disabled, not silently failing
2. **Transparency**: Clear record of what was disabled and why
3. **Natural Cleanup**: Abandoned entries stay disabled forever
4. **Developer Responsibility**: Owners must actively fix and re-enable
5. **No Silent Corruption**: Better to disable than corrupt
6. **Audit Trail**: Complete record of migration casualties

## Implementation Complexity

- **Scanning**: O(n×m) where n=entries, m=data size
- **Memory**: Need all old keys in memory (~40MB)
- **False Positives**: Extremely unlikely (2^-256 probability)
- **Recovery**: Clear path to re-enable fixed entries

## The Nuclear Option

If too many critical entries would be disabled:
```cpp
if (disabled_count > ACCEPTABLE_THRESHOLD) {
    // Abort migration
    return temMIGRATION_TOO_DESTRUCTIVE;
}
```

## Summary

Instead of attempting impossible fixes for hardcoded keys, acknowledge reality: **if it contains old keys, it's broken**. Make that brokenness explicit through disabling, forcing conscious action to repair and re-enable. This turns an impossible problem (fixing hardcoded keys in WASM) into a manageable one (identifying and disabling broken entries).
@@ -1,663 +0,0 @@
|
||||
# Hash Migration to Blake3 - Work Context
|
||||
|
||||
## Build Commands
|
||||
- **To build**: `ninja -C build`
|
||||
- **To count errors**: `ninja -C build 2>&1 | grep "error:" | wc -l`
|
||||
- **To see failed files**: `ninja -C build 2>&1 | grep "^FAILED:" | head -20`
|
||||
- **DO NOT USE**: `cmake --build` or `make`
|
||||
|
||||
## Test Compilation of Single Files
|
||||
|
||||
### Quick Method (basic errors only)
|
||||
```bash
|
||||
clang++ -std=c++20 -I/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src \
|
||||
-c src/test/app/NFToken_test.cpp -o /tmp/test.o 2>&1 | head -50
|
||||
```
|
||||
|
||||
### Full Compilation Command (from compile_commands.json)
|
||||
Extract the exact compilation command for any file:
|
||||
```bash
|
||||
# For any specific file:
|
||||
jq -r '.[] | select(.file | endswith("NFToken_test.cpp")) | .command' build/compile_commands.json
|
||||
|
||||
# Or simplified with just the file path:
|
||||
FILE="src/test/app/NFToken_test.cpp"
|
||||
jq -r --arg file "$FILE" '.[] | select(.file | endswith($file)) | .command' build/compile_commands.json
|
||||
```
|
||||
|
||||
### compile_commands.json location
|
||||
- **Location**: `build/compile_commands.json`
|
||||
- Contains exact compilation commands with all include paths and flags for each source file
|
||||
- Generated by CMake during configuration
|
||||
|
||||
## Objective
|
||||
Modify Xahaud to pass ledger_index through all hash functions to enable switching from SHA512-Half to Blake3 at a specific ledger index.
|
||||
|
||||
## Current Approach
|
||||
Using a `hash_options` struct containing `ledger_index` that must be passed to all hash functions.
|
||||
|
||||
## Structure Added
|
||||
```cpp
|
||||
struct hash_options {
|
||||
std::uint32_t ledger_index;
|
||||
explicit hash_options(std::uint32_t li) : ledger_index(li) {}
|
||||
};
|
||||
```
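
For orientation, a minimal sketch of the intended dispatch, assuming a hypothetical `BLAKE3_TRANSITION_LEDGER` constant and a `blake3Half` helper (neither exists yet); the real code would route this through the hasher classes, this just shows the decision point:

```cpp
// Hypothetical: pick the hash family from the ledger context in hash_options.
constexpr std::uint32_t BLAKE3_TRANSITION_LEDGER = 20'000'000;  // placeholder value

template <class... Args>
uint256
ledgerDigest(hash_options const& opts, Args const&... args)
{
    if (opts.ledger_index >= BLAKE3_TRANSITION_LEDGER)
        return blake3Half(args...);   // assumed BLAKE3 analogue of sha512Half
    return sha512Half(args...);       // existing SHA-512 half digest
}
```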
|
||||
|
||||
## CRITICAL: Hash Function Classification Required
|
||||
|
||||
### The Historical Ledger Problem
|
||||
**EVERY** hash operation needs proper classification because even "content hashing" (like transaction IDs, validator manifests, signatures) depends on WHEN it was created:
|
||||
- Transaction from ledger 10M (pre-transition) → Must use SHA-512 Half
|
||||
- Transaction from ledger 20M (post-transition) → Must use BLAKE3
|
||||
- You cannot mix hash algorithms for the same ledger - it's all or nothing
|
||||
|
||||
### Classification Constants (to be added to digest.h)
|
||||
As an interim step, introduce classification constants to make intent explicit:
|
||||
```cpp
|
||||
// Special ledger_index values for hash operations
|
||||
constexpr uint32_t LEDGER_INDEX_UNKNOWN = 0; // DANGEROUS - avoid!
|
||||
constexpr uint32_t LEDGER_INDEX_TEST_ONLY = std::numeric_limits<uint32_t>::max();
|
||||
constexpr uint32_t LEDGER_INDEX_NETWORK_PROTOCOL = std::numeric_limits<uint32_t>::max() - 1;
|
||||
constexpr uint32_t LEDGER_INDEX_CURRENT = std::numeric_limits<uint32_t>::max() - 2; // Use current ledger
|
||||
```
|
||||
|
||||
### Classification Categories
|
||||
|
||||
1. **Ledger Object Indexing** (MUST use actual ledger_index)
|
||||
- All `indexHash()` calls determining WHERE objects live
|
||||
- All keylet functions creating/finding ledger objects
|
||||
- SHAMap node hashing (builds the Merkle tree)
|
||||
|
||||
2. **Historical Content Hashing** (MUST use ledger_index from when it was created)
|
||||
- Transaction IDs (use ledger where tx was included)
|
||||
- Validator manifests (use ledger when signed)
|
||||
- Hook code hashing (use ledger when deployed)
|
||||
- Signatures referencing ledger data
|
||||
|
||||
3. **Test Code** (use LEDGER_INDEX_TEST_ONLY)
|
||||
- Unit tests
|
||||
- Mock objects
|
||||
- Test fixtures
|
||||
|
||||
4. **Network Protocol** (special handling needed)
|
||||
- Peer handshakes
|
||||
- Protocol messages
|
||||
- May need to support both algorithms during transition
|
||||
|
||||
### Why hash_options{0} is Dangerous
|
||||
Using `hash_options{0}` assumes everything is pre-transition (SHA-512 Half forever), which breaks after the transition point. Every usage must be classified and given the appropriate context.
|
||||
|
||||
## SHAMap Hash Architecture Analysis
|
||||
|
||||
### Current State
|
||||
- SHAMap stores `ledgerSeq_` and knows which ledger it represents
|
||||
- Nodes compute hashes WITHOUT ledger context (just use sha512Half directly)
|
||||
- Nodes can be SHARED between SHAMaps via canonicalization/caching
|
||||
- Node's `hash_` member stores only ONE hash value
|
||||
|
||||
### The Fundamental Problem: Node Sharing vs Hash Migration
|
||||
|
||||
When nodes are shared between ledgers through canonicalization:
|
||||
- A node from ledger 19,999,999 (SHA-512) has hash H1
|
||||
- Same node referenced by ledger 20,000,000 (BLAKE3) needs hash H2
|
||||
- **Cannot store both hashes without major memory overhead**
|
||||
|
||||
### Merkle Tree Constraint
|
||||
The Merkle tree structure requires homogeneous hashing:
|
||||
```
|
||||
Root (BLAKE3)
|
||||
├── Child1 (BLAKE3) ✓
|
||||
└── Child2 (SHA-512) ✗ IMPOSSIBLE - breaks tree integrity
|
||||
```
|
||||
You cannot mix hash algorithms within a single tree - all nodes must use the same algorithm.
|
||||
|
||||
### Migration Strategies Considered
|
||||
|
||||
#### Option 1: Lazy/Gradual Migration ✗
|
||||
- Store both SHA-512 and BLAKE3 hashes in each node
|
||||
- Problems:
|
||||
- Double memory usage per node
|
||||
- Complex cache invalidation logic
|
||||
- Still can't mix algorithms in same tree
|
||||
- Node sharing between ledgers becomes impossible
|
||||
|
||||
#### Option 2: Big Bang Migration ✓ (Recommended)
|
||||
- At transition ledger:
|
||||
- Invalidate ALL cached/stored nodes
|
||||
- Rebuild entire state with new hash algorithm
|
||||
- Maintain separate caches for pre/post transition
|
||||
- Benefits:
|
||||
- Clean separation of hash epochs
|
||||
- Easier to reason about
|
||||
- No memory overhead
|
||||
- Maintains tree integrity
|
||||
|
||||
### Implementation Requirements for Big Bang
|
||||
|
||||
1. **Pass ledger_index through all hash operations:**
|
||||
```cpp
|
||||
void updateHash(std::uint32_t ledgerSeq);
|
||||
SHAMapHash getHash(std::uint32_t ledgerSeq) const;
|
||||
```
|
||||
|
||||
2. **Separate node caches by hash epoch:**
|
||||
- Pre-transition: SHA-512 node cache
|
||||
- Post-transition: BLAKE3 node cache
|
||||
- Never share nodes between epochs
|
||||
|
||||
3. **Critical functions needing updates:**
|
||||
- `SHAMapInnerNode::updateHash()`
|
||||
- `SHAMapInnerNode::updateHashDeep()`
|
||||
- All `SHAMapLeafNode` subclass hash computations
|
||||
- `SHAMap::flushDirty()` and `walkSubTree()`
|
||||
|
||||
### Why Big Bang is Preferred
|
||||
- **Correctness**: Guarantees Merkle tree integrity
|
||||
- **Simplicity**: No complex dual-hash logic
|
||||
- **Performance**: No overhead of maintaining multiple hashes
|
||||
- **Clear boundaries**: Pre-transition and post-transition are completely separate
|
||||
|
||||
The transition point becomes a hard boundary where the entire ledger state is rehashed with the new algorithm.
|
||||
|
||||
### Alternative: Heterogeneous Tree - Deep Dive
|
||||
|
||||
After deeper analysis, heterogeneous trees are more complex but potentially viable. Here's a comprehensive examination:
|
||||
|
||||
#### The Core Insight: Hash Values Are Algorithm-Agnostic
|
||||
|
||||
When `SHAMapInnerNode::updateHash()` computes a hash:
|
||||
```cpp
|
||||
void updateHash(hash_options const& opts) {
|
||||
sha512_half_hasher h(opts);
|
||||
hash_append(h, HashPrefix::innerNode);
|
||||
iterChildren([&](SHAMapHash const& hh) { hash_append(h, hh); });
|
||||
// Parent hashes the HASH VALUES of children, not the raw data
|
||||
}
|
||||
```
|
||||
|
||||
**Key realization**: Parent nodes hash their children's hash values (256-bit numbers), NOT the children's data. This means a BLAKE3 parent can hash SHA-512 child hashes without issue.
|
||||
|
||||
#### How Heterogeneous Trees Would Work
|
||||
|
||||
Post-transition rule: **Any NEW hash uses BLAKE3**
|
||||
|
||||
```
|
||||
Ledger 19,999,999 (SHA-512):
|
||||
Root_SHA512
|
||||
├── Child1_SHA512
|
||||
└── Child2_SHA512
|
||||
|
||||
Ledger 20,000,000 (BLAKE3) - Child1 modified:
|
||||
Root_BLAKE3 = BLAKE3(Child1_BLAKE3_hash || Child2_SHA512_hash)
|
||||
├── Child1_BLAKE3 (NEW hash due to modification)
|
||||
└── Child2_SHA512 (unchanged, keeps old hash)
|
||||
```
|
||||
|
||||
#### The Canonical Structure Ensures Determinism
|
||||
|
||||
**Critical insight**: SHAMap trees are **canonical** - the structure is deterministic based on keys:
|
||||
- Alice's account always goes in the same tree position
|
||||
- Bob's account always goes in the same position
|
||||
- Tree shape is fully determined by the set of keys
|
||||
|
||||
Therefore:
|
||||
- Same modifications → Same tree structure
|
||||
- Same tree structure → Same nodes get rehashed
|
||||
- Same rehashing → Same final root hash
|
||||
- **Consensus is maintained!**
|
||||
|
||||
#### The "NEW vs OLD" Detection Problem
|
||||
|
||||
The killer issue: How do you know if you're computing a NEW hash vs verifying an OLD one?
|
||||
|
||||
```cpp
|
||||
void updateHash(hash_options const& opts) {
|
||||
// Am I computing a NEW hash (use BLAKE3)?
|
||||
// Or verifying an OLD hash (could be SHA-512)?
|
||||
// This function doesn't know WHY it was called!
|
||||
}
|
||||
```
|
||||
|
||||
Without explicit context about NEW vs OLD:
|
||||
- Loading from DB: Don't know if it's SHA-512 or BLAKE3
|
||||
- Modifying a node: Should use BLAKE3
|
||||
- Verifying from network: Could be either
|
||||
|
||||
Potential solutions:
|
||||
1. **Try-both approach**: Verify with BLAKE3, fallback to SHA-512
|
||||
2. **Version tracking**: Store algorithm version with each node
|
||||
3. **Context passing**: Thread NEW/OLD context through all calls
|
||||
|
||||
#### Canonical Nodes (cowid=0) - A Complication
|
||||
|
||||
Canonical nodes are immutable and shared, BUT:
|
||||
- They ARE verified when loaded from DB or network
|
||||
- The verification needs to compute the hash to check integrity
|
||||
- This means we need to know WHICH algorithm was used
|
||||
- Can't just trust the hash - must verify data matches
|
||||
|
||||
This actually makes heterogeneous trees HARDER because:
|
||||
```cpp
|
||||
// When loading a canonical node from DB:
|
||||
auto node = SHAMapTreeNode::makeFromPrefix(data, hash);
|
||||
// Need to verify: does hash(data) == provided_hash?
|
||||
// But which hash function? SHA-512 or BLAKE3?
|
||||
// Must try both, adding complexity and ambiguity
|
||||
```
|
||||
|
||||
#### System-Wide Implications
|
||||
|
||||
##### Database Layer
|
||||
- Heterogeneous: Same data might have 2 entries (SHA-512 and BLAKE3 versions)
|
||||
- Big Bang: Clean cutover, old entries become invalid
|
||||
|
||||
##### Network Sync
|
||||
- Heterogeneous: Ambiguous - "I need node 0xABC..." (which algorithm?)
|
||||
- Big Bang: Clear - algorithm determined by ledger context
|
||||
|
||||
##### Consensus
|
||||
- Heterogeneous: Works IF all validators make same NEW/OLD decisions
|
||||
- Big Bang: Simple - everyone uses same algorithm
|
||||
|
||||
##### External Proof Verification
|
||||
- Heterogeneous: Complex - mixed algorithms in Merkle paths
|
||||
- Big Bang: Simple - "before ledger X use SHA-512, after use BLAKE3"
|
||||
|
||||
##### Performance
|
||||
- Heterogeneous: Double verification attempts for old nodes
|
||||
- Big Bang: One-time rehash cost
|
||||
|
||||
#### Gradual Migration Pattern
|
||||
|
||||
With heterogeneous trees, migration happens naturally:
|
||||
```
|
||||
Ledger 20,000,000: 5% BLAKE3, 95% SHA512
|
||||
Ledger 20,001,000: 30% BLAKE3, 70% SHA512
|
||||
Ledger 20,010,000: 60% BLAKE3, 40% SHA512
|
||||
Ledger 20,100,000: 90% BLAKE3, 10% SHA512
|
||||
Eventually: ~100% BLAKE3 (dormant nodes may remain SHA-512)
|
||||
```
|
||||
|
||||
#### Remaining Challenges
|
||||
|
||||
1. **Context Plumbing**: Need to distinguish NEW vs OLD operations everywhere
|
||||
2. **Verification Ambiguity**: Failures could be corruption OR wrong algorithm
|
||||
3. **Testing Complexity**: Many more edge cases to test
|
||||
4. **Protocol Complexity**: Merkle proofs need algorithm information
|
||||
5. **Developer Cognitive Load**: Harder to reason about
|
||||
|
||||
### Conclusion: Trade-offs
|
||||
|
||||
**Heterogeneous Trees**:
|
||||
- ✅ No "big bang" transition
|
||||
- ✅ Natural, incremental migration
|
||||
- ✅ Maintains consensus (with careful implementation)
|
||||
- ❌ Permanent complexity throughout codebase
|
||||
- ❌ Ambiguous verification
|
||||
- ❌ Complex testing
|
||||
|
||||
**Big Bang Migration**:
|
||||
- ✅ Clean, simple mental model
|
||||
- ✅ Clear algorithm boundaries
|
||||
- ✅ Easier testing and debugging
|
||||
- ❌ One-time massive performance hit
|
||||
- ❌ Requires careful coordination
|
||||
- ❌ Can't easily roll back
|
||||
|
||||
The heterogeneous approach is **theoretically viable** but adds significant permanent complexity. Big Bang is simpler but has a painful transition. The choice depends on whether you prefer one-time pain (Big Bang) or permanent complexity (heterogeneous).
|
||||
|
||||
## Files Modified So Far
|
||||
1. `src/ripple/protocol/digest.h` - Added hash_options struct and modified sha512Half signatures
|
||||
2. `src/ripple/protocol/impl/Indexes.cpp` - Updated indexHash and all keylet functions to accept hash_options
|
||||
3. `src/ripple/protocol/Indexes.h` - Updated all function declarations to include hash_options
|
||||
|
||||
## Current Status
|
||||
- Core hash functions modified with backward-compatible overloads
|
||||
- All keylet functions updated to require hash_options
|
||||
- Propagating hash_options through codebase - MASSIVE undertaking
|
||||
- 91+ compilation errors remaining after fixing ~20 files
|
||||
- Every fix exposes more errors as headers propagate changes
|
||||
|
||||
## SHAMap Node Factory Methods - Missing Ledger Context
|
||||
|
||||
### The Problem with makeFromWire and makeFromPrefix
|
||||
These factory methods create SHAMap nodes from serialized data but **don't have ledger context**:
|
||||
|
||||
```cpp
|
||||
// Called when receiving nodes from peers over network
|
||||
SHAMapTreeNode::makeFromWire(Slice rawNode)
|
||||
|
||||
// Called when loading nodes from database
|
||||
SHAMapTreeNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash)
|
||||
```
|
||||
|
||||
These methods:
|
||||
- Parse serialized node data (from network or database)
|
||||
- Create `SHAMapInnerNode` or leaf nodes
|
||||
- Need to call `updateHash()` if hash isn't provided
|
||||
- **BUT** don't know which ledger they're building for!
|
||||
|
||||
### Why This Is Critical
|
||||
When a node is loaded from database or received from network:
|
||||
1. The serialized data doesn't include ledger_index
|
||||
2. The node might be shared across multiple ledgers (pre-transition)
|
||||
3. After transition, we need to know if this node uses SHA-512 or BLAKE3
|
||||
4. Currently using `LEDGER_INDEX_UNKNOWN` as placeholder
|
||||
|
||||
### Implications for P2P Protocol
|
||||
The network protocol would need changes:
|
||||
- `TMGetLedger` messages would need to specify ledger_index
|
||||
- Node responses would need hash algorithm version
|
||||
- Database storage would need to track which hash was used
|
||||
|
||||
This reinforces why **Big Bang migration** is simpler - no protocol changes needed!
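
If the sequence were threaded through anyway, the factory methods would only need one extra parameter; a hedged sketch (not the current API):

```cpp
// Hypothetical ledger-aware variants: callers pass the sequence they already
// have (e.g. packet.ledgerseq() from TMLedgerData) so hash verification can
// pick the right algorithm.
static std::shared_ptr<SHAMapTreeNode>
makeFromWire(Slice rawNode, std::uint32_t ledgerSeq);

static std::shared_ptr<SHAMapTreeNode>
makeFromPrefix(Slice rawNode, SHAMapHash const& hash, std::uint32_t ledgerSeq);
```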
|
||||
|
||||
## Next Steps
|
||||
1. Add hash_options{0} to all keylet call sites (using 0 as placeholder ledger_index)
|
||||
2. Get the code compiling first
|
||||
3. Later: Thread actual ledger_index values through from Views/transactions
|
||||
4. Eventually: Add Blake3 switching logic based on ledger_index threshold
|
||||
|
||||
## Key Insight
|
||||
Every place that creates a ledger key needs to know what ledger it's operating on. This is a massive change touching:
|
||||
- All View classes
|
||||
- All transaction processors
|
||||
- All RPC handlers
|
||||
- All tests
|
||||
- Consensus code
|
||||
|
||||
## Compilation Strategy
|
||||
NEVER use hash_options{0} - always use proper classification from the HashContext enum in digest.h!
|
||||
|
||||
## Quick Reference for Fixing Test Files
|
||||
|
||||
### Essential Files to Reference
|
||||
1. **@src/ripple/protocol/digest.h** - Lines 37-107 contain the HashContext enum with ALL valid classifiers
|
||||
2. **@src/ripple/protocol/Indexes.h** - Shows all keylet function signatures that need hash_options
|
||||
|
||||
### Keylet Function Mapping to HashContext
|
||||
When you see a keylet function, use the corresponding KEYLET_* enum value:
|
||||
```cpp
|
||||
keylet::account() → KEYLET_ACCOUNT
|
||||
keylet::amendments() → KEYLET_AMENDMENTS
|
||||
keylet::book() → KEYLET_BOOK
|
||||
keylet::check() → KEYLET_CHECK
|
||||
keylet::child() → KEYLET_CHILD
|
||||
keylet::depositPreauth() → KEYLET_DEPOSIT_PREAUTH
|
||||
keylet::dirPage() → KEYLET_DIR_PAGE
|
||||
keylet::emittedDir() → KEYLET_EMITTED_DIR
|
||||
keylet::emittedTxn() → KEYLET_EMITTED_TXN
|
||||
keylet::escrow() → KEYLET_ESCROW
|
||||
keylet::fees() → KEYLET_FEES
|
||||
keylet::hook() → KEYLET_HOOK
|
||||
keylet::hookDefinition() → KEYLET_HOOK_DEFINITION
|
||||
keylet::hookState() → KEYLET_HOOK_STATE
|
||||
keylet::hookStateDir() → KEYLET_HOOK_STATE_DIR
|
||||
keylet::importVLSeq() → KEYLET_IMPORT_VLSEQ
|
||||
keylet::negativeUNL() → KEYLET_NEGATIVE_UNL
|
||||
keylet::nftBuys() → KEYLET_NFT_BUYS
|
||||
keylet::nftOffer() → KEYLET_NFT_OFFER
|
||||
keylet::nftPage() → KEYLET_NFT_PAGE
|
||||
keylet::nftSells() → KEYLET_NFT_SELLS
|
||||
keylet::offer() → KEYLET_OFFER
|
||||
keylet::ownerDir() → KEYLET_OWNER_DIR
|
||||
keylet::payChan() → KEYLET_PAYCHAN
|
||||
keylet::signers() → KEYLET_SIGNERS
|
||||
keylet::skip() → KEYLET_SKIP_LIST
|
||||
keylet::ticket() → KEYLET_TICKET
|
||||
keylet::trustline() → KEYLET_TRUSTLINE
|
||||
keylet::unchecked() → KEYLET_UNCHECKED
|
||||
keylet::UNLReport() → KEYLET_UNL_REPORT
|
||||
keylet::uriToken() → KEYLET_URI_TOKEN
|
||||
```
|
||||
|
||||
### Non-Keylet Hash Classifications
|
||||
```cpp
|
||||
sha512Half() for validator data → VALIDATOR_LIST_HASH
|
||||
sha512Half() for hook code → HOOK_DEFINITION or LEDGER_INDEX_UNNEEDED
|
||||
sha512Half() for signatures → CRYPTO_SIGNATURE_HASH
|
||||
sha512Half() for network protocol → NETWORK_HANDSHAKE_HASH
|
||||
sha512Half_s() for secure hashing → Same rules apply
|
||||
```
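
Illustrative only - a classified call site might read like this (the two-argument `hash_options` form mirrors the test patterns in section 8; `manifestBlob` is a placeholder):

```cpp
// Illustrative: hash validator-list data with an explicit classification.
auto const digest = sha512Half(
    hash_options{LEDGER_INDEX_NETWORK_PROTOCOL, VALIDATOR_LIST_HASH},
    makeSlice(manifestBlob));
```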
|
||||
|
||||
## Key Insights
|
||||
|
||||
### The Scale Problem
|
||||
Every place that creates a ledger key needs to know what ledger it's operating on:
|
||||
- All View classes
|
||||
- All transaction processors (50+ files)
|
||||
- All RPC handlers (30+ files)
|
||||
- All tests (hundreds of files)
|
||||
- Consensus code
|
||||
- Path finding code
|
||||
- Payment processing pipelines
|
||||
|
||||
### Why This Is Hard
|
||||
1. **Cascading changes**: Fixing one header file exposes dozens of new errors
|
||||
2. **Deep call chains**: Ledger index must be threaded through multiple layers
|
||||
3. **Protocol implications**: Network messages would need to include ledger sequences
|
||||
4. **No gradual migration**: Can't mix hash algorithms - it's all or nothing
|
||||
5. **Testing nightmare**: Every test that creates mock ledger objects needs updating
|
||||
|
||||
### The Fundamental Challenge
|
||||
The hash function is so deeply embedded in the architecture that changing it is like replacing the foundation of a building while people are living in it. This is why most blockchains never change their hash functions after launch.
|
||||
|
||||
## Key Learnings - Where to Get ledger_index
|
||||
|
||||
### 1. ReadView/ApplyView Classes
|
||||
- `view.seq()` returns the LedgerIndex (found in ReadView.h:193)
|
||||
- ReadView has `info()` method that returns LedgerInfo struct
|
||||
- LedgerInfo contains `seq` field which is the ledger sequence number
|
||||
- Cast to uint32_t: `static_cast<std::uint32_t>(view.seq())`
|
||||
|
||||
### 2. Common Patterns Found
|
||||
- Functions that take `ReadView const& view` can access `view.seq()`
|
||||
- Functions that take `ApplyView& view` can also access `view.seq()` (ApplyView inherits from ReadView)
|
||||
|
||||
### 3. Files That Need Updates
|
||||
- View.h - DONE - all 6 keylet calls updated to use view.seq()
|
||||
- Any file with keylet:: namespace calls
|
||||
- Transaction processors that create/lookup ledger objects
|
||||
|
||||
### 4. Progress Log
|
||||
|
||||
#### Stats (Updated: 2025-09-09)
|
||||
- Total files with keylet:: calls: 126
|
||||
- Files fixed so far: **100+** (including all core files, transaction processors, RPC handlers, and most tests)
|
||||
- Started with 7 errors, peaked at 105+ as fixes propagate through headers
|
||||
- **Currently down to 113 errors across 11 test files** (from hundreds of files!)
|
||||
- ALL keylet function signatures updated to require hash_options
|
||||
- Pattern emerging: Every transaction processor, every RPC handler, every test needs updating
|
||||
|
||||
#### Major Milestone Achieved
|
||||
- Successfully fixed ALL non-test source files
|
||||
- Fixed majority of test files using parallel agents
|
||||
- Demonstrated that the hash migration IS possible despite being a massive undertaking
|
||||
|
||||
#### Major Files Fixed
|
||||
- All core ledger files (View.cpp, ApplyView.cpp, ReadView.cpp, etc.)
|
||||
- Most transaction processors (Payment, Escrow, CreateOffer, NFToken*, etc.)
|
||||
- Hook implementation files (applyHook.cpp, SetHook.cpp)
|
||||
- Infrastructure files (Transactor.cpp, BookDirs.cpp, Directory.cpp)
|
||||
|
||||
#### Key Milestone
|
||||
- Updated ALL keylet function signatures in Indexes.h/cpp to require hash_options
|
||||
- Even functions that take pre-existing uint256 keys now require hash_options for consistency
|
||||
- This causes massive cascading compilation errors but ensures consistency
|
||||
|
||||
#### Files Fixed So Far
|
||||
1. **View.h** - Fixed using `view.seq()`
|
||||
2. **Ledger.cpp** - Fixed using `info_.seq` in constructor, `seq()` in methods
|
||||
3. **LocalTxs.cpp** - Fixed using `view.seq()`
|
||||
4. **NegativeUNLVote.cpp** - Fixed using `prevLedger->seq()`
|
||||
5. **TxQ.cpp** - Fixed 5 calls using `view.seq()`
|
||||
6. **SkipListAcquire.cpp** - Fixed (with protocol issue on line 90)
|
||||
7. **LedgerReplayMsgHandler.cpp** - Fixed using `info.seq`
|
||||
8. **RCLValidations.cpp** - Fixed using `ledger->seq()`
|
||||
9. **Credit.cpp** - Fixed using `view.seq()`
|
||||
10. **StepChecks.h** - Fixed using `view.seq()`
|
||||
11. **BookTip.cpp** - Fixed using `view.seq()`
|
||||
12. **XRPEndpointStep.cpp** - Fixed using `ctx.view.seq()`
|
||||
13. **DirectStep.cpp** - Fixed using `sb.seq()` and `ctx.view.seq()`
|
||||
14. **BookStep.cpp** - Fixed using `afView.seq()` and `view.seq()`
|
||||
15. **CancelCheck.cpp** - Fixed using `ctx.view.seq()` and `view().seq()`
|
||||
16. **Pathfinder.cpp** - Fixed using `mLedger->seq()`
|
||||
17. More coming...
|
||||
|
||||
#### Common Patterns Found
|
||||
- **ReadView/ApplyView**: Use `.seq()`
|
||||
- **Ledger pointer**: Use `->seq()`
|
||||
- **Transactor classes**: Use `view().seq()`
|
||||
- **PaymentSandbox**: Use `sb.seq()`
|
||||
- **StrandContext**: Use `ctx.view.seq()`
|
||||
- **LedgerInfo struct**: Use `.seq` field directly
|
||||
|
||||
### 5. Architectural Questions
|
||||
|
||||
#### Keylets with pre-existing keys
|
||||
- Functions like `keylet::check(uint256 const& key)` just wrap an existing key
|
||||
- They don't compute a new hash, just interpret the key as a specific ledger type
|
||||
- **Question**: Do these really need hash_options?
|
||||
- **Current approach**: Include hash_options for consistency, might store ledger_index in Keylet for future use
|
||||
- **Note**: This could be revisited - might be unnecessary overhead for simple key wrapping
|
||||
|
||||
### 6. Known Issues
|
||||
- SkipListAcquire.cpp line 90: Requesting skip list by hash without knowing ledger seq yet
|
||||
- Can't know the sequence until we GET the ledger
|
||||
- Using hash_options{0} as placeholder
|
||||
- Would need to refactor to fetch ledger first, THEN request skip list with proper seq
|
||||
- Or protocol change to handle "skip list for whatever ledger this hash is"
|
||||
|
||||
### 7. Network Protocol Has Ledger Sequence!
|
||||
|
||||
**Critical Discovery**: The protobuf definitions show that network messages DO carry ledger sequence in many places:
|
||||
|
||||
#### TMLedgerData (what InboundLedger::processData receives):
|
||||
```protobuf
|
||||
message TMLedgerData {
|
||||
required bytes ledgerHash = 1;
|
||||
required uint32 ledgerSeq = 2; // <-- HAS THE SEQUENCE!
|
||||
required TMLedgerInfoType type = 3;
|
||||
repeated TMLedgerNode nodes = 4;
|
||||
}
|
||||
```
|
||||
|
||||
#### TMGetLedger (the request):
|
||||
```protobuf
|
||||
message TMGetLedger {
|
||||
optional uint32 ledgerSeq = 4; // Can request by sequence
|
||||
}
|
||||
```
|
||||
|
||||
#### TMIndexedObject (per-object context):
|
||||
```protobuf
|
||||
message TMIndexedObject {
|
||||
optional uint32 ledgerSeq = 5; // Per-object sequence!
|
||||
}
|
||||
```
|
||||
|
||||
**Implications**:
|
||||
- The protocol already has infrastructure for ledger context
|
||||
- `InboundLedger` can use `packet.ledgerseq()` from TMLedgerData
|
||||
- Network sync might be solvable WITHOUT major protocol changes
|
||||
- The ledger sequence just needs to be threaded through to hash functions
|
||||
|
||||
**Key Flow**:
|
||||
```cpp
|
||||
InboundLedger::processData(protocol::TMLedgerData& packet)
|
||||
packet.ledgerseq() // <-- Extract sequence from protocol message
|
||||
<- SHAMap::addKnownNode(..., ledgerSeq)
|
||||
<- SHAMapTreeNode::makeFromWire(data, ledgerSeq)
|
||||
<- updateHash(hash_options{ledgerSeq})
|
||||
```
|
||||
|
||||
This solves a major piece of the puzzle - the network layer CAN provide context for hash verification!
|
||||
|
||||
### 8. Test File Patterns
|
||||
|
||||
**Key Discovery**: Tests should use actual ledger sequences, NOT placeholders!
|
||||
|
||||
#### Getting Ledger Sequence in Tests
|
||||
Tests typically use `test::jtx::Env` which provides access to ledger context:
|
||||
- `env.current()` - Returns a ReadView pointer
|
||||
- `env.current()->seq()` - Gets the current ledger sequence (already uint32_t)
|
||||
|
||||
#### Common Test Patterns
|
||||
|
||||
##### Pattern 1: Direct keylet calls
|
||||
```cpp
|
||||
// OLD
|
||||
env.le(keylet::line(alice, bob, currency));
|
||||
|
||||
// NEW
|
||||
env.le(keylet::line(hash_options{env.current()->seq(), KEYLET_TRUSTLINE}, alice, bob, currency));
|
||||
```
|
||||
|
||||
##### Pattern 2: Helper functions need env parameter
|
||||
```cpp
|
||||
// OLD
|
||||
static uint256 getCheckIndex(AccountID const& account, uint32_t seq) {
|
||||
return keylet::check(account, seq).key;
|
||||
}
|
||||
// Called as: getCheckIndex(alice, seq)
|
||||
|
||||
// NEW
|
||||
static uint256 getCheckIndex(test::jtx::Env& env, AccountID const& account, uint32_t seq) {
|
||||
return keylet::check(hash_options{env.current()->seq(), KEYLET_CHECK}, account, seq).key;
|
||||
}
|
||||
// Called as: getCheckIndex(env, alice, seq)
|
||||
```
|
||||
|
||||
##### Pattern 3: Fee calculations
|
||||
```cpp
|
||||
// Uses env.current() to get fee information
|
||||
XRPAmount const baseFeeDrops{env.current()->fees().base};
|
||||
```
|
||||
|
||||
#### Test Files Status
|
||||
- **Total test files needing fixes**: ~12-15
|
||||
- **Pattern**: All test files that create or look up ledger objects need updating
|
||||
- **Common test files**:
|
||||
- Check_test.cpp - PARTIALLY FIXED
|
||||
- AccountDelete_test.cpp
|
||||
- Escrow_test.cpp
|
||||
- NFToken_test.cpp
|
||||
- Flow_test.cpp
|
||||
- Import_test.cpp
|
||||
- etc.
|
||||
|
||||
#### Why NOT to use placeholders in tests
|
||||
- Tests verify actual ledger behavior
|
||||
- Using `hash_options{0}` would test wrong behavior after transition
|
||||
- Tests need to work both pre and post hash migration
|
||||
- `env.current()->seq()` gives the actual test ledger sequence
|
||||
|
||||
### 6. Ways to Get Ledger Sequence
|
||||
|
||||
#### From Application (app_):
|
||||
- `app_.getLedgerMaster()` gives you LedgerMaster
|
||||
|
||||
#### From LedgerMaster:
|
||||
- `getLedgerByHash(hash)` - Get ledger by hash, then call `->seq()` on it
|
||||
- `getLedgerBySeq(index)` - Get ledger by sequence directly
|
||||
- `getCurrentLedger()` - Current open ledger, call `->seq()`
|
||||
- `getClosedLedger()` - Last closed ledger, call `->seq()`
|
||||
- `getValidatedLedger()` - Last validated ledger, call `->seq()`
|
||||
- `getPublishedLedger()` - Last published ledger, call `->seq()`
|
||||
- `getCurrentLedgerIndex()` - Direct sequence number
|
||||
- `getValidLedgerIndex()` - Direct sequence number
|
||||
- `walkHashBySeq()` - Walk ledger chain to find hash by sequence
|
||||
|
||||
#### From Ledger object:
|
||||
- `ledger->seq()` - Direct method
|
||||
- `ledger->info().seq` - Through LedgerInfo
|
||||
|
||||
#### From ReadView/OpenView/ApplyView:
|
||||
- `view.seq()` - All views have this method
|
||||
- `view.info().seq` - Through LedgerInfo
|
||||
|
||||
#### Special Case - SkipListAcquire:
|
||||
- Line 67: `app_.getLedgerMaster().getLedgerByHash(hash_)`
|
||||
- If ledger exists locally, we get it and can use its seq()
|
||||
- If not, we're requesting it from peers - don't know seq yet!
|
||||
@@ -1,108 +0,0 @@
# Last Testament: SHA-512 to BLAKE3 Migration Deep Dive

## The Journey
Started with "just change the hash function" - ended up discovering why hash functions are permanent blockchain decisions.

## Key Discoveries

### 1. Ledger Implementation Added
- `Ledger::shouldMigrateToBlake3()` - checks migration window/flags
- `Ledger::migrateToBlake3()` - placeholder for actual migration
- Hook in `BuildLedger.cpp` after transaction processing
- Migration happens OUTSIDE transaction pipeline to avoid metadata explosion

### 2. The Rekeying Nightmare (REKEYING_ISSUES.md)
Every object key changes SHA512→BLAKE3, but keys are EVERYWHERE:
- **DirectoryNode.sfIndexes** - vectors of keys
- **sfPreviousTxnID, sfIndexNext, sfIndexPrevious** - direct key refs
- **Order books** - sorted by key value (order changes!)
- **Hook state** - arbitrary blobs with embedded keys
- **620k objects** with millions of interconnected references

### 3. The LUT Approach
```cpp
// Build lookup table: old_key → new_key (40MB for 620k entries)
// O(n) to build, O(n×m) to update all fields
// Must check EVERY uint256 field in EVERY object
```
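
A minimal sketch of that build pass, reusing the `visitLeaves` style from the auto-disable notes; `computeNewKey` is an assumed helper that recomputes the entry's keylet via BLAKE3:

```cpp
// Hypothetical one-pass LUT build: old (SHA-512) key -> new (BLAKE3) key.
std::unordered_map<uint256, uint256, beast::uhash<>> lut;
lut.reserve(620'000);
stateMap_.visitLeaves([&](SHAMapItem const& item) {
    lut.emplace(item.key(), computeNewKey(item));  // ~40MB for 620k entries
});
```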

**Problems:**
- LUT check on EVERY lookup forever (performance tax)
- Can't know if key is old/new without checking
- Might need bidirectional LUT (80MB)
- Can NEVER remove it (WASM has hardcoded keys!)

### 4. The WASM Hook Bomb
Hook code can hardcode keys in compiled WASM:
- Can't modify without changing hook hash
- Changing hash breaks all references to hook
- Literally unfixable without breaking hooks

### 5. MapStats Enhancement
Added ledger entry type tracking:
- Count, total bytes, avg size per type
- Uses `LedgerFormats::getInstance().findByType()` for names
- Shows 124k HookState entries (potential key references!)

### 6. Current Ledger Stats
- 620k total objects, 98MB total
- 117k DirectoryNodes (full of references)
- 124k HookState entries (arbitrary data)
- 80 HookDefinitions (WASM code)
- Millions of internal key references

## Why It's Impossible

### The Lookup Problem
```cpp
auto key = keylet::account(Alice).key;
// Which hash function? SHA512 or BLAKE3?
// NO WAY TO KNOW without timestamp/LUT/double-lookup
```

### The Fundamental Issues
1. **No key timestamp** - Can't tell if key is pre/post migration
2. **Embedded references everywhere** - sfIndexes, hook state, WASM code
3. **Permanent LUT required** - Check on every operation forever
4. **Performance death** - 2x lookups or LUT check on everything
5. **Dual-key SHAMap impossible** - Breaks ordering/structure

### The "Solutions" That Don't Work
- **Lazy migration**: LUT forever, complexity forever
- **Big bang**: Still need permanent LUT for old references
- **Heterogeneous tree**: Can't mix hash functions in Merkle tree
- **Binary search-replace**: Could corrupt data, no validation
- **Import to v2 chain**: Same reference update problems

## The Verdict

After threading ledger_index through 1000+ functions for hash context, the migration faces insurmountable challenges:

1. **WASM hooks** contain unfixable hardcoded keys
2. **Every lookup** needs LUT check forever (performance tax)
3. **Can't determine key age** from identifier alone
4. **Millions of references** need perfect updating
5. **One mistake** = corrupted ledger, lost funds

**Conclusion**: This is a "v2 blockchain" problem, not a migration problem. SHA-512 is forever.

## Lessons Learned

- Hash functions are THE fundamental addressing system
- Deeper than consensus, deeper than data structures
- Once chosen, essentially permanent
- The attempt revealed the true complexity of blockchain internals
- Even 98MB of data has millions of interconnected references
- WASM hooks make any migration effectively impossible

**Final Status**: Technically possible with permanent LUT and massive complexity. Practically impossible due to hooks, performance, and risk. SHA-512 until heat death of universe.

## Code Artifacts

- Hash classification system in `digest.h`
- `hash_options` threaded through Indexes/keylets
- Ledger migration methods (placeholder)
- MapStats with entry type breakdown
- REKEYING_ISSUES.md with full analysis
- 100+ files modified to thread ledger context

The migration died not from lack of effort, but from the discovery that some architectural decisions are truly permanent.
@@ -1,71 +0,0 @@
Yes! You're onto something quite elegant there. Let me break it down:

---

### 🔑 LedgerEntry "PriorKey" field

If each SLE carried an **optional `LedgerIndexPriorSha512`** (or whatever name), then:

* When you rekey at cutover, you don't lose continuity.
* Anyone walking the ledger can trivially build the old→new LUT on the fly.
* For "day-to-day" operations, the node only needs the canonical keys.

This basically lets you say:

* **Canonical view** = new Blake3 keys.
* **Continuity view** = traverse new keys, but if you need to resolve old references, follow the `LedgerIndexPriorSha512` link.

---

### 📊 For analysis and data science

You can now reconstruct historical continuity without keeping a giant external LUT file. Just walk the ledger and collect `(old, new)` pairs from the entries.

This means explorers, forensic tools, compliance, etc. can:

* Map old keys to new at any point.
* Handle partially migrated networks (some ledgers pre-cutover, some post).

---

### 🟢 Operational benefits

* Validators don't need to bloat memory with a global LUT — it's **distributed inside the ledger itself**.
* New ledgers "just work" for CRUD.
* Old references (hooks, weird integrations) can be handled until people migrate.

---

### ⚠️ Caveats

1. **Storage overhead**: each SLE gets ~32 bytes extra. But given only ~600k objects in the state tree, that's trivial compared to the win.
2. **Hardcoded keys in hooks/wasm**: still a landmine — if someone has *literally baked in* the old canonical key, they'll need the LUT or breakage handling at ApplyView.
3. **Consensus rules**: adding this field changes serialization — so it's an amendment, not just an operator convenience.

---

### 🔄 Proposal workflow

1. Cutover ledger → during bookkeeping, every object is rekeyed into the new canonical tree.
2. At the same time, each new object gets `LedgerIndexPriorSha512 = old_key`.
3. From then on, **every ledger post-cutover contains the LUT implicitly**.
4. After some deprecation period, once nobody references `LedgerIndexPriorSha512`, you can prune support.

---

So in practice:

* **Validators** only care about the canonical map.
* **Explorers, auditors, backward-compat systems** can lazily reconstruct the LUT.
* You don't poison `succ()` or iteration with mixed domains.

---

This actually solves both your goals:

* Day-to-day simplicity (Blake-only canonical).
* Historical continuity (walk the field if you need).

---

Want me to sketch how `ReadView::read(Keylet)` could transparently try `LUT` lookups only if the canonical miss happens, using that new field? That would make it zero-effort for old call sites.
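
For completeness, roughly what that transparent fallback could look like, assuming a prior-key index has been built by walking the ledger and collecting the `LedgerIndexPriorSha512` links (all names hypothetical):

```cpp
// Hypothetical compatibility read: canonical (BLAKE3) key first, then the
// lazily built map of prior SHA-512 key -> canonical key.
template <class PriorKeyIndex>  // e.g. an unordered_map<uint256, uint256>
std::shared_ptr<SLE const>
readWithPriorKey(ReadView const& view, Keylet const& k, PriorKeyIndex const& prior)
{
    if (auto sle = view.read(k))
        return sle;                                    // canonical hit
    if (auto it = prior.find(k.key); it != prior.end())
        return view.read(Keylet{k.type, it->second});  // resolved via prior-key link
    return nullptr;
}
```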
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
|
||||
|
||||
**Note:** Throughout this README, references to "we" or "our" pertain to the community and contributors involved in the Xahau network. It does not imply a legal entity or a specific collection of individuals.
|
||||
|
||||
[Xahau](https://xahau.network/) is a decentralized cryptographic ledger that builds upon the robust foundation of the XRP Ledger. It inherits the XRP Ledger's Byzantine Fault Tolerant consensus algorithm and enhances it with additional features and functionalities. Developers and users familiar with the XRP Ledger will find that most documentation and tutorials available on [xrpl.org](https://xrpl.org) are relevant and applicable to Xahau, including those related to running validators and managing validator keys. For Xahau specific documentation you can visit our [documentation](https://docs.xahau.network/)
|
||||
[Xahau](https://xahau.network/) is a decentralized cryptographic ledger that builds upon the robust foundation of the XRP Ledger. It inherits the XRP Ledger's Byzantine Fault Tolerant consensus algorithm and enhances it with additional features and functionalities. Developers and users familiar with the XRP Ledger will find that most documentation and tutorials available on [xrpl.org](https://xrpl.org) are relevant and applicable to Xahau, including those related to running validators and managing validator keys. For Xahau specific documentation you can visit our [documentation](https://xahau.network/)
|
||||
|
||||
## XAH
|
||||
XAH is the public, counterparty-free asset native to Xahau and functions primarily as network gas. Transactions submitted to the Xahau network must supply an appropriate amount of XAH, to be burnt by the network as a fee, in order to be successfully included in a validated ledger. In addition, XAH also acts as a bridge currency within the Xahau DEX. XAH is traded on the open-market and is available for anyone to access. Xahau was created in 2023 with a supply of 600 million units of XAH.
|
||||
@@ -12,7 +12,7 @@ The server software that powers Xahau is called `xahaud` and is available in thi
|
||||
|
||||
### Build from Source
|
||||
|
||||
* [Read the build instructions in our documentation](https://docs.xahau.network/infrastructure/building-xahau)
|
||||
* [Read the build instructions in our documentation](https://xahau.network/infrastructure/building-xahau)
|
||||
* If you encounter any issues, please [open an issue](https://github.com/xahau/xahaud/issues)
|
||||
|
||||
## Highlights of Xahau
|
||||
@@ -58,7 +58,7 @@ git-subtree. See those directories' README files for more details.
|
||||
|
||||
- **Documentation**: Documentation for XRPL, Xahau and Hooks.
|
||||
- [Xrpl Documentation](https://xrpl.org)
|
||||
- [Xahau Documentation](https://docs.xahau.network/)
|
||||
- [Xahau Documentation](https://xahau.network/)
|
||||
- [Hooks Technical Documentation](https://xrpl-hooks.readme.io/)
|
||||
- **Explorers**: Explore the Xahau ledger using various explorers:
|
||||
- [xahauexplorer.com](https://xahauexplorer.com)
|
||||
|
||||
@@ -1,332 +0,0 @@
|
||||
# Hash Migration Rekeying Issues
|
||||
|
||||
## The Fundamental Problem
|
||||
|
||||
When migrating from SHA-512 Half to BLAKE3, we're not just changing a hash function - we're changing the **keys** that identify every object in the ledger's state map. Since the hash IS the key in the SHAMap, every object needs a new address.
|
||||
|
||||
## What Needs to be Rekeyed
|
||||
|
||||
### 1. Primary Ledger Objects
|
||||
Every SLE (STLedgerEntry) in the state map has its key computed from:
|
||||
- Its type (Account, Offer, RippleState, etc.)
|
||||
- Its identifying data (AccountID, offer sequence, etc.)
|
||||
|
||||
When we change the hash function, EVERY object gets a new key.
|
||||
|
||||
### 2. Directory Structures
|
||||
Directories are ledger objects that contain **lists of other objects' keys**:
|
||||
|
||||
#### Owner Directories (`keylet::ownerDir`)
|
||||
- Contains keys of all objects owned by an account
|
||||
- Every offer, escrow, check, payment channel, etc. key stored here
|
||||
- When those objects are rekeyed, ALL these references must be updated
|
||||
|
||||
#### Order Book Directories (`keylet::book`, `keylet::quality`)
|
||||
- Contains keys of offers at specific quality levels
|
||||
- All offer keys must be updated to their new BLAKE3 values
|
||||
|
||||
#### NFT Directories (`keylet::nft_buys`, `keylet::nft_sells`)
|
||||
- Contains keys of NFT offers
|
||||
- All NFT offer keys must be updated
|
||||
|
||||
#### Hook State Directories (`keylet::hookStateDir`)
|
||||
- Contains keys of hook state entries
|
||||
- All hook state keys must be updated
|
||||
|
||||
### 3. Cross-References Between Objects
|
||||
Many objects contain direct references to other objects:
|
||||
|
||||
#### Account Objects
|
||||
- `sfNFTokenPage` - References to NFT page keys
|
||||
- Previous/Next links in directory pages
|
||||
|
||||
#### Directory Pages
|
||||
- `sfIndexes` - Vector of object keys
|
||||
- `sfPreviousTxnID` - Transaction hash references
|
||||
- `sfIndexPrevious`/`sfIndexNext` - Links to other directory pages
|
||||
|
||||
#### NFT Pages
|
||||
- References to previous/next pages in the chain
|
||||
|
||||
## The Cascade Effect
|
||||
|
||||
Rekeying isn't a simple one-pass operation:
|
||||
|
||||
```
1. Rekey Account A (SHA512 → BLAKE3)
         ↓
2. Update Account A's owner directory
         ↓
3. Rekey the owner directory itself
         ↓
4. Update all objects IN the directory with new keys
         ↓
5. Update any directories THOSE objects appear in
         ↓
6. Continue cascading...
```
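One way to read the cascade above is as a worklist: rekeying one object enqueues every container that references it, and those containers must themselves be rekeyed. A rough sketch, with hypothetical helpers (`rekey`, `directoriesReferencing`, `replaceEntry`) standing in for real code:

```cpp
#include <queue>

// Hypothetical worklist formulation of the cascade above.
// (A real implementation would also track already-processed keys.)
std::queue<uint256> pending;          // old keys still to process
pending.push(oldAccountKey);

while (!pending.empty())
{
    uint256 const oldKey = pending.front();
    pending.pop();

    uint256 const newKey = rekey(oldKey);  // SHA-512 Half -> BLAKE3
    for (uint256 const& dir : directoriesReferencing(oldKey))
    {
        replaceEntry(dir, oldKey, newKey);  // fix sfIndexes / links
        pending.push(dir);                  // the directory must be rekeyed too
    }
}
```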
|
||||
|
||||
## Implementation Challenges
|
||||
|
||||
### Challenge 1: Directory Entry Updates
|
||||
```cpp
// Current directory structure
STVector256 indexes = directory->getFieldV256(sfIndexes);
// Contains: [sha512_key1, sha512_key2, sha512_key3, ...]

// After migration needs to be:
// [blake3_key1, blake3_key2, blake3_key3, ...]
```
|
||||
|
||||
### Challenge 2: Finding All References
|
||||
There's no reverse index - given an object's key, you can't easily find all directories that reference it. You'd need to:
|
||||
1. Walk the entire state map
|
||||
2. Check every directory's `sfIndexes` field
|
||||
3. Update any matching keys (see the sketch below)
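A brute-force sketch of that reverse scan, reusing the `visitLeaves`-style walk that the lookup-table section below relies on (the usage here is illustrative, not a tested implementation):

```cpp
// Illustrative: collect every directory page whose sfIndexes references `target`.
std::vector<uint256> referencingDirs;
stateMap_.visitLeaves([&](SHAMapItem const& item) {
    SerialIter sit(item.slice());
    auto sle = std::make_shared<SLE>(sit, item.key());
    if (sle->getType() != ltDIR_NODE)
        return;

    auto const& indexes = sle->getFieldV256(sfIndexes);
    if (std::find(indexes.begin(), indexes.end(), target) != indexes.end())
        referencingDirs.push_back(item.key());
});
```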
|
||||
|
||||
### Challenge 3: Maintaining Consistency
|
||||
During migration, you need to ensure:
|
||||
- No orphaned references (keys pointing to non-existent objects)
|
||||
- No duplicate entries
|
||||
- Proper ordering in sorted structures (offer books)
|
||||
|
||||
### Challenge 4: Page Links
|
||||
Directory pages link to each other:
|
||||
```cpp
uint256 prevPage = dir->getFieldU256(sfIndexPrevious);
uint256 nextPage = dir->getFieldU256(sfIndexNext);
```
|
||||
These links are also keys that need updating!
|
||||
|
||||
## Why This Makes Migration Complex
|
||||
|
||||
### Option A: Big Bang Migration
|
||||
- Must update EVERYTHING atomically
|
||||
- Need to track old→new key mappings for entire ledger
|
||||
- Memory requirements: ~2x the state size for mapping table
|
||||
- Risk: Any missed reference breaks the ledger
|
||||
|
||||
### Option B: Heterogeneous Tree
|
||||
- Old nodes keep SHA-512 keys
|
||||
- New/modified nodes use BLAKE3
|
||||
- Problem: How do you know which hash to use for lookups?
|
||||
- Problem: Directory contains mix of old and new keys?
|
||||
|
||||
### Option C: Double Storage
|
||||
- Store objects under BOTH keys temporarily
|
||||
- Gradually migrate references
|
||||
- Problem: Massive storage overhead
|
||||
- Problem: Synchronization between copies
|
||||
|
||||
## Example: Rekeying an Offer
|
||||
|
||||
Consider rekeying a single offer:
|
||||
|
||||
1. **The Offer Itself**
|
||||
- Old key: `sha512Half(OFFER, account, sequence)`
|
||||
- New key: `blake3(OFFER, account, sequence)`
|
||||
|
||||
2. **Owner Directory**
|
||||
- Must update `sfIndexes` to replace old offer key with new
|
||||
|
||||
3. **Order Book Directory**
|
||||
- Must update `sfIndexes` in the quality directory
|
||||
- May need to update multiple quality levels if offer moved
|
||||
|
||||
4. **Account Object**
|
||||
- Update offer count/reserve tracking if needed
|
||||
|
||||
5. **The Directories Themselves**
|
||||
- Owner directory key: `sha512Half(OWNER_DIR, account, page)`
|
||||
- New key: `blake3(OWNER_DIR, account, page)`
|
||||
- Order book key: `sha512Half(BOOK_DIR, ...)`
|
||||
- New key: `blake3(BOOK_DIR, ...)`
|
||||
|
||||
## Potential Solutions
|
||||
|
||||
### 1. Migration Ledger Object
|
||||
Create a temporary "migration map" ledger object:
|
||||
```cpp
sfOldKey → sfNewKey mappings
```
|
||||
But this could be gigabytes for millions of objects.
|
||||
|
||||
### 2. Deterministic Rekeying
|
||||
Since we can determine an object's type from its `LedgerEntryType`, we could:
|
||||
1. Load each SLE
|
||||
2. Determine its type
|
||||
3. Recompute its key with BLAKE3
|
||||
4. Track the mapping
|
||||
|
||||
But we still need to update all references.
|
||||
|
||||
### 3. Lazy Migration
|
||||
Only rekey objects when they're modified:
|
||||
- Pro: Spreads migration over time
|
||||
- Con: Permanent complexity in codebase
|
||||
- Con: Must support both hash types forever
|
||||
|
||||
### 4. New State Structure
|
||||
Instead of migrating in-place, build an entirely new state map:
|
||||
1. Create new empty BLAKE3 SHAMap
|
||||
2. Walk old map, inserting with new keys
|
||||
3. Update all references during copy
|
||||
4. Atomically swap maps
|
||||
|
||||
This is essentially what BUILD_LEDGER.md suggests, but the reference updating remains complex.
|
||||
|
||||
## The Lookup Table Approach
|
||||
|
||||
After further analysis, a lookup table (LUT) based approach might actually be feasible:
|
||||
|
||||
### Algorithm Overview
|
||||
|
||||
#### Phase 1: Build the LUT (O(n))
|
||||
```cpp
std::unordered_map<uint256, uint256> old_to_new;

stateMap_.visitLeaves([&](SHAMapItem const& item) {
    SerialIter sit(item.slice());
    auto sle = std::make_shared<SLE>(sit, item.key());

    // Determine type from the SLE
    LedgerEntryType type = sle->getType();

    // Recompute key with BLAKE3 based on type
    uint256 newKey = computeBlake3Key(sle, type);
    old_to_new[item.key()] = newKey;
});
// Results in ~620k entries in the LUT
```
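The `computeBlake3Key` helper above is assumed rather than existing code. A minimal sketch would dispatch on the entry type and rebuild the same keylet inputs with the new hash (`blake3` again stands for the hypothetical BLAKE3 keylet primitive; only a couple of cases are shown):

```cpp
// Illustrative sketch: recompute an object's index under BLAKE3.
uint256
computeBlake3Key(std::shared_ptr<SLE> const& sle, LedgerEntryType type)
{
    switch (type)
    {
        case ltACCOUNT_ROOT:
            return blake3(ACCOUNT, sle->getAccountID(sfAccount));
        case ltOFFER:
            return blake3(
                OFFER,
                sle->getAccountID(sfAccount),
                sle->getFieldU32(sfSequence));
        // ... one case per LedgerEntryType; directories need their original
        // construction inputs (owner + page, book + quality), not just SLE fields.
        default:
            return {};  // placeholder for the remaining types
    }
}
```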
|
||||
|
||||
#### Phase 2: Update ALL uint256 Fields (O(n × m))
|
||||
Walk every object and check every uint256 field against the LUT:
|
||||
|
||||
```cpp
stateMap_.visitLeaves([&](SHAMapItem& item) {
    SerialIter sit(item.slice());
    auto sle = std::make_shared<SLE>(sit, item.key());
    bool modified = false;

    // Check every possible uint256 field
    modified |= updateField(sle, sfPreviousTxnID, old_to_new);
    modified |= updateField(sle, sfIndexPrevious, old_to_new);
    modified |= updateField(sle, sfIndexNext, old_to_new);
    modified |= updateField(sle, sfBookNode, old_to_new);

    // Vector fields
    modified |= updateVector(sle, sfIndexes, old_to_new);
    modified |= updateVector(sle, sfHashes, old_to_new);
    modified |= updateVector(sle, sfAmendments, old_to_new);
    modified |= updateVector(sle, sfNFTokenOffers, old_to_new);

    if (modified) {
        // Re-serialize with updated references
        Serializer s;
        sle->add(s);
        // Create new item with new key
        item = make_shamapitem(old_to_new[item.key()], s.slice());
    }
});
```
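The `updateField` and `updateVector` helpers used above are likewise assumed; conceptually they just look each stored key up in the LUT and rewrite it when a mapping exists. A sketch (the uint256 hasher for `std::unordered_map` is taken for granted, as in Phase 1):

```cpp
// Illustrative: rewrite a single uint256 field through the old->new LUT.
static bool
updateField(
    std::shared_ptr<SLE> const& sle,
    SField const& field,
    std::unordered_map<uint256, uint256> const& lut)
{
    if (!sle->isFieldPresent(field))
        return false;
    auto const it = lut.find(sle->getFieldH256(field));
    if (it == lut.end())
        return false;
    sle->setFieldH256(field, it->second);
    return true;
}

// Illustrative: rewrite every element of a Vector256 field through the LUT.
static bool
updateVector(
    std::shared_ptr<SLE> const& sle,
    SField const& field,
    std::unordered_map<uint256, uint256> const& lut)
{
    if (!sle->isFieldPresent(field))
        return false;
    STVector256 v = sle->getFieldV256(field);
    bool changed = false;
    for (std::size_t i = 0; i < v.size(); ++i)
    {
        if (auto const it = lut.find(v[i]); it != lut.end())
        {
            v[i] = it->second;
            changed = true;
        }
    }
    if (changed)
        sle->setFieldV256(field, v);
    return changed;
}
```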
|
||||
|
||||
### Complexity Analysis
|
||||
- **Phase 1**: O(n) where n = number of objects (~620k)
|
||||
- **Phase 2**: O(n × m) where m = average fields per object
|
||||
- **Hash lookups**: O(1) average case
|
||||
- **Total**: Linear in the number of objects!
|
||||
|
||||
### Memory Requirements
|
||||
- LUT size: 620k entries × (32 bytes + 32 bytes) = ~40 MB
|
||||
- Reasonable to keep in memory during migration
|
||||
|
||||
### Implementation Challenges
|
||||
|
||||
#### 1. Comprehensive Field Coverage
|
||||
Must check EVERY field that could contain a key:
|
||||
```cpp
// Singleton uint256 fields
sfPreviousTxnID, sfIndexPrevious, sfIndexNext, sfBookNode,
sfNFTokenID, sfEmitParentTxnID, sfHookOn, sfHookStateKey...

// Vector256 fields
sfIndexes, sfHashes, sfAmendments, sfNFTokenOffers,
sfHookNamespaces, sfURITokenIDs...

// Nested structures
STArray fields containing STObjects with uint256 fields
```
|
||||
|
||||
#### 2. False Positive Risk
|
||||
Any uint256 that happens to match a key would be updated:
|
||||
- Could corrupt data if a non-key field matches
|
||||
- Mitigation: Only update known reference fields
|
||||
- Risk: Missing custom fields added by hooks
|
||||
|
||||
#### 3. Order Book Sorting
|
||||
Order books are sorted by key value. After rekeying:
|
||||
- Sort order changes completely
|
||||
- Need to rebuild book directories
|
||||
- Quality levels might shift
|
||||
|
||||
### Alternative: Persistent Migration Map
|
||||
|
||||
Instead of one-time migration, store the mapping permanently:
|
||||
|
||||
```cpp
// Special ledger entries (one per ~1000 mappings)
MigrationMap_0000: {
    sfOldKeys: [old_hash_0, old_hash_1, ...],
    sfNewKeys: [new_hash_0, new_hash_1, ...]
}
MigrationMap_0001: { ... }
// ~620 of these objects
```
|
||||
|
||||
Pros:
|
||||
- Can verify historical references
|
||||
- Debugging is easier
|
||||
- Can be pruned later if needed
|
||||
|
||||
Cons:
|
||||
- Permanent state bloat (~40MB)
|
||||
- Must be loaded on every node forever
|
||||
- Lookup overhead for historical operations
|
||||
|
||||
### The Nuclear Option: Binary Search-Replace
|
||||
|
||||
For maximum chaos (don't actually do this):
|
||||
```cpp
// Build a binary LUT keyed on the raw 32-byte values
std::map<std::array<uint8_t, 32>, std::array<uint8_t, 32>> binary_lut;

// Scan every serialized blob and blindly replace any 32-byte window
// that happens to match an old key
for (auto& node : shamap) {
    auto data = node.getData();
    for (size_t i = 0; i + 32 <= data.size(); i++) {
        std::array<uint8_t, 32> window;
        std::memcpy(window.data(), &data[i], 32);
        if (auto it = binary_lut.find(window); it != binary_lut.end())
            std::memcpy(&data[i], it->second.data(), 32);
    }
}
```
|
||||
|
||||
Why this is insane:
|
||||
- False positives would corrupt data
|
||||
- No validation of what you're replacing
|
||||
- Breaks all checksums and signatures
|
||||
- Impossible to debug when it goes wrong
|
||||
|
||||
## Conclusion
|
||||
|
||||
The rekeying problem is not just about changing hash functions - it's about maintaining referential integrity across millions of interlinked objects. Every key change cascades through the reference graph, making this one of the most complex migrations possible in a blockchain system.
|
||||
|
||||
The lookup table approach makes it algorithmically feasible (linear time rather than quadratic), but the implementation complexity and risk remain enormous. You need to:
|
||||
1. Find every single field that could contain a key
|
||||
2. Update them all correctly
|
||||
3. Handle sorting changes in order books
|
||||
4. Avoid false positives
|
||||
5. Deal with custom fields from hooks
|
||||
6. Maintain consistency across the entire state
|
||||
|
||||
This is likely why most blockchains never change their hash functions after genesis - even with an efficient algorithm, the complexity and risk are enormous.
|
||||
@@ -1,179 +0,0 @@
|
||||
# Test Files That Need hash_options Fixes
|
||||
|
||||
## How to Check Compilation Errors
|
||||
|
||||
Use the `compile_single_v2.py` script to check individual files:
|
||||
```bash
# Check compilation errors for a specific file
./compile_single_v2.py src/test/app/SomeFile_test.cpp -e 3 --errors-only

# Get just the last few lines to see if it compiled successfully
./compile_single_v2.py src/test/app/SomeFile_test.cpp 2>&1 | tail -5
```
|
||||
|
||||
## Originally Fixed Files (11 files)
|
||||
|
||||
1. **src/test/app/Import_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
2. **src/test/app/LedgerReplay_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
3. **src/test/app/Offer_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
4. **src/test/app/SetHook_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
5. **src/test/app/SetHookTSH_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
6. **src/test/app/ValidatorList_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
7. **src/test/app/XahauGenesis_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options, keylet::fees() needs hash_options
|
||||
|
||||
8. **src/test/consensus/NegativeUNL_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
9. **src/test/consensus/UNLReport_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
10. **src/test/jtx/impl/balance.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
11. **src/test/jtx/impl/Env.cpp**
|
||||
- Status: Needs fixing
|
||||
- Errors: keylet functions missing hash_options
|
||||
|
||||
## Fix Strategy
|
||||
|
||||
Each file needs:
|
||||
1. keylet function calls updated to include hash_options{ledger_seq, classifier}
|
||||
2. The ledger_seq typically comes from env.current()->seq() or view.seq()
|
||||
3. The classifier matches the keylet type (e.g., KEYLET_ACCOUNT, KEYLET_FEES, etc.)
|
||||
|
||||
## Progress Tracking
|
||||
|
||||
- [x] Import_test.cpp - FIXED
|
||||
- [x] LedgerReplay_test.cpp - FIXED
|
||||
- [x] Offer_test.cpp - FIXED
|
||||
- [x] SetHook_test.cpp - FIXED
|
||||
- [x] SetHookTSH_test.cpp - FIXED
|
||||
- [x] ValidatorList_test.cpp - FIXED (sha512Half calls updated with VALIDATOR_LIST_HASH classifier)
|
||||
- [x] XahauGenesis_test.cpp - FIXED (removed duplicate hash_options parameters)
|
||||
- [x] NegativeUNL_test.cpp - FIXED
|
||||
- [x] UNLReport_test.cpp - FIXED
|
||||
- [x] balance.cpp - FIXED
|
||||
- [x] Env.cpp - FIXED
|
||||
|
||||
## All original 11 files have been successfully fixed!
|
||||
|
||||
## Remaining Files Still Needing Fixes (9 files)
|
||||
|
||||
### Status: NOT STARTED
|
||||
These files still have compilation errors and need hash_options fixes:
|
||||
|
||||
1. **src/test/jtx/impl/uritoken.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/jtx/impl/uritoken.cpp -e 3 --errors-only`
|
||||
|
||||
2. **src/test/jtx/impl/utility.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/jtx/impl/utility.cpp -e 3 --errors-only`
|
||||
|
||||
3. **src/test/ledger/Directory_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/ledger/Directory_test.cpp -e 3 --errors-only`
|
||||
|
||||
4. **src/test/ledger/Invariants_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/ledger/Invariants_test.cpp -e 3 --errors-only`
|
||||
|
||||
5. **src/test/overlay/compression_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/overlay/compression_test.cpp -e 3 --errors-only`
|
||||
|
||||
6. **src/test/rpc/AccountSet_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/rpc/AccountSet_test.cpp -e 3 --errors-only`
|
||||
|
||||
7. **src/test/rpc/AccountTx_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/rpc/AccountTx_test.cpp -e 3 --errors-only`
|
||||
|
||||
8. **src/test/rpc/Book_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/rpc/Book_test.cpp -e 3 --errors-only`
|
||||
|
||||
9. **src/test/rpc/Catalogue_test.cpp**
|
||||
- Status: Needs fixing
|
||||
- Check errors: `./compile_single_v2.py src/test/rpc/Catalogue_test.cpp -e 3 --errors-only`
|
||||
|
||||
## CRITICAL INSTRUCTIONS FOR FIXING
|
||||
|
||||
### 1. Read the HashContext enum from digest.h
|
||||
**ALWAYS** check @src/ripple/protocol/digest.h lines 37-107 for the complete HashContext enum.
|
||||
This enum defines ALL the valid classifiers you can use in hash_options.
|
||||
|
||||
### 2. Understanding hash_options constructor
|
||||
The hash_options struct (lines 110-126 in digest.h) has TWO constructors, sketched after this list:
|
||||
- `hash_options(HashContext ctx)` - classifier only, no ledger index
|
||||
- `hash_options(std::uint32_t li, HashContext ctx)` - ledger index AND classifier
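Based on those two constructors, the struct presumably looks roughly like the sketch below; treat it as orientation only and defer to the real definition in digest.h:

```cpp
// Illustrative only -- the authoritative definition lives in
// src/ripple/protocol/digest.h (lines 110-126); member names are guesses.
struct hash_options
{
    std::uint32_t ledger_index = 0;  // unset when no ledger index applies
    HashContext context;

    explicit hash_options(HashContext ctx);           // classifier only
    hash_options(std::uint32_t li, HashContext ctx);  // ledger index AND classifier
};
```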
|
||||
|
||||
### 3. How to classify each hash operation
|
||||
|
||||
#### For keylet functions:
|
||||
- Match the keylet function name to the KEYLET_* enum value
|
||||
- Examples:
|
||||
- `keylet::account()` → use `KEYLET_ACCOUNT`
|
||||
- `keylet::fees()` → use `KEYLET_FEES`
|
||||
- `keylet::trustline()` → use `KEYLET_TRUSTLINE`
|
||||
- `keylet::negativeUNL()` → use `KEYLET_NEGATIVE_UNL`
|
||||
- `keylet::UNLReport()` → use `KEYLET_UNL_REPORT`
|
||||
- `keylet::hook()` → use `KEYLET_HOOK`
|
||||
- `keylet::uriToken()` → use `KEYLET_URI_TOKEN`
|
||||
|
||||
#### For sha512Half calls:
|
||||
- Validator manifests/lists → use `VALIDATOR_LIST_HASH`
|
||||
- Hook code hashing → use `HOOK_DEFINITION` or `LEDGER_INDEX_UNNEEDED`
|
||||
- Network protocol → use appropriate context from enum
|
||||
|
||||
#### For test environments:
|
||||
- Use `env.current()->seq()` to get ledger sequence (it's already uint32_t, NO CAST NEEDED)
|
||||
- Use `ledger->seq()` for Ledger pointers
|
||||
- Use `view.seq()` for ReadView/ApplyView references
|
||||
|
||||
### 4. IMPORTANT: Read the entire file first!
|
||||
When fixing a file, ALWAYS:
|
||||
1. Read the ENTIRE file first (or at least 500+ lines) to understand the context
|
||||
2. Look for patterns of how the test is structured
|
||||
3. Check what types of ledger objects are being tested
|
||||
4. Then fix ALL occurrences systematically
|
||||
|
||||
### 5. Common patterns to fix:
|
||||
|
||||
```cpp
// OLD - missing hash_options
env.le(keylet::account(alice));

// NEW - with proper classification
env.le(keylet::account(hash_options{env.current()->seq(), KEYLET_ACCOUNT}, alice));

// OLD - sha512Half without context
auto hash = sha512Half(data);

// NEW - with proper classification
auto hash = sha512Half(hash_options{VALIDATOR_LIST_HASH}, data);
```
|
||||
@@ -1,204 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
from pathlib import Path
|
||||
from collections import defaultdict
|
||||
from typing import Set, Dict, List, Tuple
|
||||
|
||||
def find_keylet_calls(root_dir: str) -> Tuple[Dict[str, List[Tuple[str, int, str]]], Set[str]]:
|
||||
"""
|
||||
Find all keylet:: function calls with hash_options as first parameter.
|
||||
Returns a dict mapping keylet function names to lists of (file, line, full_match) tuples,
|
||||
"""
|
||||
# Pattern to match keylet::<function>(hash_options{...}, ...) calls
|
||||
# This captures:
|
||||
# 1. The keylet function name
|
||||
# 2. The entire first argument (hash_options{...})
|
||||
# 3. The content inside hash_options{...}
|
||||
pattern = re.compile(
|
||||
r'keylet::(\w+)\s*\(\s*(hash_options\s*\{([^}]*)\})',
|
||||
re.MULTILINE | re.DOTALL
|
||||
)
|
||||
|
||||
results = defaultdict(list)
|
||||
unique_first_args = set()
|
||||
|
||||
# Walk through all C++ source files
|
||||
for root, dirs, files in os.walk(Path(root_dir) / "src" / "ripple"):
|
||||
# Skip certain directories
|
||||
dirs[:] = [d for d in dirs if d not in ['.git', 'build', '__pycache__']]
|
||||
|
||||
for file in files:
|
||||
if file.endswith(('.cpp', '.h', '.hpp')):
|
||||
filepath = os.path.join(root, file)
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
content = f.read()
|
||||
|
||||
# Find all matches in this file
|
||||
for match in pattern.finditer(content):
|
||||
func_name = match.group(1)
|
||||
full_first_arg = match.group(2)
|
||||
inner_content = match.group(3).strip()
|
||||
|
||||
# Get line number
|
||||
line_num = content[:match.start()].count('\n') + 1
|
||||
|
||||
# Store the result
|
||||
rel_path = os.path.relpath(filepath, root_dir)
|
||||
results[func_name].append((rel_path, line_num, full_first_arg))
|
||||
unique_first_args.add(inner_content)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error reading {filepath}: {e}")
|
||||
|
||||
return results, unique_first_args
|
||||
|
||||
def analyze_hash_options_content(unique_args: Set[str]) -> Tuple[Dict[str, int], List[str]]:
|
||||
"""Analyze the content of hash_options{...} arguments."""
|
||||
categories = {
|
||||
'literal_0': 0,
|
||||
'literal_number': 0,
|
||||
'view_seq': 0,
|
||||
'ledger_seq': 0,
|
||||
'info_seq': 0,
|
||||
'ctx_view_seq': 0,
|
||||
'sb_seq': 0,
|
||||
'env_current_seq': 0,
|
||||
'other': 0
|
||||
}
|
||||
|
||||
other_patterns = []
|
||||
|
||||
for arg in unique_args:
|
||||
arg_clean = arg.strip()
|
||||
|
||||
if arg_clean == '0':
|
||||
categories['literal_0'] += 1
|
||||
elif arg_clean.isdigit():
|
||||
categories['literal_number'] += 1
|
||||
elif 'view.seq()' in arg_clean or 'view().seq()' in arg_clean:
|
||||
categories['view_seq'] += 1
|
||||
elif 'ledger->seq()' in arg_clean or 'ledger.seq()' in arg_clean:
|
||||
categories['ledger_seq'] += 1
|
||||
elif 'info.seq' in arg_clean or 'info_.seq' in arg_clean:
|
||||
categories['info_seq'] += 1
|
||||
elif 'ctx.view.seq()' in arg_clean:
|
||||
categories['ctx_view_seq'] += 1
|
||||
elif 'sb.seq()' in arg_clean:
|
||||
categories['sb_seq'] += 1
|
||||
elif 'env.current()->seq()' in arg_clean:
|
||||
categories['env_current_seq'] += 1
|
||||
else:
|
||||
categories['other'] += 1
|
||||
other_patterns.append(arg_clean)
|
||||
|
||||
return categories, other_patterns
|
||||
|
||||
def print_report(results: Dict[str, List], unique_args: Set[str]):
|
||||
"""Print a detailed report of findings."""
|
||||
print("=" * 80)
|
||||
print("KEYLET FUNCTION CALL ANALYSIS")
|
||||
print("=" * 80)
|
||||
|
||||
# Summary
|
||||
total_calls = sum(len(calls) for calls in results.values())
|
||||
print(f"\nTotal keylet calls found: {total_calls}")
|
||||
print(f"Unique keylet functions: {len(results)}")
|
||||
print(f"Unique hash_options arguments: {len(unique_args)}")
|
||||
|
||||
# Function frequency
|
||||
print("\n" + "=" * 80)
|
||||
print("KEYLET FUNCTIONS BY FREQUENCY:")
|
||||
print("=" * 80)
|
||||
|
||||
sorted_funcs = sorted(results.items(), key=lambda x: len(x[1]), reverse=True)
|
||||
for func_name, calls in sorted_funcs[:20]: # Top 20
|
||||
print(f" {func_name:30} {len(calls):4} calls")
|
||||
|
||||
if len(sorted_funcs) > 20:
|
||||
print(f" ... and {len(sorted_funcs) - 20} more functions")
|
||||
|
||||
# Analyze hash_options content
|
||||
print("\n" + "=" * 80)
|
||||
print("HASH_OPTIONS ARGUMENT PATTERNS:")
|
||||
print("=" * 80)
|
||||
|
||||
categories, other_patterns = analyze_hash_options_content(unique_args)
|
||||
|
||||
for category, count in sorted(categories.items(), key=lambda x: x[1], reverse=True):
|
||||
if count > 0:
|
||||
print(f" {category:25} {count:4} occurrences")
|
||||
|
||||
if other_patterns:
|
||||
print("\n" + "=" * 80)
|
||||
print("OTHER PATTERNS (need review):")
|
||||
print("=" * 80)
|
||||
for i, pattern in enumerate(sorted(set(other_patterns))[:10], 1):
|
||||
# Truncate long patterns
|
||||
display = pattern if len(pattern) <= 60 else pattern[:57] + "..."
|
||||
print(f" {i:2}. {display}")
|
||||
|
||||
# Sample calls for most common functions
|
||||
print("\n" + "=" * 80)
|
||||
print("SAMPLE CALLS (top 5 functions):")
|
||||
print("=" * 80)
|
||||
|
||||
for func_name, calls in sorted_funcs[:5]:
|
||||
print(f"\n{func_name}:")
|
||||
for filepath, line_num, arg in calls[:3]: # Show first 3 examples
|
||||
print(f" {filepath}:{line_num}")
|
||||
print(f" {arg}")
|
||||
if len(calls) > 3:
|
||||
print(f" ... and {len(calls) - 3} more")
|
||||
|
||||
def generate_replacement_script(results: Dict[str, List], unique_args: Set[str]):
|
||||
"""Generate a script to help with replacements."""
|
||||
print("\n" + "=" * 80)
|
||||
print("SUGGESTED MIGRATION STRATEGY:")
|
||||
print("=" * 80)
|
||||
|
||||
print("""
|
||||
The goal is to migrate from:
|
||||
keylet::func(hash_options{ledger_seq})
|
||||
|
||||
To either:
|
||||
keylet::func(hash_options{ledger_seq, KEYLET_CLASSIFIER})
|
||||
|
||||
Where KEYLET_CLASSIFIER would be a specific HashContext enum value
|
||||
based on the keylet function type.
|
||||
|
||||
Suggested mappings:
|
||||
- keylet::account() -> LEDGER_HEADER_HASH (or new KEYLET_ACCOUNT)
|
||||
- keylet::line() -> LEDGER_HEADER_HASH (or new KEYLET_TRUSTLINE)
|
||||
- keylet::offer() -> LEDGER_HEADER_HASH (or new KEYLET_OFFER)
|
||||
- keylet::ownerDir() -> LEDGER_HEADER_HASH (or new KEYLET_OWNER_DIR)
|
||||
- keylet::page() -> LEDGER_HEADER_HASH (or new KEYLET_DIR_PAGE)
|
||||
- keylet::fees() -> LEDGER_HEADER_HASH (or new KEYLET_FEES)
|
||||
- keylet::amendments() -> LEDGER_HEADER_HASH (or new KEYLET_AMENDMENTS)
|
||||
- keylet::check() -> LEDGER_HEADER_HASH (or new KEYLET_CHECK)
|
||||
- keylet::escrow() -> LEDGER_HEADER_HASH (or new KEYLET_ESCROW)
|
||||
- keylet::payChan() -> LEDGER_HEADER_HASH (or new KEYLET_PAYCHAN)
|
||||
- keylet::signers() -> LEDGER_HEADER_HASH (or new KEYLET_SIGNERS)
|
||||
- keylet::ticket() -> LEDGER_HEADER_HASH (or new KEYLET_TICKET)
|
||||
- keylet::nftpage_*() -> LEDGER_HEADER_HASH (or new KEYLET_NFT_PAGE)
|
||||
- keylet::nftoffer() -> LEDGER_HEADER_HASH (or new KEYLET_NFT_OFFER)
|
||||
- keylet::depositPreauth() -> LEDGER_HEADER_HASH (or new KEYLET_DEPOSIT_PREAUTH)
|
||||
""")
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Get the project root directory
|
||||
project_root = "/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc"
|
||||
|
||||
print(f"Analyzing keylet calls in: {project_root}")
|
||||
print("This may take a moment...\n")
|
||||
|
||||
# Find all keylet calls
|
||||
results, unique_args = find_keylet_calls(project_root)
|
||||
|
||||
# Print the report
|
||||
print_report(results, unique_args)
|
||||
|
||||
# Generate replacement suggestions
|
||||
generate_replacement_script(results, unique_args)
|
||||
@@ -5,8 +5,6 @@
|
||||
# debugging.
|
||||
set -ex
|
||||
|
||||
set -e
|
||||
|
||||
echo "START INSIDE CONTAINER - CORE"
|
||||
|
||||
echo "-- BUILD CORES: $3"
|
||||
@@ -14,6 +12,13 @@ echo "-- GITHUB_REPOSITORY: $1"
|
||||
echo "-- GITHUB_SHA: $2"
|
||||
echo "-- GITHUB_RUN_NUMBER: $4"
|
||||
|
||||
# Use mounted filesystem for temp files to avoid container space limits
|
||||
export TMPDIR=/io/tmp
|
||||
export TEMP=/io/tmp
|
||||
export TMP=/io/tmp
|
||||
mkdir -p /io/tmp
|
||||
echo "=== Using temp directory: /io/tmp ==="
|
||||
|
||||
umask 0000;
|
||||
|
||||
cd /io/ &&
|
||||
@@ -27,7 +32,8 @@ if [[ "$?" -ne "0" ]]; then
|
||||
exit 127
|
||||
fi
|
||||
|
||||
perl -i -pe "s/^(\\s*)-DBUILD_SHARED_LIBS=OFF/\\1-DBUILD_SHARED_LIBS=OFF\\n\\1-DROCKSDB_BUILD_SHARED=OFF/g" Builds/CMake/deps/Rocksdb.cmake &&
|
||||
BUILD_TYPE=Release
|
||||
|
||||
mv Builds/CMake/deps/WasmEdge.cmake Builds/CMake/deps/WasmEdge.old &&
|
||||
echo "find_package(LLVM REQUIRED CONFIG)
|
||||
message(STATUS \"Found LLVM \${LLVM_PACKAGE_VERSION}\")
|
||||
@@ -38,13 +44,40 @@ target_link_libraries (ripple_libs INTERFACE wasmedge)
|
||||
add_library (wasmedge::wasmedge ALIAS wasmedge)
|
||||
message(\"WasmEdge DONE\")
|
||||
" > Builds/CMake/deps/WasmEdge.cmake &&
|
||||
|
||||
export LDFLAGS="-static-libstdc++"
|
||||
|
||||
git config --global --add safe.directory /io &&
|
||||
git checkout src/ripple/protocol/impl/BuildInfo.cpp &&
|
||||
sed -i s/\"0.0.0\"/\"$(date +%Y).$(date +%-m).$(date +%-d)-$(git rev-parse --abbrev-ref HEAD)+$4\"/g src/ripple/protocol/impl/BuildInfo.cpp &&
|
||||
sed -i s/\"0.0.0\"/\"$(date +%Y).$(date +%-m).$(date +%-d)-$(git rev-parse --abbrev-ref HEAD)$(if [ -n "$4" ]; then echo "+$4"; fi)\"/g src/ripple/protocol/impl/BuildInfo.cpp &&
|
||||
conan export external/snappy --version 1.1.10 --user xahaud --channel stable &&
|
||||
conan export external/soci --version 4.0.3 --user xahaud --channel stable &&
|
||||
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable &&
|
||||
cd release-build &&
|
||||
cmake .. -DCMAKE_BUILD_TYPE=Release -DBoost_NO_BOOST_CMAKE=ON -DLLVM_DIR=/usr/lib64/llvm13/lib/cmake/llvm/ -DLLVM_LIBRARY_DIR=/usr/lib64/llvm13/lib/ -DWasmEdge_LIB=/usr/local/lib64/libwasmedge.a &&
|
||||
make -j$3 VERBOSE=1 &&
|
||||
# Install dependencies - tool_requires in conanfile.py handles glibc 2.28 compatibility
|
||||
# for build tools (protoc, grpc plugins, b2) in HBB environment
|
||||
# The tool_requires('b2/5.3.2') in conanfile.py should force b2 to build from source
|
||||
# with the correct toolchain, avoiding the GLIBCXX_3.4.29 issue
|
||||
echo "=== Installing dependencies ===" &&
|
||||
conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE \
|
||||
-o with_wasmedge=False -o tool_requires_b2=True &&
|
||||
cmake .. -G Ninja \
|
||||
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \
|
||||
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
|
||||
-DCMAKE_EXE_LINKER_FLAGS="-static-libstdc++" \
|
||||
-DLLVM_DIR=$LLVM_DIR \
|
||||
-DWasmEdge_LIB=$WasmEdge_LIB \
|
||||
-Dxrpld=TRUE \
|
||||
-Dtests=TRUE &&
|
||||
ccache -z &&
|
||||
ninja -j $3 && echo "=== Re-running final link with verbose output ===" && rm -f rippled && ninja -v rippled &&
|
||||
ccache -s &&
|
||||
strip -s rippled &&
|
||||
mv rippled xahaud &&
|
||||
echo "=== Full ldd output ===" &&
|
||||
ldd xahaud &&
|
||||
echo "=== Running libcheck ===" &&
|
||||
libcheck xahaud &&
|
||||
echo "Build host: `hostname`" > release.info &&
|
||||
echo "Build date: `date`" >> release.info &&
|
||||
echo "Build md5: `md5sum xahaud`" >> release.info &&
|
||||
@@ -69,8 +102,8 @@ fi
|
||||
cd ..;
|
||||
|
||||
mv src/ripple/net/impl/RegisterSSLCerts.cpp.old src/ripple/net/impl/RegisterSSLCerts.cpp;
|
||||
mv Builds/CMake/deps/Rocksdb.cmake.old Builds/CMake/deps/Rocksdb.cmake;
|
||||
mv Builds/CMake/deps/WasmEdge.old Builds/CMake/deps/WasmEdge.cmake;
|
||||
|
||||
rm src/certs/certbundle.h;
|
||||
git checkout src/ripple/protocol/impl/BuildInfo.cpp;
|
||||
|
||||
echo "END INSIDE CONTAINER - CORE"
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
# processes launched or upon any unbound variable.
|
||||
# We use set -x to print commands before running them to help with
|
||||
# debugging.
|
||||
set -ex
|
||||
|
||||
set -e
|
||||
|
||||
echo "START INSIDE CONTAINER - FULL"
|
||||
@@ -16,13 +14,6 @@ echo "-- GITHUB_RUN_NUMBER: $4"
|
||||
|
||||
umask 0000;
|
||||
|
||||
echo "Fixing CentOS 7 EOL"
|
||||
|
||||
sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
|
||||
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
|
||||
yum clean all
|
||||
yum-config-manager --disable centos-sclo-sclo
|
||||
|
||||
####
|
||||
|
||||
cd /io;
|
||||
@@ -73,92 +64,15 @@ then
|
||||
#endif/g" src/ripple/net/impl/RegisterSSLCerts.cpp &&
|
||||
sed -i "s/#include <ripple\/net\/RegisterSSLCerts.h>/\0\n#include <certs\/certbundle.h>/g" src/ripple/net/impl/RegisterSSLCerts.cpp
|
||||
fi
|
||||
mkdir -p .nih_c;
|
||||
mkdir -p .nih_toolchain;
|
||||
cd .nih_toolchain &&
|
||||
yum install -y wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-10-binutils zlib-static ncurses-static -y \
|
||||
devtoolset-7-gcc-c++ \
|
||||
devtoolset-9-gcc-c++ \
|
||||
devtoolset-10-gcc-c++ \
|
||||
snappy snappy-devel \
|
||||
zlib zlib-devel \
|
||||
lz4-devel \
|
||||
libasan &&
|
||||
export PATH=`echo $PATH | sed -E "s/devtoolset-9/devtoolset-7/g"` &&
|
||||
echo "-- Install ZStd 1.1.3 --" &&
|
||||
yum install epel-release -y &&
|
||||
ZSTD_VERSION="1.1.3" &&
|
||||
( wget -nc -q -O zstd-${ZSTD_VERSION}.tar.gz https://github.com/facebook/zstd/archive/v${ZSTD_VERSION}.tar.gz; echo "" ) &&
|
||||
tar xzvf zstd-${ZSTD_VERSION}.tar.gz &&
|
||||
cd zstd-${ZSTD_VERSION} &&
|
||||
make -j$3 install &&
|
||||
cd .. &&
|
||||
echo "-- Install Cmake 3.23.1 --" &&
|
||||
pwd &&
|
||||
( wget -nc -q https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz; echo "" ) &&
|
||||
tar -xzf cmake-3.23.1-linux-x86_64.tar.gz -C /hbb/ &&
|
||||
echo "-- Install Boost 1.86.0 --" &&
|
||||
pwd &&
|
||||
( wget -nc -q https://archives.boost.io/release/1.86.0/source/boost_1_86_0.tar.gz; echo "" ) &&
|
||||
tar -xzf boost_1_86_0.tar.gz &&
|
||||
cd boost_1_86_0 && ./bootstrap.sh && ./b2 link=static -j$3 && ./b2 install &&
|
||||
cd ../ &&
|
||||
echo "-- Install Protobuf 3.20.0 --" &&
|
||||
pwd &&
|
||||
( wget -nc -q https://github.com/protocolbuffers/protobuf/releases/download/v3.20.0/protobuf-all-3.20.0.tar.gz; echo "" ) &&
|
||||
tar -xzf protobuf-all-3.20.0.tar.gz &&
|
||||
cd protobuf-3.20.0/ &&
|
||||
./autogen.sh && ./configure --prefix=/usr --disable-shared link=static && make -j$3 && make install &&
|
||||
cd .. &&
|
||||
echo "-- Build LLD --" &&
|
||||
pwd &&
|
||||
ln /usr/bin/llvm-config-13 /usr/bin/llvm-config &&
|
||||
mv /opt/rh/devtoolset-9/root/usr/bin/ar /opt/rh/devtoolset-9/root/usr/bin/ar-9 &&
|
||||
ln /opt/rh/devtoolset-10/root/usr/bin/ar /opt/rh/devtoolset-9/root/usr/bin/ar &&
|
||||
( wget -nc -q https://github.com/llvm/llvm-project/releases/download/llvmorg-13.0.1/lld-13.0.1.src.tar.xz; echo "" ) &&
|
||||
( wget -nc -q https://github.com/llvm/llvm-project/releases/download/llvmorg-13.0.1/libunwind-13.0.1.src.tar.xz; echo "" ) &&
|
||||
tar -xf lld-13.0.1.src.tar.xz &&
|
||||
tar -xf libunwind-13.0.1.src.tar.xz &&
|
||||
cp -r libunwind-13.0.1.src/include libunwind-13.0.1.src/src lld-13.0.1.src/ &&
|
||||
cd lld-13.0.1.src &&
|
||||
rm -rf build CMakeCache.txt &&
|
||||
mkdir -p build &&
|
||||
cd build &&
|
||||
cmake .. -DLLVM_LIBRARY_DIR=/usr/lib64/llvm13/lib/ -DCMAKE_INSTALL_PREFIX=/usr/lib64/llvm13/ -DCMAKE_BUILD_TYPE=Release &&
|
||||
make -j$3 install &&
|
||||
ln -s /usr/lib64/llvm13/lib/include/lld /usr/include/lld &&
|
||||
cp /usr/lib64/llvm13/lib/liblld*.a /usr/local/lib/ &&
|
||||
cd ../../ &&
|
||||
echo "-- Build WasmEdge --" &&
|
||||
( wget -nc -q https://github.com/WasmEdge/WasmEdge/archive/refs/tags/0.11.2.zip; unzip -o 0.11.2.zip; ) &&
|
||||
cd WasmEdge-0.11.2 &&
|
||||
( mkdir -p build; echo "" ) &&
|
||||
cd build &&
|
||||
export BOOST_ROOT="/usr/local/src/boost_1_86_0" &&
|
||||
export Boost_LIBRARY_DIRS="/usr/local/lib" &&
|
||||
export BOOST_INCLUDEDIR="/usr/local/src/boost_1_86_0" &&
|
||||
export PATH=`echo $PATH | sed -E "s/devtoolset-7/devtoolset-9/g"` &&
|
||||
cmake .. \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DWASMEDGE_BUILD_SHARED_LIB=OFF \
|
||||
-DWASMEDGE_BUILD_STATIC_LIB=ON \
|
||||
-DWASMEDGE_BUILD_AOT_RUNTIME=ON \
|
||||
-DWASMEDGE_FORCE_DISABLE_LTO=ON \
|
||||
-DCMAKE_POSITION_INDEPENDENT_CODE=ON \
|
||||
-DWASMEDGE_LINK_LLVM_STATIC=ON \
|
||||
-DWASMEDGE_BUILD_PLUGINS=OFF \
|
||||
-DWASMEDGE_LINK_TOOLS_STATIC=ON \
|
||||
-DBoost_NO_BOOST_CMAKE=ON -DLLVM_DIR=/usr/lib64/llvm13/lib/cmake/llvm/ -DLLVM_LIBRARY_DIR=/usr/lib64/llvm13/lib/ &&
|
||||
make -j$3 install &&
|
||||
export PATH=`echo $PATH | sed -E "s/devtoolset-9/devtoolset-10/g"` &&
|
||||
cp -r include/api/wasmedge /usr/include/ &&
|
||||
cd /io/ &&
|
||||
# Environment setup moved to Dockerfile in release-builder.sh
|
||||
source /opt/rh/gcc-toolset-11/enable
|
||||
export PATH=/usr/local/bin:$PATH
|
||||
export CC='ccache gcc' &&
|
||||
export CXX='ccache g++' &&
|
||||
echo "-- Build Rippled --" &&
|
||||
pwd &&
|
||||
cp Builds/CMake/deps/Rocksdb.cmake Builds/CMake/deps/Rocksdb.cmake.old &&
|
||||
|
||||
echo "MOVING TO [ build-core.sh ]"
|
||||
cd /io;
|
||||
echo "MOVING TO [ build-core.sh ]";
|
||||
|
||||
printenv > .env.temp;
|
||||
cat .env.temp | grep '=' | sed s/\\\(^[^=]\\+=\\\)/\\1\\\"/g|sed s/\$/\\\"/g > .env;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#
|
||||
# Default validators.txt
|
||||
#
|
||||
# This file is located in the same folder as your rippled.cfg file
|
||||
# This file is located in the same folder as your xahaud.cfg file
|
||||
# and defines which validators your server trusts not to collude.
|
||||
#
|
||||
# This file is UTF-8 with DOS, UNIX, or Mac style line endings.
|
||||
@@ -17,18 +17,17 @@
|
||||
# See validator_list_sites and validator_list_keys below.
|
||||
#
|
||||
# Examples:
|
||||
# n9KorY8QtTdRx7TVDpwnG9NvyxsDwHUKUEeDLY3AkiGncVaSXZi5
|
||||
# n9MqiExBcoG19UXwoLjBJnhsxEhAZMuWwJDRdkyDz1EkEkwzQTNt
|
||||
# n9L3GdotB8a3AqtsvS7NXt4BUTQSAYyJUr9xtFj2qXJjfbZsawKY
|
||||
# n9M7G6eLwQtUjfCthWUmTN8L4oEZn1sNr46yvKrpsq58K1C6LAxz
|
||||
#
|
||||
# [validator_list_sites]
|
||||
#
|
||||
# List of URIs serving lists of recommended validators.
|
||||
#
|
||||
# Examples:
|
||||
# https://vl.ripple.com
|
||||
# https://vl.xrplf.org
|
||||
# https://vl.xahau.org
|
||||
# http://127.0.0.1:8000
|
||||
# file:///etc/opt/ripple/vl.txt
|
||||
# file:///etc/opt/xahaud/vl.txt
|
||||
#
|
||||
# [validator_list_keys]
|
||||
#
|
||||
@@ -39,50 +38,48 @@
|
||||
# Validator list keys should be hex-encoded.
|
||||
#
|
||||
# Examples:
|
||||
# ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
|
||||
# ED307A760EE34F2D0CAA103377B1969117C38B8AA0AA1E2A24DAC1F32FC97087ED
|
||||
# EDA46E9C39B1389894E690E58914DC1029602870370A0993E5B87C4A24EAF4A8E8
|
||||
#
|
||||
# [import_vl_keys]
|
||||
#
|
||||
# This section is used to import the public keys of trusted validator list publishers.
|
||||
# The keys are used to authenticate and accept new lists of trusted validators.
|
||||
# In this example, the key for the publisher "vl.xrplf.org" is imported.
|
||||
# Each key is represented as a hexadecimal string.
|
||||
#
|
||||
# Examples:
|
||||
# ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
|
||||
# ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B
|
||||
# ED42AEC58B701EEBB77356FFFEC26F83C1F0407263530F068C7C73D392C7E06FD1
|
||||
# ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
|
||||
|
||||
# The default validator list publishers that the rippled instance
|
||||
# The default validator list publishers that the xahaud instance
|
||||
# trusts.
|
||||
#
|
||||
# WARNING: Changing these values can cause your rippled instance to see a
|
||||
# validated ledger that contradicts other rippled instances'
|
||||
# WARNING: Changing these values can cause your xahaud instance to see a
|
||||
# validated ledger that contradicts other xahaud instances'
|
||||
# validated ledgers (aka a ledger fork) if your validator list(s)
|
||||
# do not sufficiently overlap with the list(s) used by others.
|
||||
# See: https://arxiv.org/pdf/1802.07242.pdf
|
||||
|
||||
[validator_list_sites]
|
||||
https://vl.ripple.com
|
||||
https://vl.xrplf.org
|
||||
https://vl.xahau.org
|
||||
|
||||
[validator_list_keys]
|
||||
#vl.ripple.com
|
||||
ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
|
||||
# vl.xrplf.org
|
||||
ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B
|
||||
# vl.xahau.org
|
||||
EDA46E9C39B1389894E690E58914DC1029602870370A0993E5B87C4A24EAF4A8E8
|
||||
|
||||
[import_vl_keys]
|
||||
# vl.xrplf.org
|
||||
ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B
|
||||
ED42AEC58B701EEBB77356FFFEC26F83C1F0407263530F068C7C73D392C7E06FD1
|
||||
ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
|
||||
|
||||
# To use the test network (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
|
||||
# To use the test network (see https://xahau.network/docs/infrastructure/installing-xahaud),
|
||||
# use the following configuration instead:
|
||||
#
|
||||
# [validator_list_sites]
|
||||
# https://vl.altnet.rippletest.net
|
||||
#
|
||||
# [validator_list_keys]
|
||||
# ED264807102805220DA0F312E71FC2C69E1552C9C5790F6C25E3729DEB573D5860
|
||||
# [validators]
|
||||
# nHBoJCE3wPgkTcrNPMHyTJFQ2t77EyCAqcBRspFCpL6JhwCm94VZ
|
||||
# nHUVv4g47bFMySAZFUKVaXUYEmfiUExSoY4FzwXULNwJRzju4XnQ
|
||||
# nHBvr8avSFTz4TFxZvvi4rEJZZtyqE3J6KAAcVWVtifsE7edPM7q
|
||||
# nHUH3Z8TRU57zetHbEPr1ynyrJhxQCwrJvNjr4j1SMjYADyW1WWe
|
||||
#
|
||||
# [import_vl_keys]
|
||||
# ED264807102805220DA0F312E71FC2C69E1552C9C5790F6C25E3729DEB573D5860
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
#
|
||||
# 2. Peer Protocol
|
||||
#
|
||||
# 3. Ripple Protocol
|
||||
# 3. XRPL Protocol
|
||||
#
|
||||
# 4. HTTPS Client
|
||||
#
|
||||
@@ -29,18 +29,17 @@
|
||||
#
|
||||
# Purpose
|
||||
#
|
||||
# This file documents and provides examples of all rippled server process
|
||||
# configuration options. When the rippled server instance is launched, it
|
||||
# This file documents and provides examples of all xahaud server process
|
||||
# configuration options. When the xahaud server instance is launched, it
|
||||
# looks for a file with the following name:
|
||||
#
|
||||
# rippled.cfg
|
||||
# xahaud.cfg
|
||||
#
|
||||
# For more information on where the rippled server instance searches for the
|
||||
# file, visit:
|
||||
# To run xahaud with a custom configuration file, use the "--conf {file}" flag.
|
||||
# By default, xahaud will look in the local working directory or the home directory.
|
||||
#
|
||||
# https://xrpl.org/commandline-usage.html#generic-options
|
||||
#
|
||||
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# This file should be named xahaud.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# or Mac style end of lines. Blank lines and lines beginning with '#' are
|
||||
# ignored. Undefined sections are reserved. No escapes are currently defined.
|
||||
#
|
||||
@@ -89,8 +88,8 @@
|
||||
#
|
||||
#
|
||||
#
|
||||
# rippled offers various server protocols to clients making inbound
|
||||
# connections. The listening ports rippled uses are "universal" ports
|
||||
# xahaud offers various server protocols to clients making inbound
|
||||
# connections. The listening ports xahaud uses are "universal" ports
|
||||
# which may be configured to handshake in one or more of the available
|
||||
# supported protocols. These universal ports simplify administration:
|
||||
# A single open port can be used for multiple protocols.
|
||||
@@ -103,7 +102,7 @@
|
||||
#
|
||||
# A list of port names and key/value pairs. A port name must start with a
|
||||
# letter and contain only letters and numbers. The name is not case-sensitive.
|
||||
# For each name in this list, rippled will look for a configuration file
|
||||
# For each name in this list, xahaud will look for a configuration file
|
||||
# section with the same name and use it to create a listening port. The
|
||||
# name is informational only; the choice of name does not affect the function
|
||||
# of the listening port.
|
||||
@@ -134,7 +133,7 @@
|
||||
# ip = 127.0.0.1
|
||||
# protocol = http
|
||||
#
|
||||
# When rippled is used as a command line client (for example, issuing a
|
||||
# When xahaud is used as a command line client (for example, issuing a
|
||||
# server stop command), the first port advertising the http or https
|
||||
# protocol will be used to make the connection.
|
||||
#
|
||||
@@ -175,7 +174,7 @@
|
||||
# same time. It is possible have both Websockets and Secure Websockets
|
||||
# together in one port.
|
||||
#
|
||||
# NOTE If no ports support the peer protocol, rippled cannot
|
||||
# NOTE If no ports support the peer protocol, xahaud cannot
|
||||
# receive incoming peer connections or become a superpeer.
|
||||
#
|
||||
# limit = <number>
|
||||
@@ -194,7 +193,7 @@
|
||||
# required. IP address restrictions, if any, will be checked in addition
|
||||
# to the credentials specified here.
|
||||
#
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# When acting in the client role, xahaud will supply these credentials
|
||||
# using HTTP's Basic Authentication headers when making outbound HTTP/S
|
||||
# requests.
|
||||
#
|
||||
@@ -237,7 +236,7 @@
|
||||
# WS, or WSS protocol interfaces. If administrative commands are
|
||||
# disabled for a port, these credentials have no effect.
|
||||
#
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# When acting in the client role, xahaud will supply these credentials
|
||||
# in the submitted JSON for any administrative command requests when
|
||||
# invoking JSON-RPC commands on remote servers.
|
||||
#
|
||||
@@ -258,7 +257,7 @@
|
||||
# resource controls will default to those for non-administrative users.
|
||||
#
|
||||
# The secure_gateway IP addresses are intended to represent
|
||||
# proxies. Since rippled trusts these hosts, they must be
|
||||
# proxies. Since xahaud trusts these hosts, they must be
|
||||
# responsible for properly authenticating the remote user.
|
||||
#
|
||||
# If some IP addresses are included for both "admin" and
|
||||
@@ -272,7 +271,7 @@
|
||||
# Use the specified files when configuring SSL on the port.
|
||||
#
|
||||
# NOTE If no files are specified and secure protocols are selected,
|
||||
# rippled will generate an internal self-signed certificate.
|
||||
# xahaud will generate an internal self-signed certificate.
|
||||
#
|
||||
# The files have these meanings:
|
||||
#
|
||||
@@ -295,12 +294,12 @@
|
||||
# Control the ciphers which the server will support over SSL on the port,
|
||||
# specified using the OpenSSL "cipher list format".
|
||||
#
|
||||
# NOTE If unspecified, rippled will automatically configure a modern
|
||||
# NOTE If unspecified, xahaud will automatically configure a modern
|
||||
# cipher suite. This default suite should be widely supported.
|
||||
#
|
||||
# You should not modify this string unless you have a specific
|
||||
# reason and cryptographic expertise. Incorrect modification may
|
||||
# keep rippled from connecting to other instances of rippled or
|
||||
# keep xahaud from connecting to other instances of xahaud or
|
||||
# prevent RPC and WebSocket clients from connecting.
|
||||
#
|
||||
# send_queue_limit = [1..65535]
|
||||
@@ -351,7 +350,7 @@
|
||||
#
|
||||
# Examples:
|
||||
# { "command" : "server_info" }
|
||||
# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" }
|
||||
# { "command" : "log_level", "partition" : "xahaudcalc", "severity" : "trace" }
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -380,16 +379,15 @@
|
||||
#-----------------
|
||||
#
|
||||
# These settings control security and access attributes of the Peer to Peer
|
||||
# server section of the rippled process. Peer Protocol implements the
|
||||
# Ripple Payment protocol. It is over peer connections that transactions
|
||||
# and validations are passed from to machine to machine, to determine the
|
||||
# contents of validated ledgers.
|
||||
# server section of the xahaud process. It is over peer connections that
|
||||
# transactions and validations are passed from machine to machine, to
|
||||
# determine the contents of validated ledgers.
|
||||
#
|
||||
#
|
||||
#
|
||||
# [ips]
|
||||
#
|
||||
# List of hostnames or ips where the Ripple protocol is served. A default
|
||||
# List of hostnames or ips where the XRPL protocol is served. A default
|
||||
# starter list is included in the code and used if no other hostnames are
|
||||
# available.
|
||||
#
|
||||
@@ -398,24 +396,23 @@
|
||||
# does not generally matter.
|
||||
#
|
||||
# The default list of entries is:
|
||||
# - r.ripple.com 51235
|
||||
# - zaphod.alloy.ee 51235
|
||||
# - sahyadri.isrdc.in 51235
|
||||
# - hubs.xahau.as16089.net 21337
|
||||
# - bacab.alloy.ee 21337
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# [ips]
|
||||
# 192.168.0.1
|
||||
# 192.168.0.1 2459
|
||||
# r.ripple.com 51235
|
||||
# 192.168.0.1 21337
|
||||
# bacab.alloy.ee 21337
|
||||
#
|
||||
#
|
||||
# [ips_fixed]
|
||||
#
|
||||
# List of IP addresses or hostnames to which rippled should always attempt to
|
||||
# List of IP addresses or hostnames to which xahaud should always attempt to
|
||||
# maintain peer connections with. This is useful for manually forming private
|
||||
# networks, for example to configure a validation server that connects to the
|
||||
# Ripple network through a public-facing server, or for building a set
|
||||
# Xahau Network through a public-facing server, or for building a set
|
||||
# of cluster peers.
|
||||
#
|
||||
# One address or domain name per line is allowed. A port must be specified
|
||||
@@ -465,7 +462,7 @@
|
||||
#
|
||||
# IP address or domain of NTP servers to use for time synchronization.
|
||||
#
|
||||
# These NTP servers are suitable for rippled servers located in the United
|
||||
# These NTP servers are suitable for xahaud servers located in the United
|
||||
# States:
|
||||
# time.windows.com
|
||||
# time.apple.com
|
||||
@@ -566,7 +563,7 @@
|
||||
#
|
||||
# minimum_txn_in_ledger_standalone = <number>
|
||||
#
|
||||
# Like minimum_txn_in_ledger when rippled is running in standalone
|
||||
# Like minimum_txn_in_ledger when xahaud is running in standalone
|
||||
# mode. Default: 1000.
|
||||
#
|
||||
# target_txn_in_ledger = <number>
|
||||
@@ -703,7 +700,7 @@
|
||||
#
|
||||
# [validator_token]
|
||||
#
|
||||
# This is an alternative to [validation_seed] that allows rippled to perform
|
||||
# This is an alternative to [validation_seed] that allows xahaud to perform
|
||||
# validation without having to store the validator keys on the network
|
||||
# connected server. The field should contain a single token in the form of a
|
||||
# base64-encoded blob.
|
||||
@@ -738,19 +735,18 @@
|
||||
#
|
||||
# Specify the file by its name or path.
|
||||
# Unless an absolute path is specified, it will be considered relative to
|
||||
# the folder in which the rippled.cfg file is located.
|
||||
# the folder in which the xahaud.cfg file is located.
|
||||
#
|
||||
# Examples:
|
||||
# /home/ripple/validators.txt
|
||||
# C:/home/ripple/validators.txt
|
||||
# /home/xahaud/validators.txt
|
||||
# C:/home/xahaud/validators.txt
|
||||
#
|
||||
# Example content:
|
||||
# [validators]
|
||||
# n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7
|
||||
# n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj
|
||||
# n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C
|
||||
# n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS
|
||||
# n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA
|
||||
# n9L3GdotB8a3AqtsvS7NXt4BUTQSAYyJUr9xtFj2qXJjfbZsawKY
|
||||
# n9LQDHLWyFuAn5BXJuW2ow5J9uGqpmSjRYS2cFRpxf6uJbxwDzvM
|
||||
# n9MCWyKVUkiatXVJTKUrAESB5kBFP8R3hm43jGHtg8WBnjv3iDfb
|
||||
# n9KWXCLRhjpajuZtULTXsy6R5xbisA6ozGxM4zdEJFq6uHiFZDvW
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -833,7 +829,7 @@
|
||||
#
|
||||
# 0: Disable the ledger replay feature [default]
|
||||
# 1: Enable the ledger replay feature. With this feature enabled, when
|
||||
# acquiring a ledger from the network, a rippled node only downloads
|
||||
# acquiring a ledger from the network, a xahaud node only downloads
|
||||
# the ledger header and the transactions instead of the whole ledger.
|
||||
# And the ledger is built by applying the transactions to the parent
|
||||
# ledger.
|
||||
@@ -844,10 +840,9 @@
|
||||
#
|
||||
#----------------
|
||||
#
|
||||
# The rippled server instance uses HTTPS GET requests in a variety of
|
||||
# The xahaud server instance uses HTTPS GET requests in a variety of
|
||||
# circumstances, including but not limited to contacting trusted domains to
|
||||
# fetch information such as mapping an email address to a Ripple Payment
|
||||
# Network address.
|
||||
# fetch information such as mapping an email address to a user's r-address.
|
||||
#
|
||||
# [ssl_verify]
|
||||
#
|
||||
@@ -884,15 +879,15 @@
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# rippled has an optional operating mode called Reporting Mode. In Reporting
|
||||
# Mode, rippled does not connect to the peer to peer network. Instead, rippled
|
||||
# will continuously extract data from one or more rippled servers that are
|
||||
# xahaud has an optional operating mode called Reporting Mode. In Reporting
|
||||
# Mode, xahaud does not connect to the peer to peer network. Instead, xahaud
|
||||
# will continuously extract data from one or more xahaud servers that are
|
||||
# connected to the peer to peer network (referred to as an ETL source).
|
||||
# Reporting mode servers will forward RPC requests that require access to the
|
||||
# peer to peer network (submit, fee, etc) to an ETL source.
|
||||
#
|
||||
# [reporting] Settings for Reporting Mode. If and only if this section is
|
||||
# present, rippled will start in reporting mode. This section
|
||||
# present, xahaud will start in reporting mode. This section
|
||||
# contains a list of ETL source names, and key-value pairs. The
|
||||
# ETL source names each correspond to a configuration file
|
||||
# section; the names must match exactly. The key-value pairs are
|
||||
@@ -997,16 +992,16 @@
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# rippled creates 4 SQLite database to hold bookkeeping information
|
||||
# xahaud creates 4 SQLite databases to hold bookkeeping information
|
||||
# about transactions, local credentials, and various other things.
|
||||
# It also creates the NodeDB, which holds all the objects that
|
||||
# make up the current and historical ledgers. In Reporting Mode, rippled
|
||||
# make up the current and historical ledgers. In Reporting Mode, xahaud
|
||||
# uses a Postgres database instead of SQLite.
|
||||
#
|
||||
# The simplest way to work with Postgres is to install it locally.
|
||||
# When it is running, execute the initdb.sh script in the current
|
||||
# directory as: sudo -u postgres ./initdb.sh
|
||||
# This will create the rippled user and an empty database of the same name.
|
||||
# This will create the xahaud user and an empty database of the same name.
|
||||
#
|
||||
# The size of the NodeDB grows in proportion to the amount of new data and the
|
||||
# amount of historical data (a configurable setting) so the performance of the
|
||||
@@ -1014,7 +1009,7 @@
|
||||
# the performance of the server.
|
||||
#
|
||||
# Partial pathnames will be considered relative to the location of
|
||||
# the rippled.cfg file.
|
||||
# the xahaud.cfg file.
|
||||
#
|
||||
# [node_db] Settings for the Node Database (required)
|
||||
#
|
||||
@@ -1025,18 +1020,18 @@
|
||||
#
|
||||
# Example:
|
||||
# type=nudb
|
||||
# path=db/nudb
|
||||
# path=/opt/xahaud/db/nudb
|
||||
#
|
||||
# The "type" field must be present and controls the choice of backend:
|
||||
#
|
||||
# type = NuDB
|
||||
#
|
||||
# NuDB is a high-performance database written by Ripple Labs and optimized
|
||||
# for rippled and solid-state drives.
|
||||
# for solid-state drives.
|
||||
#
|
||||
# NuDB maintains its high speed regardless of the amount of history
|
||||
# stored. Online delete may be selected, but is not required. NuDB is
|
||||
# available on all platforms that rippled runs on.
|
||||
# available on all platforms that xahaud runs on.
|
||||
#
|
||||
# type = RocksDB
|
||||
#
|
||||
@@ -1063,14 +1058,16 @@
|
||||
# RWDB is recommended for Validator and Peer nodes that are not required to
|
||||
# store history.
|
||||
#
|
||||
# RWDB maintains its high speed regardless of the amount of history
|
||||
# stored. Online delete should NOT be used instead RWDB will use the
|
||||
# ledger_history config value to determine how many ledgers to keep in memory.
|
||||
#
|
||||
# Required keys for NuDB, RWDB and RocksDB:
|
||||
# Required keys for NuDB and RocksDB:
|
||||
#
|
||||
# path Location to store the database
|
||||
#
|
||||
# Required keys for RWDB:
|
||||
#
|
||||
# online_delete Required. RWDB stores data in memory and will
|
||||
# grow unbounded without online_delete. See the
|
||||
# online_delete section below.
|
||||
#
|
||||
# Required keys for Cassandra:
|
||||
#
|
||||
# contact_points IP of a node in the Cassandra cluster
|
||||
@@ -1110,9 +1107,19 @@
|
||||
# if sufficient IOPS capacity is available.
|
||||
# Default 0.
|
||||
#
|
||||
# Optional keys for NuDB or RocksDB:
|
||||
# online_delete for RWDB, NuDB and RocksDB:
|
||||
#
|
||||
# earliest_seq The default is 32570 to match the XRP ledger
|
||||
# online_delete Minimum value of 256. Enable automatic purging
|
||||
# of older ledger information. Maintain at least this
|
||||
# number of ledger records online. Must be greater
|
||||
# than or equal to ledger_history.
|
||||
#
|
||||
# REQUIRED for RWDB to prevent out-of-memory errors.
|
||||
# Optional for NuDB and RocksDB.
|
||||
#
|
||||
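As a rough sketch of how the RWDB keys above fit together, a memory-backed node store for a non-history peer could look like the following. This is illustrative only: the online_delete value is a placeholder and must be at least 256 and not less than ledger_history, and no path key is needed since RWDB keeps its data in memory.

[node_db]
type=RWDB
online_delete=512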
# Optional keys for NuDB and RocksDB:
|
||||
#
|
||||
# earliest_seq The default is 32570 to match the XRP Ledger's
|
||||
# network's earliest allowed sequence. Alternate
|
||||
# networks may set this value. Minimum value of 1.
|
||||
# If a [shard_db] section is defined, and this
|
||||
@@ -1120,12 +1127,40 @@
|
||||
# it must be defined with the same value in both
|
||||
# sections.
|
||||
#
|
||||
# online_delete Minimum value of 256. Enable automatic purging
|
||||
# of older ledger information. Maintain at least this
|
||||
# number of ledger records online. Must be greater
|
||||
# than or equal to ledger_history. If using RWDB
|
||||
# this value is ignored.
|
||||
# Optional keys for NuDB only:
#
# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
# Must be a power of 2 between 4096 and 32768. Default is 4096.
#
# This parameter controls the fundamental storage unit
# size for NuDB's internal data structures. The choice
# of block size can significantly impact performance
# depending on your storage hardware and filesystem:
#
# - 4096 bytes: Optimal for most standard SSDs and
# traditional filesystems (ext4, NTFS, HFS+).
# Provides good balance of performance and storage
# efficiency. Recommended for most deployments.
#
# - 8192-16384 bytes: May improve performance on
# high-end NVMe SSDs and copy-on-write filesystems
# like ZFS or Btrfs that benefit from larger block
# alignment. Can reduce metadata overhead for large
# databases.
#
# - 32768 bytes (32K): Maximum supported block size
# for high-performance scenarios with very fast
# storage. May increase memory usage and reduce
# efficiency for smaller databases.
#
# Note: This setting cannot be changed after database
# creation without rebuilding the entire database.
# Choose carefully based on your hardware and expected
# database size.
#
# Example: nudb_block_size=4096
#
|
||||
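Putting nudb_block_size in context, a NuDB store tuned for a copy-on-write filesystem might be configured roughly as below. This is a sketch only: the path and values are placeholders, and the block size cannot be changed later without rebuilding the database.

[node_db]
type=NuDB
path=/opt/xahaud/db/nudb
nudb_block_size=16384
online_delete=512
advisory_delete=0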
# These keys modify the behavior of online_delete, and thus are only
|
||||
# relevant if online_delete is defined and non-zero:
|
||||
#
|
||||
@@ -1159,7 +1194,7 @@
|
||||
#
|
||||
# recovery_wait_seconds
|
||||
# The online delete process checks periodically
|
||||
# that rippled is still in sync with the network,
|
||||
# that xahaud is still in sync with the network,
|
||||
# and that the validated ledger is less than
|
||||
# 'age_threshold_seconds' old. If not, then continue
|
||||
# sleeping for this number of seconds and
|
||||
@@ -1198,8 +1233,8 @@
|
||||
# The server creates and maintains 4 to 5 bookkeeping SQLite databases in
|
||||
# the 'database_path' location. If you omit this configuration setting,
|
||||
# the server creates a directory called "db" located in the same place as
|
||||
# your rippled.cfg file.
|
||||
# Partial pathnames are relative to the location of the rippled executable.
|
||||
# your xahaud.cfg file.
|
||||
# Partial pathnames are relative to the location of the xahaud executable.
|
||||
#
|
||||
# [shard_db] Settings for the Shard Database (optional)
|
||||
#
|
||||
@@ -1275,7 +1310,7 @@
|
||||
# The default is "wal", which uses a write-ahead
|
||||
# log to implement database transactions.
|
||||
# Alternately, "memory" saves disk I/O, but if
|
||||
# rippled crashes during a transaction, the
|
||||
# xahaud crashes during a transaction, the
|
||||
# database is likely to be corrupted.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_journal_mode
|
||||
# for more details about the available options.
|
||||
@@ -1285,7 +1320,7 @@
|
||||
# synchronous Valid values: off, normal, full, extra
|
||||
# The default is "normal", which works well with
|
||||
# the "wal" journal mode. Alternatively, "off"
|
||||
# allows rippled to continue as soon as data is
|
||||
# allows xahaud to continue as soon as data is
|
||||
# passed to the OS, which can significantly
|
||||
# increase speed, but risks data corruption if
|
||||
# the host computer crashes before writing that
|
||||
@@ -1299,7 +1334,7 @@
|
||||
# The default is "file", which will use files
|
||||
# for temporary database tables and indices.
|
||||
# Alternatively, "memory" may save I/O, but
|
||||
# rippled does not currently use many, if any,
|
||||
# xahaud does not currently use many, if any,
|
||||
# of these temporary objects.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_temp_store
|
||||
# for more details about the available options.
|
||||
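Restating the three keys above with their documented defaults as a sketch. The enclosing section header falls outside this hunk; [sqlite] is assumed here, matching the upstream configuration.

[sqlite]
journal_mode=wal
synchronous=normal
temp_store=file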
@@ -1311,9 +1346,9 @@
|
||||
# conninfo Info for connecting to Postgres. Format is
|
||||
# postgres://[username]:[password]@[ip]/[database].
|
||||
# The database and user must already exist. If this
|
||||
# section is missing and rippled is running in
|
||||
# Reporting Mode, rippled will connect as the
|
||||
# user running rippled to a database with the
|
||||
# section is missing and xahaud is running in
|
||||
# Reporting Mode, xahaud will connect as the
|
||||
# user running xahaud to a database with the
|
||||
# same name. On Linux and Mac OS X, the connection
|
||||
# will take place using the server's UNIX domain
|
||||
# socket. On Windows, through the localhost IP
|
||||
@@ -1322,7 +1357,7 @@
|
||||
# use_tx_tables Valid values: 1, 0
|
||||
# The default is 1 (true). Determines whether to use
|
||||
# the SQLite transaction database. If set to 0,
|
||||
# rippled will not write to the transaction database,
|
||||
# xahaud will not write to the transaction database,
|
||||
# and will reject tx, account_tx and tx_history RPCs.
|
||||
# In Reporting Mode, this setting is ignored.
|
||||
#
|
||||
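A hedged example of the two Postgres keys described above, using the conninfo format given earlier. The user, password and database name are placeholders, and the enclosing section header is not shown in this hunk.

conninfo=postgres://xahaud:changeme@127.0.0.1/xahaud
use_tx_tables=1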
@@ -1350,7 +1385,7 @@
|
||||
#
|
||||
# These settings are designed to help server administrators diagnose
|
||||
# problems, and obtain detailed information about the activities being
|
||||
# performed by the rippled process.
|
||||
# performed by the xahaud process.
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -1367,7 +1402,7 @@
|
||||
#
|
||||
# Configuration parameters for the Beast. Insight stats collection module.
|
||||
#
|
||||
# Insight is a module that collects information from the areas of rippled
|
||||
# Insight is a module that collects information from the areas of xahaud
|
||||
# that have instrumentation. The configuration parameters control where the
|
||||
# collection metrics are sent. The parameters are expressed as key = value
|
||||
# pairs with no white space. The main parameter is the choice of server:
|
||||
@@ -1376,7 +1411,7 @@
|
||||
#
|
||||
# Choice of server to send metrics to. Currently the only choice is
|
||||
# "statsd" which sends UDP packets to a StatsD daemon, which must be
|
||||
# running while rippled is running. More information on StatsD is
|
||||
# running while xahaud is running. More information on StatsD is
|
||||
# available here:
|
||||
# https://github.com/b/statsd_spec
|
||||
#
|
||||
@@ -1386,7 +1421,7 @@
|
||||
# in the format, n.n.n.n:port.
|
||||
#
|
||||
# "prefix" A string prepended to each collected metric. This is used
|
||||
# to distinguish between different running instances of rippled.
|
||||
# to distinguish between different running instances of xahaud.
|
||||
#
|
||||
# If this section is missing, or the server type is unspecified or unknown,
|
||||
# statistics are not collected or reported.
|
||||
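A minimal sketch of a stats stanza using the keys just described, assuming a StatsD daemon listening locally; the address and prefix values are placeholders.

[insight]
server=statsd
address=127.0.0.1:8125
prefix=xahaud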
@@ -1413,7 +1448,7 @@
|
||||
#
|
||||
# Example:
|
||||
# [perf]
|
||||
# perf_log=/var/log/rippled/perf.log
|
||||
# perf_log=/var/log/xahaud/perf.log
|
||||
# log_interval=2
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
@@ -1422,8 +1457,8 @@
|
||||
#
|
||||
#----------
|
||||
#
|
||||
# The vote settings configure settings for the entire Ripple network.
|
||||
# While a single instance of rippled cannot unilaterally enforce network-wide
|
||||
# The vote settings configure settings for the entire Xahau Network.
|
||||
# While a single instance of xahaud cannot unilaterally enforce network-wide
|
||||
# settings, these choices become part of the instance's vote during the
|
||||
# consensus process for each voting ledger.
|
||||
#
|
||||
@@ -1435,9 +1470,9 @@
|
||||
#
|
||||
# The cost of the reference transaction fee, specified in drops.
|
||||
# The reference transaction is the simplest form of transaction.
|
||||
# It represents an XRP payment between two parties.
|
||||
# It represents an XAH payment between two parties.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xahaud will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1446,26 +1481,26 @@
|
||||
# account_reserve = <drops>
|
||||
#
|
||||
# The account reserve requirement is specified in drops. The portion of an
|
||||
# account's XRP balance that is at or below the reserve may only be
|
||||
# account's XAH balance that is at or below the reserve may only be
|
||||
# spent on transaction fees, and not transferred out of the account.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xahaud will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
# account_reserve = 10000000 # 10 XRP
|
||||
# account_reserve = 10000000 # 10 XAH
|
||||
#
|
||||
# owner_reserve = <drops>
|
||||
#
|
||||
# The owner reserve is the amount of XRP reserved in the account for
|
||||
# The owner reserve is the amount of XAH reserved in the account for
|
||||
# each ledger item owned by the account. Ledger items an account may
|
||||
# own include trust lines, open orders, and tickets.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xahaud will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
# owner_reserve = 2000000 # 2 XRP
|
||||
# owner_reserve = 2000000 # 2 XAH
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
#
|
||||
@@ -1503,7 +1538,7 @@
|
||||
# tool instead.
|
||||
#
|
||||
# This flag has no effect on the "sign" and "sign_for" command line options
|
||||
# that rippled makes available.
|
||||
# that xahaud makes available.
|
||||
#
|
||||
# The default value of this field is "false"
|
||||
#
|
||||
@@ -1582,7 +1617,7 @@
|
||||
#--------------------
|
||||
#
|
||||
# Administrators can use these values as a starting point for configuring
|
||||
# their instance of rippled, but each value should be checked to make sure
|
||||
# their instance of xahaud, but each value should be checked to make sure
|
||||
# it meets the business requirements for the organization.
|
||||
#
|
||||
# Server
|
||||
@@ -1592,7 +1627,7 @@
|
||||
# "peer"
|
||||
#
|
||||
# Peer protocol open to everyone. This is required to accept
|
||||
# incoming rippled connections. This does not affect automatic
|
||||
# incoming xahaud connections. This does not affect automatic
|
||||
# or manual outgoing Peer protocol connections.
|
||||
#
|
||||
# "rpc"
|
||||
@@ -1620,8 +1655,8 @@
|
||||
# NOTE
|
||||
#
|
||||
# To accept connections on well known ports such as 80 (HTTP) or
|
||||
# 443 (HTTPS), most operating systems will require rippled to
|
||||
# run with administrator privileges, or else rippled will not start.
|
||||
# 443 (HTTPS), most operating systems will require xahaud to
|
||||
# run with administrator privileges, or else xahaud will not start.
|
||||
|
||||
[server]
|
||||
port_rpc_admin_local
|
||||
@@ -1632,20 +1667,20 @@ port_ws_admin_local
|
||||
#ssl_cert = /etc/ssl/certs/server.crt
|
||||
|
||||
[port_rpc_admin_local]
|
||||
port = 5005
|
||||
port = 5009
|
||||
ip = 127.0.0.1
|
||||
admin = 127.0.0.1
|
||||
protocol = http
|
||||
|
||||
[port_peer]
|
||||
port = 51235
|
||||
port = 21337
|
||||
ip = 0.0.0.0
|
||||
# alternatively, to accept connections on IPv4 + IPv6, use:
|
||||
#ip = ::
|
||||
protocol = peer
|
||||
|
||||
[port_ws_admin_local]
|
||||
port = 6006
|
||||
port = 6009
|
||||
ip = 127.0.0.1
|
||||
admin = 127.0.0.1
|
||||
protocol = ws
|
||||
@@ -1656,15 +1691,15 @@ ip = 127.0.0.1
|
||||
secure_gateway = 127.0.0.1
|
||||
|
||||
#[port_ws_public]
|
||||
#port = 6005
|
||||
#port = 6008
|
||||
#ip = 127.0.0.1
|
||||
#protocol = wss
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
# This is primary persistent datastore for rippled. This includes transaction
|
||||
# This is the primary persistent datastore for xahaud. This includes transaction
|
||||
# metadata, account states, and ledger headers. Helpful information can be
|
||||
# found at https://xrpl.org/capacity-planning.html#node-db-type
|
||||
# found at https://xahau.network/docs/infrastructure/system-requirements
|
||||
# type=NuDB is recommended for non-validators with fast SSDs. Validators or
|
||||
# slow / spinning disks should use RocksDB. Caution: Spinning disks are
|
||||
# not recommended. They do not perform well enough to consistently remain
|
||||
@@ -1677,16 +1712,16 @@ secure_gateway = 127.0.0.1
|
||||
# deletion.
|
||||
[node_db]
|
||||
type=NuDB
|
||||
path=/var/lib/rippled/db/nudb
|
||||
path=/opt/xahaud/db/nudb
|
||||
online_delete=512
|
||||
advisory_delete=0
|
||||
|
||||
# This is the persistent datastore for shards. It is important for the health
|
||||
# of the ripple network that rippled operators shard as much as practical.
|
||||
# of the Xahau Network that xahaud operators shard as much as practical.
|
||||
# NuDB requires SSD storage. Helpful information can be found at
|
||||
# https://xrpl.org/history-sharding.html
|
||||
#[shard_db]
|
||||
#path=/var/lib/rippled/db/shards/nudb
|
||||
#path=/opt/xahaud/db/shards/nudb
|
||||
#max_historical_shards=50
|
||||
#
|
||||
# This optional section can be configured with a list
|
||||
@@ -1697,7 +1732,7 @@ advisory_delete=0
|
||||
#/path/2
|
||||
|
||||
[database_path]
|
||||
/var/lib/rippled/db
|
||||
/opt/xahaud/db
|
||||
|
||||
|
||||
# To use Postgres, uncomment this section and fill in the appropriate connection
|
||||
@@ -1712,7 +1747,7 @@ advisory_delete=0
|
||||
# This needs to be an absolute directory reference, not a relative one.
|
||||
# Modify this value as required.
|
||||
[debug_logfile]
|
||||
/var/log/rippled/debug.log
|
||||
/var/log/xahaud/debug.log
|
||||
|
||||
[sntp_servers]
|
||||
time.windows.com
|
||||
@@ -1720,15 +1755,19 @@ time.apple.com
|
||||
time.nist.gov
|
||||
pool.ntp.org
|
||||
|
||||
# To use the XRP test network
|
||||
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
|
||||
# To use the Xahau Test Network
|
||||
# (see https://xahau.network/docs/infrastructure/installing-xahaud),
|
||||
# use the following [ips] section:
|
||||
# [ips]
|
||||
# r.altnet.rippletest.net 51235
|
||||
# 79.110.60.121 21338
|
||||
# 79.110.60.122 21338
|
||||
# 79.110.60.124 21338
|
||||
# 79.110.60.125 21338
|
||||
|
||||
|
||||
# File containing trusted validator keys or validator list publishers.
|
||||
# Unless an absolute path is specified, it will be considered relative to the
|
||||
# folder in which the rippled.cfg file is located.
|
||||
# folder in which the xahaud.cfg file is located.
|
||||
[validators_file]
|
||||
validators.txt
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
#
|
||||
# 2. Peer Protocol
|
||||
#
|
||||
# 3. Ripple Protocol
|
||||
# 3. XRPL Protocol
|
||||
#
|
||||
# 4. HTTPS Client
|
||||
#
|
||||
@@ -29,18 +29,16 @@
|
||||
#
|
||||
# Purpose
|
||||
#
|
||||
# This file documents and provides examples of all rippled server process
|
||||
# configuration options. When the rippled server instance is launched, it
|
||||
# This file documents and provides examples of all xahaud server process
|
||||
# configuration options. When the xahaud server instance is launched, it
|
||||
# looks for a file with the following name:
|
||||
#
|
||||
# rippled.cfg
|
||||
# xahaud.cfg
|
||||
#
|
||||
# For more information on where the rippled server instance searches for the
|
||||
# file, visit:
|
||||
# To run xahaud with a custom configuration file, use the "--conf {file}" flag.
|
||||
# By default, xahaud will look in the local working directory or the home directory
|
||||
#
|
||||
# https://xrpl.org/commandline-usage.html#generic-options
|
||||
#
|
||||
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# This file should be named xahaud.cfg. This file is UTF-8 with DOS, UNIX,
|
||||
# or Mac style end of lines. Blank lines and lines beginning with '#' are
|
||||
# ignored. Undefined sections are reserved. No escapes are currently defined.
|
||||
#
|
||||
@@ -89,8 +87,8 @@
|
||||
#
|
||||
#
|
||||
#
|
||||
# rippled offers various server protocols to clients making inbound
|
||||
# connections. The listening ports rippled uses are "universal" ports
|
||||
# xahaud offers various server protocols to clients making inbound
|
||||
# connections. The listening ports xahaud uses are "universal" ports
|
||||
# which may be configured to handshake in one or more of the available
|
||||
# supported protocols. These universal ports simplify administration:
|
||||
# A single open port can be used for multiple protocols.
|
||||
@@ -103,7 +101,7 @@
|
||||
#
|
||||
# A list of port names and key/value pairs. A port name must start with a
|
||||
# letter and contain only letters and numbers. The name is not case-sensitive.
|
||||
# For each name in this list, rippled will look for a configuration file
|
||||
# For each name in this list, xahaud will look for a configuration file
|
||||
# section with the same name and use it to create a listening port. The
|
||||
# name is informational only; the choice of name does not affect the function
|
||||
# of the listening port.
|
||||
@@ -134,7 +132,7 @@
|
||||
# ip = 127.0.0.1
|
||||
# protocol = http
|
||||
#
|
||||
# When rippled is used as a command line client (for example, issuing a
|
||||
# When xahaud is used as a command line client (for example, issuing a
|
||||
# server stop command), the first port advertising the http or https
|
||||
# protocol will be used to make the connection.
|
||||
#
|
||||
@@ -175,7 +173,7 @@
|
||||
# same time. It is possible have both Websockets and Secure Websockets
|
||||
# together in one port.
|
||||
#
|
||||
# NOTE If no ports support the peer protocol, rippled cannot
|
||||
# NOTE If no ports support the peer protocol, xahaud cannot
|
||||
# receive incoming peer connections or become a superpeer.
|
||||
#
|
||||
# limit = <number>
|
||||
@@ -194,7 +192,7 @@
|
||||
# required. IP address restrictions, if any, will be checked in addition
|
||||
# to the credentials specified here.
|
||||
#
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# When acting in the client role, xahaud will supply these credentials
|
||||
# using HTTP's Basic Authentication headers when making outbound HTTP/S
|
||||
# requests.
|
||||
#
|
||||
@@ -227,7 +225,7 @@
|
||||
# WS, or WSS protocol interfaces. If administrative commands are
|
||||
# disabled for a port, these credentials have no effect.
|
||||
#
|
||||
# When acting in the client role, rippled will supply these credentials
|
||||
# When acting in the client role, xahaud will supply these credentials
|
||||
# in the submitted JSON for any administrative command requests when
|
||||
# invoking JSON-RPC commands on remote servers.
|
||||
#
|
||||
@@ -247,11 +245,11 @@
|
||||
# resource controls will default to those for non-administrative users.
|
||||
#
|
||||
# The secure_gateway IP addresses are intended to represent
|
||||
# proxies. Since rippled trusts these hosts, they must be
|
||||
# proxies. Since xahaud trusts these hosts, they must be
|
||||
# responsible for properly authenticating the remote user.
|
||||
#
|
||||
# The same IP address cannot be used in both "admin" and "secure_gateway"
|
||||
# lists for the same port. In this case, rippled will abort with an error
|
||||
# lists for the same port. In this case, xahaud will abort with an error
|
||||
# message to the console shortly after startup
|
||||
#
|
||||
# ssl_key = <filename>
|
||||
@@ -261,7 +259,7 @@
|
||||
# Use the specified files when configuring SSL on the port.
|
||||
#
|
||||
# NOTE If no files are specified and secure protocols are selected,
|
||||
# rippled will generate an internal self-signed certificate.
|
||||
# xahaud will generate an internal self-signed certificate.
|
||||
#
|
||||
# The files have these meanings:
|
||||
#
|
||||
@@ -284,12 +282,12 @@
|
||||
# Control the ciphers which the server will support over SSL on the port,
|
||||
# specified using the OpenSSL "cipher list format".
|
||||
#
|
||||
# NOTE If unspecified, rippled will automatically configure a modern
|
||||
# NOTE If unspecified, xahaud will automatically configure a modern
|
||||
# cipher suite. This default suite should be widely supported.
|
||||
#
|
||||
# You should not modify this string unless you have a specific
|
||||
# reason and cryptographic expertise. Incorrect modification may
|
||||
# keep rippled from connecting to other instances of rippled or
|
||||
# keep xahaud from connecting to other instances of xahaud or
|
||||
# prevent RPC and WebSocket clients from connecting.
|
||||
#
|
||||
# send_queue_limit = [1..65535]
|
||||
@@ -340,7 +338,7 @@
|
||||
#
|
||||
# Examples:
|
||||
# { "command" : "server_info" }
|
||||
# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" }
|
||||
# { "command" : "log_level", "partition" : "xahaucalc", "severity" : "trace" }
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -369,8 +367,8 @@
|
||||
#-----------------
|
||||
#
|
||||
# These settings control security and access attributes of the Peer to Peer
|
||||
# server section of the rippled process. Peer Protocol implements the
|
||||
# Ripple Payment protocol. It is over peer connections that transactions
|
||||
# server section of the xahaud process. Peer Protocol implements the
|
||||
# XRPL Payment protocol. It is over peer connections that transactions
|
||||
# and validations are passed from machine to machine, to determine the
|
||||
# contents of validated ledgers.
|
||||
#
|
||||
@@ -378,7 +376,7 @@
|
||||
#
|
||||
# [ips]
|
||||
#
|
||||
# List of hostnames or ips where the Ripple protocol is served. A default
|
||||
# List of hostnames or ips where the XRPL protocol is served. A default
|
||||
# starter list is included in the code and used if no other hostnames are
|
||||
# available.
|
||||
#
|
||||
@@ -387,24 +385,23 @@
|
||||
# does not generally matter.
|
||||
#
|
||||
# The default list of entries is:
|
||||
# - r.ripple.com 51235
|
||||
# - zaphod.alloy.ee 51235
|
||||
# - sahyadri.isrdc.in 51235
|
||||
# - bacab.alloy.ee 21337
|
||||
# - hubs.xahau.as16089.net 21337
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# [ips]
|
||||
# 192.168.0.1
|
||||
# 192.168.0.1 2459
|
||||
# r.ripple.com 51235
|
||||
# 192.168.0.1 21337
|
||||
# bacab.alloy.ee 21337
|
||||
#
|
||||
#
|
||||
# [ips_fixed]
|
||||
#
|
||||
# List of IP addresses or hostnames to which rippled should always attempt to
|
||||
# List of IP addresses or hostnames to which xahaud should always attempt to
|
||||
# maintain peer connections with. This is useful for manually forming private
|
||||
# networks, for example to configure a validation server that connects to the
|
||||
# Ripple network through a public-facing server, or for building a set
|
||||
# Xahau Network through a public-facing server, or for building a set
|
||||
# of cluster peers.
|
||||
#
|
||||
# One address or domain name per line is allowed. A port must be specified
|
||||
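For example, a node that should always keep connections to the default Xahau hubs listed above could pin them like this (hostnames and port taken from that list; substitute your own peers as needed):

[ips_fixed]
bacab.alloy.ee 21337
hubs.xahau.as16089.net 21337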
@@ -454,7 +451,7 @@
|
||||
#
|
||||
# IP address or domain of NTP servers to use for time synchronization.
|
||||
#
|
||||
# These NTP servers are suitable for rippled servers located in the United
|
||||
# These NTP servers are suitable for xahaud servers located in the United
|
||||
# States:
|
||||
# time.windows.com
|
||||
# time.apple.com
|
||||
@@ -555,7 +552,7 @@
|
||||
#
|
||||
# minimum_txn_in_ledger_standalone = <number>
|
||||
#
|
||||
# Like minimum_txn_in_ledger when rippled is running in standalone
|
||||
# Like minimum_txn_in_ledger when xahaud is running in standalone
|
||||
# mode. Default: 1000.
|
||||
#
|
||||
# target_txn_in_ledger = <number>
|
||||
@@ -682,7 +679,7 @@
|
||||
#
|
||||
# [validator_token]
|
||||
#
|
||||
# This is an alternative to [validation_seed] that allows rippled to perform
|
||||
# This is an alternative to [validation_seed] that allows xahaud to perform
|
||||
# validation without having to store the validator keys on the network
|
||||
# connected server. The field should contain a single token in the form of a
|
||||
# base64-encoded blob.
|
||||
@@ -717,22 +714,21 @@
|
||||
#
|
||||
# Specify the file by its name or path.
|
||||
# Unless an absolute path is specified, it will be considered relative to
|
||||
# the folder in which the rippled.cfg file is located.
|
||||
# the folder in which the xahaud.cfg file is located.
|
||||
#
|
||||
# Examples:
|
||||
# /home/ripple/validators.txt
|
||||
# C:/home/ripple/validators.txt
|
||||
# /home/xahaud/validators.txt
|
||||
# C:/home/xahaud/validators.txt
|
||||
#
|
||||
# Example content:
|
||||
# [validators]
|
||||
# n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7
|
||||
# n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj
|
||||
# n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C
|
||||
# n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS
|
||||
# n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA
|
||||
#
|
||||
#
|
||||
#
|
||||
# n9L3GdotB8a3AqtsvS7NXt4BUTQSAYyJUr9xtFj2qXJjfbZsawKY
|
||||
# n9LQDHLWyFuAn5BXJuW2ow5J9uGqpmSjRYS2cFRpxf6uJbxwDzvM
|
||||
# n9MCWyKVUkiatXVJTKUrAESB5kBFP8R3hm43jGHtg8WBnjv3iDfb
|
||||
# n9KWXCLRhjpajuZtULTXsy6R5xbisA6ozGxM4zdEJFq6uHiFZDvW
|
||||
|
||||
|
||||
|
||||
# [path_search]
|
||||
# When searching for paths, the default search aggressiveness. This can take
|
||||
# exponentially more resources as the size is increased.
|
||||
@@ -795,7 +791,7 @@
|
||||
#
|
||||
# 0: Disable the ledger replay feature [default]
|
||||
# 1: Enable the ledger replay feature. With this feature enabled, when
|
||||
# acquiring a ledger from the network, a rippled node only downloads
|
||||
# acquiring a ledger from the network, a xahaud node only downloads
|
||||
# the ledger header and the transactions instead of the whole ledger.
|
||||
# And the ledger is built by applying the transactions to the parent
|
||||
# ledger.
|
||||
@@ -806,9 +802,9 @@
|
||||
#
|
||||
#----------------
|
||||
#
|
||||
# The rippled server instance uses HTTPS GET requests in a variety of
|
||||
# The xahaud server instance uses HTTPS GET requests in a variety of
|
||||
# circumstances, including but not limited to contacting trusted domains to
|
||||
# fetch information such as mapping an email address to a Ripple Payment
|
||||
# fetch information such as mapping an email address to a XRPL Payment
|
||||
# Network address.
|
||||
#
|
||||
# [ssl_verify]
|
||||
@@ -846,15 +842,15 @@
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# rippled has an optional operating mode called Reporting Mode. In Reporting
|
||||
# Mode, rippled does not connect to the peer to peer network. Instead, rippled
|
||||
# will continuously extract data from one or more rippled servers that are
|
||||
# xahaud has an optional operating mode called Reporting Mode. In Reporting
|
||||
# Mode, xahaud does not connect to the peer to peer network. Instead, xahaud
|
||||
# will continuously extract data from one or more xahaud servers that are
|
||||
# connected to the peer to peer network (referred to as an ETL source).
|
||||
# Reporting mode servers will forward RPC requests that require access to the
|
||||
# peer to peer network (submit, fee, etc) to an ETL source.
|
||||
#
|
||||
# [reporting] Settings for Reporting Mode. If and only if this section is
|
||||
# present, rippled will start in reporting mode. This section
|
||||
# present, xahaud will start in reporting mode. This section
|
||||
# contains a list of ETL source names, and key-value pairs. The
|
||||
# ETL source names each correspond to a configuration file
|
||||
# section; the names must match exactly. The key-value pairs are
|
||||
@@ -959,16 +955,16 @@
|
||||
#
|
||||
#------------
|
||||
#
|
||||
# rippled creates 4 SQLite database to hold bookkeeping information
|
||||
# xahaud creates 4 SQLite databases to hold bookkeeping information
|
||||
# about transactions, local credentials, and various other things.
|
||||
# It also creates the NodeDB, which holds all the objects that
|
||||
# make up the current and historical ledgers. In Reporting Mode, rippled
|
||||
# make up the current and historical ledgers. In Reporting Mode, xahaud
|
||||
# uses a Postgres database instead of SQLite.
|
||||
#
|
||||
# The simplest way to work with Postgres is to install it locally.
|
||||
# When it is running, execute the initdb.sh script in the current
|
||||
# directory as: sudo -u postgres ./initdb.sh
|
||||
# This will create the rippled user and an empty database of the same name.
|
||||
# This will create the xahaud user and an empty database of the same name.
|
||||
#
|
||||
# The size of the NodeDB grows in proportion to the amount of new data and the
|
||||
# amount of historical data (a configurable setting) so the performance of the
|
||||
@@ -976,7 +972,7 @@
|
||||
# the performance of the server.
|
||||
#
|
||||
# Partial pathnames will be considered relative to the location of
|
||||
# the rippled.cfg file.
|
||||
# the xahaud.cfg file.
|
||||
#
|
||||
# [node_db] Settings for the Node Database (required)
|
||||
#
|
||||
@@ -994,11 +990,11 @@
|
||||
# type = NuDB
|
||||
#
|
||||
# NuDB is a high-performance database written by Ripple Labs and optimized
|
||||
# for rippled and solid-state drives.
|
||||
# for solid-state drives.
|
||||
#
|
||||
# NuDB maintains its high speed regardless of the amount of history
|
||||
# stored. Online delete may be selected, but is not required. NuDB is
|
||||
# available on all platforms that rippled runs on.
|
||||
# available on all platforms that xahaud runs on.
|
||||
#
|
||||
# type = RocksDB
|
||||
#
|
||||
@@ -1103,14 +1099,14 @@
|
||||
#
|
||||
# recovery_wait_seconds
|
||||
# The online delete process checks periodically
|
||||
# that rippled is still in sync with the network,
|
||||
# that xahaud is still in sync with the network,
|
||||
# and that the validated ledger is less than
|
||||
# 'age_threshold_seconds' old. By default, if it
|
||||
# is not the online delete process aborts and
|
||||
# tries again later. If 'recovery_wait_seconds'
|
||||
# is set and rippled is out of sync, but likely to
|
||||
# is set and xahaud is out of sync, but likely to
|
||||
# recover quickly, then online delete will wait
|
||||
# this number of seconds for rippled to get back
|
||||
# this number of seconds for xahaud to get back
|
||||
# into sync before it aborts.
|
||||
# Set this value if the node is otherwise staying
|
||||
# in sync, or recovering quickly, but the online
|
||||
@@ -1146,8 +1142,8 @@
|
||||
# The server creates and maintains 4 to 5 bookkeeping SQLite databases in
|
||||
# the 'database_path' location. If you omit this configuration setting,
|
||||
# the server creates a directory called "db" located in the same place as
|
||||
# your rippled.cfg file.
|
||||
# Partial pathnames are relative to the location of the rippled executable.
|
||||
# your xahaud.cfg file.
|
||||
# Partial pathnames are relative to the location of the xahaud executable.
|
||||
#
|
||||
# [shard_db] Settings for the Shard Database (optional)
|
||||
#
|
||||
@@ -1223,7 +1219,7 @@
|
||||
# The default is "wal", which uses a write-ahead
|
||||
# log to implement database transactions.
|
||||
# Alternately, "memory" saves disk I/O, but if
|
||||
# rippled crashes during a transaction, the
|
||||
# xahaud crashes during a transaction, the
|
||||
# database is likely to be corrupted.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_journal_mode
|
||||
# for more details about the available options.
|
||||
@@ -1233,7 +1229,7 @@
|
||||
# synchronous Valid values: off, normal, full, extra
|
||||
# The default is "normal", which works well with
|
||||
# the "wal" journal mode. Alternatively, "off"
|
||||
# allows rippled to continue as soon as data is
|
||||
# allows xahaud to continue as soon as data is
|
||||
# passed to the OS, which can significantly
|
||||
# increase speed, but risks data corruption if
|
||||
# the host computer crashes before writing that
|
||||
@@ -1247,7 +1243,7 @@
|
||||
# The default is "file", which will use files
|
||||
# for temporary database tables and indices.
|
||||
# Alternatively, "memory" may save I/O, but
|
||||
# rippled does not currently use many, if any,
|
||||
# xahaud does not currently use many, if any,
|
||||
# of these temporary objects.
|
||||
# See https://www.sqlite.org/pragma.html#pragma_temp_store
|
||||
# for more details about the available options.
|
||||
@@ -1259,9 +1255,9 @@
|
||||
# conninfo Info for connecting to Postgres. Format is
|
||||
# postgres://[username]:[password]@[ip]/[database].
|
||||
# The database and user must already exist. If this
|
||||
# section is missing and rippled is running in
|
||||
# Reporting Mode, rippled will connect as the
|
||||
# user running rippled to a database with the
|
||||
# section is missing and xahaud is running in
|
||||
# Reporting Mode, xahaud will connect as the
|
||||
# user running xahaud to a database with the
|
||||
# same name. On Linux and Mac OS X, the connection
|
||||
# will take place using the server's UNIX domain
|
||||
# socket. On Windows, through the localhost IP
|
||||
@@ -1270,7 +1266,7 @@
|
||||
# use_tx_tables Valid values: 1, 0
|
||||
# The default is 1 (true). Determines whether to use
|
||||
# the SQLite transaction database. If set to 0,
|
||||
# rippled will not write to the transaction database,
|
||||
# xahaud will not write to the transaction database,
|
||||
# and will reject tx, account_tx and tx_history RPCs.
|
||||
# In Reporting Mode, this setting is ignored.
|
||||
#
|
||||
@@ -1298,7 +1294,7 @@
|
||||
#
|
||||
# These settings are designed to help server administrators diagnose
|
||||
# problems, and obtain detailed information about the activities being
|
||||
# performed by the rippled process.
|
||||
# performed by the xahaud process.
|
||||
#
|
||||
#
|
||||
#
|
||||
@@ -1315,7 +1311,7 @@
|
||||
#
|
||||
# Configuration parameters for the Beast. Insight stats collection module.
|
||||
#
|
||||
# Insight is a module that collects information from the areas of rippled
|
||||
# Insight is a module that collects information from the areas of xahaud
|
||||
# that have instrumentation. The configuration parameters control where the
|
||||
# collection metrics are sent. The parameters are expressed as key = value
|
||||
# pairs with no white space. The main parameter is the choice of server:
|
||||
@@ -1324,7 +1320,7 @@
|
||||
#
|
||||
# Choice of server to send metrics to. Currently the only choice is
|
||||
# "statsd" which sends UDP packets to a StatsD daemon, which must be
|
||||
# running while rippled is running. More information on StatsD is
|
||||
# running while xahaud is running. More information on StatsD is
|
||||
# available here:
|
||||
# https://github.com/b/statsd_spec
|
||||
#
|
||||
@@ -1334,7 +1330,7 @@
|
||||
# in the format, n.n.n.n:port.
|
||||
#
|
||||
# "prefix" A string prepended to each collected metric. This is used
|
||||
# to distinguish between different running instances of rippled.
|
||||
# to distinguish between different running instances of xahaud.
|
||||
#
|
||||
# If this section is missing, or the server type is unspecified or unknown,
|
||||
# statistics are not collected or reported.
|
||||
@@ -1361,7 +1357,7 @@
|
||||
#
|
||||
# Example:
|
||||
# [perf]
|
||||
# perf_log=/var/log/rippled/perf.log
|
||||
# perf_log=/var/log/xahaud/perf.log
|
||||
# log_interval=2
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
@@ -1370,8 +1366,8 @@
|
||||
#
|
||||
#----------
|
||||
#
|
||||
# The vote settings configure settings for the entire Ripple network.
|
||||
# While a single instance of rippled cannot unilaterally enforce network-wide
|
||||
# The vote settings configure settings for the entire Xahau Network.
|
||||
# While a single instance of xahaud cannot unilaterally enforce network-wide
|
||||
# settings, these choices become part of the instance's vote during the
|
||||
# consensus process for each voting ledger.
|
||||
#
|
||||
@@ -1383,9 +1379,9 @@
|
||||
#
|
||||
# The cost of the reference transaction fee, specified in drops.
|
||||
# The reference transaction is the simplest form of transaction.
|
||||
# It represents an XRP payment between two parties.
|
||||
# It represents an XAH payment between two parties.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xahaud will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
@@ -1394,26 +1390,26 @@
|
||||
# account_reserve = <drops>
|
||||
#
|
||||
# The account reserve requirement is specified in drops. The portion of an
|
||||
# account's XRP balance that is at or below the reserve may only be
|
||||
# account's XAH balance that is at or below the reserve may only be
|
||||
# spent on transaction fees, and not transferred out of the account.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xahaud will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
# account_reserve = 10000000 # 10 XRP
|
||||
# account_reserve = 10000000 # 10 XAH
|
||||
#
|
||||
# owner_reserve = <drops>
|
||||
#
|
||||
# The owner reserve is the amount of XRP reserved in the account for
|
||||
# The owner reserve is the amount of XAH reserved in the account for
|
||||
# each ledger item owned by the account. Ledger items an account may
|
||||
# own include trust lines, open orders, and tickets.
|
||||
#
|
||||
# If this parameter is unspecified, rippled will use an internal
|
||||
# If this parameter is unspecified, xahaud will use an internal
|
||||
# default. Don't change this without understanding the consequences.
|
||||
#
|
||||
# Example:
|
||||
# owner_reserve = 2000000 # 2 XRP
|
||||
# owner_reserve = 2000000 # 2 XAH
|
||||
#
|
||||
#-------------------------------------------------------------------------------
|
||||
#
|
||||
@@ -1451,7 +1447,7 @@
|
||||
# tool instead.
|
||||
#
|
||||
# This flag has no effect on the "sign" and "sign_for" command line options
|
||||
# that rippled makes available.
|
||||
# that xahaud makes available.
|
||||
#
|
||||
# The default value of this field is "false"
|
||||
#
|
||||
@@ -1530,7 +1526,7 @@
|
||||
#--------------------
|
||||
#
|
||||
# Administrators can use these values as a starting point for configuring
|
||||
# their instance of rippled, but each value should be checked to make sure
|
||||
# their instance of xahaud, but each value should be checked to make sure
|
||||
# it meets the business requirements for the organization.
|
||||
#
|
||||
# Server
|
||||
@@ -1540,7 +1536,7 @@
|
||||
# "peer"
|
||||
#
|
||||
# Peer protocol open to everyone. This is required to accept
|
||||
# incoming rippled connections. This does not affect automatic
|
||||
# incoming xahaud connections. This does not affect automatic
|
||||
# or manual outgoing Peer protocol connections.
|
||||
#
|
||||
# "rpc"
|
||||
@@ -1568,8 +1564,8 @@
|
||||
# NOTE
|
||||
#
|
||||
# To accept connections on well known ports such as 80 (HTTP) or
|
||||
# 443 (HTTPS), most operating systems will require rippled to
|
||||
# run with administrator privileges, or else rippled will not start.
|
||||
# 443 (HTTPS), most operating systems will require xahaud to
|
||||
# run with administrator privileges, or else xahaud will not start.
|
||||
|
||||
[server]
|
||||
port_rpc_admin_local
|
||||
@@ -1587,7 +1583,7 @@ admin = 127.0.0.1
|
||||
protocol = http
|
||||
|
||||
[port_peer]
|
||||
port = 51235
|
||||
port = 21337
|
||||
ip = 0.0.0.0
|
||||
# alternatively, to accept connections on IPv4 + IPv6, use:
|
||||
#ip = ::
|
||||
@@ -1611,9 +1607,9 @@ protocol = ws
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
# This is primary persistent datastore for rippled. This includes transaction
|
||||
# This is the primary persistent datastore for xahaud. This includes transaction
|
||||
# metadata, account states, and ledger headers. Helpful information can be
|
||||
# found at https://xrpl.org/capacity-planning.html#node-db-type
|
||||
# found at https://xahau.network/docs/infrastructure/system-requirements
|
||||
# type=NuDB is recommended for non-validators with fast SSDs. Validators or
|
||||
# slow / spinning disks should use RocksDB. Caution: Spinning disks are
|
||||
# not recommended. They do not perform well enough to consistently remain
|
||||
@@ -1626,16 +1622,16 @@ protocol = ws
|
||||
# deletion.
|
||||
[node_db]
|
||||
type=NuDB
|
||||
path=/var/lib/rippled-reporting/db/nudb
|
||||
path=/opt/xahaud-reporting/db/nudb
|
||||
# online_delete=512 #
|
||||
advisory_delete=0
|
||||
|
||||
# This is the persistent datastore for shards. It is important for the health
|
||||
# of the ripple network that rippled operators shard as much as practical.
|
||||
# of the Xahau Network that xahaud operators shard as much as practical.
|
||||
# NuDB requires SSD storage. Helpful information can be found at
|
||||
# https://xrpl.org/history-sharding.html
|
||||
#[shard_db]
|
||||
#path=/var/lib/rippled/db/shards/nudb
|
||||
#path=/opt/xahaud-reporting/db/shards/nudb
|
||||
#max_historical_shards=50
|
||||
#
|
||||
# This optional section can be configured with a list
|
||||
@@ -1646,7 +1642,7 @@ advisory_delete=0
|
||||
#/path/2
|
||||
|
||||
[database_path]
|
||||
/var/lib/rippled-reporting/db
|
||||
/opt/xahaud-reporting/db
|
||||
|
||||
# To use Postgres, uncomment this section and fill in the appropriate connection
|
||||
# info. Postgres can only be used in Reporting Mode.
|
||||
@@ -1660,7 +1656,7 @@ advisory_delete=0
|
||||
# This needs to be an absolute directory reference, not a relative one.
|
||||
# Modify this value as required.
|
||||
[debug_logfile]
|
||||
/var/log/rippled-reporting/debug.log
|
||||
/var/log/xahaud-reporting/debug.log
|
||||
|
||||
[sntp_servers]
|
||||
time.windows.com
|
||||
@@ -1668,17 +1664,20 @@ time.apple.com
|
||||
time.nist.gov
|
||||
pool.ntp.org
|
||||
|
||||
# To use the XRP test network
|
||||
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
|
||||
# To use the Xahau Test Network
|
||||
# (see https://xahau.network/docs/infrastructure/installing-xahaud),
|
||||
# use the following [ips] section:
|
||||
# [ips]
|
||||
# r.altnet.rippletest.net 51235
|
||||
# 79.110.60.121 21338
|
||||
# 79.110.60.122 21338
|
||||
# 79.110.60.124 21338
|
||||
# 79.110.60.125 21338
|
||||
|
||||
# File containing trusted validator keys or validator list publishers.
|
||||
# Unless an absolute path is specified, it will be considered relative to the
|
||||
# folder in which the rippled.cfg file is located.
|
||||
# folder in which the xahaud.cfg file is located.
|
||||
[validators_file]
|
||||
/opt/rippled-reporting/etc/validators.txt
|
||||
/opt/xahaud-reporting/etc/validators.txt
|
||||
|
||||
# Turn down default logging to save disk space in the long run.
|
||||
# Valid values here are trace, debug, info, warning, error, and fatal
|
||||
@@ -1699,5 +1698,5 @@ etl_source
|
||||
|
||||
[etl_source]
|
||||
source_grpc_port=50051
|
||||
source_ws_port=6005
|
||||
source_ws_port=6008
|
||||
source_ip=127.0.0.1
|
||||
9
cfg/rippled-standalone.cfg → cfg/xahaud-standalone.cfg
Executable file → Normal file
@@ -1,4 +1,4 @@
|
||||
# standalone: ./rippled -a --ledgerfile config/genesis.json --conf config/rippled-standalone.cfg
|
||||
# standalone: ./xahaud -a --ledgerfile config/genesis.json --conf config/xahaud-standalone.cfg
|
||||
[server]
|
||||
port_rpc_admin_local
|
||||
port_ws_public
|
||||
@@ -21,7 +21,7 @@ ip = 0.0.0.0
|
||||
protocol = ws
|
||||
|
||||
# [port_peer]
|
||||
# port = 51235
|
||||
# port = 21337
|
||||
# ip = 0.0.0.0
|
||||
# protocol = peer
|
||||
|
||||
@@ -69,7 +69,8 @@ time.nist.gov
|
||||
pool.ntp.org
|
||||
|
||||
[ips]
|
||||
r.ripple.com 51235
|
||||
bacab.alloy.ee 21337
|
||||
hubs.xahau.as16089.net 21337
|
||||
|
||||
[validators_file]
|
||||
validators-example.txt
|
||||
@@ -94,7 +95,7 @@ validators-example.txt
|
||||
1000000
|
||||
|
||||
[network_id]
|
||||
21338
|
||||
21337
|
||||
|
||||
[amendments]
|
||||
740352F2412A9909880C23A559FCECEDA3BE2126FED62FC7660D628A06927F11 Flow
|
||||
@@ -1,127 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Compile a single file using commands from compile_commands.json
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def find_compile_command(compile_commands, file_path):
|
||||
"""Find the compile command for a given file path."""
|
||||
# Normalize the input path
|
||||
abs_path = os.path.abspath(file_path)
|
||||
|
||||
for entry in compile_commands:
|
||||
# Check if this entry matches our file
|
||||
entry_file = os.path.abspath(entry['file'])
|
||||
if entry_file == abs_path:
|
||||
return entry
|
||||
|
||||
# Try relative path matching as fallback
|
||||
for entry in compile_commands:
|
||||
if entry['file'].endswith(file_path) or file_path.endswith(entry['file']):
|
||||
return entry
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Compile a single file using compile_commands.json'
|
||||
)
|
||||
parser.add_argument(
|
||||
'file',
|
||||
help='Path to the source file to compile'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--verbose', '-v',
|
||||
action='store_true',
|
||||
help='Show the compile command being executed'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--dump-output', '-d',
|
||||
action='store_true',
|
||||
help='Dump the full output from the compiler'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--compile-db',
|
||||
default='build/compile_commands.json',
|
||||
help='Path to compile_commands.json (default: build/compile_commands.json)'
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Check if compile_commands.json exists
|
||||
if not os.path.exists(args.compile_db):
|
||||
print(f"Error: {args.compile_db} not found", file=sys.stderr)
|
||||
print("Make sure you've run cmake with -DCMAKE_EXPORT_COMPILE_COMMANDS=ON", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Load compile commands
|
||||
try:
|
||||
with open(args.compile_db, 'r') as f:
|
||||
compile_commands = json.load(f)
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"Error parsing {args.compile_db}: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Find the compile command for the requested file
|
||||
entry = find_compile_command(compile_commands, args.file)
|
||||
|
||||
if not entry:
|
||||
print(f"Error: No compile command found for {args.file}", file=sys.stderr)
|
||||
print(f"Available files in {args.compile_db}:", file=sys.stderr)
|
||||
# Show first 10 files as examples
|
||||
for i, cmd in enumerate(compile_commands[:10]):
|
||||
print(f" {cmd['file']}", file=sys.stderr)
|
||||
if len(compile_commands) > 10:
|
||||
print(f" ... and {len(compile_commands) - 10} more", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Extract the command and directory
|
||||
command = entry['command']
|
||||
directory = entry.get('directory', '.')
|
||||
|
||||
if args.verbose:
|
||||
print(f"Directory: {directory}", file=sys.stderr)
|
||||
print(f"Command: {command}", file=sys.stderr)
|
||||
print("-" * 80, file=sys.stderr)
|
||||
|
||||
# Execute the compile command
|
||||
try:
|
||||
result = subprocess.run(
|
||||
command,
|
||||
shell=True,
|
||||
cwd=directory,
|
||||
capture_output=not args.dump_output,
|
||||
text=True
|
||||
)
|
||||
|
||||
if args.dump_output:
|
||||
# Output was already printed to stdout/stderr
|
||||
pass
|
||||
else:
|
||||
# Only show output if there were errors or warnings
|
||||
if result.stderr:
|
||||
print(result.stderr, file=sys.stderr)
|
||||
if result.stdout:
|
||||
print(result.stdout)
|
||||
|
||||
# Exit with the same code as the compiler
|
||||
sys.exit(result.returncode)
|
||||
|
||||
except subprocess.SubprocessError as e:
|
||||
print(f"Error executing compile command: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
except KeyboardInterrupt:
|
||||
print("\nCompilation interrupted", file=sys.stderr)
|
||||
sys.exit(130)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,311 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Compile a single file using commands from compile_commands.json
|
||||
Enhanced version with error context display
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
import re
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def setup_logging(level):
|
||||
"""Setup logging configuration."""
|
||||
numeric_level = getattr(logging, level.upper(), None)
|
||||
if not isinstance(numeric_level, int):
|
||||
raise ValueError(f'Invalid log level: {level}')
|
||||
|
||||
logging.basicConfig(
|
||||
level=numeric_level,
|
||||
format='[%(levelname)s] %(message)s',
|
||||
stream=sys.stderr
|
||||
)
|
||||
|
||||
|
||||
def find_compile_command(compile_commands, file_path):
|
||||
"""Find the compile command for a given file path."""
|
||||
# Normalize the input path
|
||||
abs_path = os.path.abspath(file_path)
|
||||
logging.debug(f"Looking for compile command for: {abs_path}")
|
||||
|
||||
for entry in compile_commands:
|
||||
# Check if this entry matches our file
|
||||
entry_file = os.path.abspath(entry['file'])
|
||||
if entry_file == abs_path:
|
||||
logging.debug(f"Found exact match: {entry_file}")
|
||||
return entry
|
||||
|
||||
# Try relative path matching as fallback
|
||||
for entry in compile_commands:
|
||||
if entry['file'].endswith(file_path) or file_path.endswith(entry['file']):
|
||||
logging.debug(f"Found relative match: {entry['file']}")
|
||||
return entry
|
||||
|
||||
logging.debug("No compile command found")
|
||||
return None
|
||||
|
||||
|
||||
def extract_errors_with_context(output, file_path, context_lines=3):
|
||||
"""Extract error messages with context from compiler output."""
|
||||
lines = output.split('\n')
|
||||
errors = []
|
||||
|
||||
logging.debug(f"Parsing {len(lines)} lines of compiler output")
|
||||
logging.debug(f"Looking for errors in file: {file_path}")
|
||||
|
||||
# Pattern to match error lines from clang/gcc
|
||||
# Matches: filename:line:col: error: message
|
||||
# Also handle color codes
|
||||
error_pattern = re.compile(r'([^:]+):(\d+):(\d+):\s*(?:\x1b\[[0-9;]*m)?\s*(error|warning):\s*(?:\x1b\[[0-9;]*m)?\s*(.*?)(?:\x1b\[[0-9;]*m)?$')
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
# Strip ANSI color codes for pattern matching
|
||||
clean_line = re.sub(r'\x1b\[[0-9;]*m', '', line)
|
||||
match = error_pattern.search(clean_line)
|
||||
|
||||
if match:
|
||||
filename = match.group(1)
|
||||
line_num = int(match.group(2))
|
||||
col_num = int(match.group(3))
|
||||
error_type = match.group(4)
|
||||
message = match.group(5)
|
||||
|
||||
logging.debug(f"Found {error_type} at {filename}:{line_num}:{col_num}")
|
||||
|
||||
# Check if this error is from the file we're compiling
|
||||
# Be more flexible with path matching
|
||||
if (file_path in filename or
|
||||
filename.endswith(os.path.basename(file_path)) or
|
||||
os.path.basename(filename) == os.path.basename(file_path)):
|
||||
|
||||
logging.debug(f" -> Including {error_type}: {message[:50]}...")
|
||||
|
||||
error_info = {
|
||||
'line': line_num,
|
||||
'col': col_num,
|
||||
'type': error_type,
|
||||
'message': message,
|
||||
'full_line': line, # Keep original line with colors
|
||||
'context_before': [],
|
||||
'context_after': []
|
||||
}
|
||||
|
||||
# Get context lines from compiler output
|
||||
for j in range(max(0, i - context_lines), i):
|
||||
error_info['context_before'].append(lines[j])
|
||||
|
||||
for j in range(i + 1, min(len(lines), i + context_lines + 1)):
|
||||
error_info['context_after'].append(lines[j])
|
||||
|
||||
errors.append(error_info)
|
||||
else:
|
||||
logging.debug(f" -> Skipping (different file: {filename})")
    logging.info(f"Found {len(errors)} errors/warnings")
    return errors


def read_source_context(file_path, line_num, context_lines=3):
    """Read context from the source file around a specific line."""
    try:
        with open(file_path, 'r') as f:
            lines = f.readlines()

        start = max(0, line_num - context_lines - 1)
        end = min(len(lines), line_num + context_lines)

        context = []
        for i in range(start, end):
            line_marker = '>>> ' if i == line_num - 1 else ' '
            context.append(f"{i+1:4d}:{line_marker}{lines[i].rstrip()}")

        return '\n'.join(context)
    except Exception as e:
        logging.warning(f"Could not read source context: {e}")
        return None


def format_error_with_context(error, file_path, show_source_context=False):
    """Format an error with its context."""
    output = []
    output.append(f"\n{'='*80}")
    output.append(f"Error at line {error['line']}, column {error['col']}:")
    output.append(f" {error['message']}")

    if show_source_context:
        source_context = read_source_context(file_path, error['line'], 3)
        if source_context:
            output.append("\nSource context:")
            output.append(source_context)

    if error['context_before'] or error['context_after']:
        output.append("\nCompiler output context:")
        for line in error['context_before']:
            output.append(f" {line}")
        output.append(f">>> {error['full_line']}")
        for line in error['context_after']:
            output.append(f" {line}")

    return '\n'.join(output)


def main():
    parser = argparse.ArgumentParser(
        description='Compile a single file using compile_commands.json with enhanced error display'
    )
    parser.add_argument(
        'file',
        help='Path to the source file to compile'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='Show the compile command being executed'
    )
    parser.add_argument(
        '--dump-output', '-d',
        action='store_true',
        help='Dump the full output from the compiler'
    )
    parser.add_argument(
        '--show-error-context', '-e',
        type=int,
        metavar='N',
        help='Show N lines of context around each error (implies capturing output)'
    )
    parser.add_argument(
        '--show-source-context', '-s',
        action='store_true',
        help='Show source file context around errors'
    )
    parser.add_argument(
        '--errors-only',
        action='store_true',
        help='Only show errors, not warnings'
    )
    parser.add_argument(
        '--compile-db',
        default='build/compile_commands.json',
        help='Path to compile_commands.json (default: build/compile_commands.json)'
    )
    parser.add_argument(
        '--log-level', '-l',
        default='WARNING',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
        help='Set logging level (default: WARNING)'
    )

    args = parser.parse_args()

    # Setup logging
    setup_logging(args.log_level)

    # Check if compile_commands.json exists
    if not os.path.exists(args.compile_db):
        print(f"Error: {args.compile_db} not found", file=sys.stderr)
        print("Make sure you've run cmake with -DCMAKE_EXPORT_COMPILE_COMMANDS=ON", file=sys.stderr)
        sys.exit(1)

    # Load compile commands
    try:
        with open(args.compile_db, 'r') as f:
            compile_commands = json.load(f)
        logging.info(f"Loaded {len(compile_commands)} compile commands")
    except json.JSONDecodeError as e:
        print(f"Error parsing {args.compile_db}: {e}", file=sys.stderr)
        sys.exit(1)

    # Find the compile command for the requested file
    entry = find_compile_command(compile_commands, args.file)

    if not entry:
        print(f"Error: No compile command found for {args.file}", file=sys.stderr)
        print(f"Available files in {args.compile_db}:", file=sys.stderr)
        # Show first 10 files as examples
        for i, cmd in enumerate(compile_commands[:10]):
            print(f" {cmd['file']}", file=sys.stderr)
        if len(compile_commands) > 10:
            print(f" ... and {len(compile_commands) - 10} more", file=sys.stderr)
        sys.exit(1)

    # Extract the command and directory
    command = entry['command']
    directory = entry.get('directory', '.')
    source_file = entry['file']

    if args.verbose:
        print(f"Directory: {directory}", file=sys.stderr)
        print(f"Command: {command}", file=sys.stderr)
        print("-" * 80, file=sys.stderr)

    logging.info(f"Compiling {source_file}")
    logging.debug(f"Working directory: {directory}")
    logging.debug(f"Command: {command}")

    # Execute the compile command
    try:
        # If we need to show error context, we must capture output
        capture = not args.dump_output or args.show_error_context is not None

        logging.debug(f"Running compiler (capture={capture})")
        result = subprocess.run(
            command,
            shell=True,
            cwd=directory,
            capture_output=capture,
            text=True
        )

        logging.info(f"Compiler returned code: {result.returncode}")

        if args.dump_output and not args.show_error_context:
            # Output was already printed to stdout/stderr
            pass
        elif args.show_error_context is not None:
            # Parse and display errors with context
            all_output = result.stderr + "\n" + result.stdout

            # Log first few lines of output for debugging
            output_lines = all_output.split('\n')[:10]
            for line in output_lines:
                logging.debug(f"Output: {line}")

            errors = extract_errors_with_context(all_output, args.file, args.show_error_context)

            if args.errors_only:
                errors = [e for e in errors if e['type'] == 'error']
                logging.info(f"Filtered to {len(errors)} errors only")

            print(f"\nFound {len(errors)} {'error' if args.errors_only else 'error/warning'}(s) in {args.file}:\n")

            for error in errors:
                print(format_error_with_context(error, source_file, args.show_source_context))

            if errors:
                print(f"\n{'='*80}")
                print(f"Total: {len(errors)} {'error' if args.errors_only else 'error/warning'}(s)")
        else:
            # Default behavior - show output if there were errors or warnings
            if result.stderr:
                print(result.stderr, file=sys.stderr)
            if result.stdout:
                print(result.stdout)

        # Exit with the same code as the compiler
        sys.exit(result.returncode)

    except subprocess.SubprocessError as e:
        print(f"Error executing compile command: {e}", file=sys.stderr)
        sys.exit(1)
    except KeyboardInterrupt:
        print("\nCompilation interrupted", file=sys.stderr)
        sys.exit(130)


if __name__ == '__main__':
    main()
conanfile.py
@@ -21,23 +21,20 @@ class Xrpl(ConanFile):
        'static': [True, False],
        'tests': [True, False],
        'unity': [True, False],
        'with_wasmedge': [True, False],
        'tool_requires_b2': [True, False],
    }

    requires = [
        'blake3/1.5.0@xahaud/stable',
        'boost/1.86.0',
        'date/3.0.1',
        'date/3.0.3',
        'libarchive/3.6.0',
        'lz4/1.9.3',
        'lz4/1.9.4',
        'grpc/1.50.1',
        'nudb/2.0.8',
        'openssl/3.3.2',
        'protobuf/3.21.9',
        'snappy/1.1.10',
        'soci/4.0.3',
        'sqlite3/3.42.0',
        'openssl/3.6.0',
        'protobuf/3.21.12',
        'soci/4.0.3@xahaud/stable',
        'zlib/1.3.1',
        'wasmedge/0.11.2',
    ]

    default_options = {
@@ -51,43 +48,44 @@ class Xrpl(ConanFile):
        'static': True,
        'tests': True,
        'unity': False,
        'with_wasmedge': True,
        'tool_requires_b2': False,

        'blake3:simd': False,  # Disable SIMD for testing
        'cassandra-cpp-driver:shared': False,
        'date:header_only': True,
        'grpc:shared': False,
        'grpc:secure': True,
        'libarchive:shared': False,
        'libarchive:with_acl': False,
        'libarchive:with_bzip2': False,
        'libarchive:with_cng': False,
        'libarchive:with_expat': False,
        'libarchive:with_iconv': False,
        'libarchive:with_libxml2': False,
        'libarchive:with_lz4': True,
        'libarchive:with_lzma': False,
        'libarchive:with_lzo': False,
        'libarchive:with_nettle': False,
        'libarchive:with_openssl': False,
        'libarchive:with_pcreposix': False,
        'libarchive:with_xattr': False,
        'libarchive:with_zlib': False,
        'libpq:shared': False,
        'lz4:shared': False,
        'openssl:shared': False,
        'protobuf:shared': False,
        'protobuf:with_zlib': True,
        'rocksdb:enable_sse': False,
        'rocksdb:lite': False,
        'rocksdb:shared': False,
        'rocksdb:use_rtti': True,
        'rocksdb:with_jemalloc': False,
        'rocksdb:with_lz4': True,
        'rocksdb:with_snappy': True,
        'snappy:shared': False,
        'soci:shared': False,
        'soci:with_sqlite3': True,
        'soci:with_boost': True,
        'cassandra-cpp-driver/*:shared': False,
        'date/*:header_only': False,
        'grpc/*:shared': False,
        'grpc/*:secure': True,
        'libarchive/*:shared': False,
        'libarchive/*:with_acl': False,
        'libarchive/*:with_bzip2': False,
        'libarchive/*:with_cng': False,
        'libarchive/*:with_expat': False,
        'libarchive/*:with_iconv': False,
        'libarchive/*:with_libxml2': False,
        'libarchive/*:with_lz4': True,
        'libarchive/*:with_lzma': False,
        'libarchive/*:with_lzo': False,
        'libarchive/*:with_nettle': False,
        'libarchive/*:with_openssl': False,
        'libarchive/*:with_pcreposix': False,
        'libarchive/*:with_xattr': False,
        'libarchive/*:with_zlib': False,
        'libpq/*:shared': False,
        'lz4/*:shared': False,
        'openssl/*:shared': False,
        'protobuf/*:shared': False,
        'protobuf/*:with_zlib': True,
        'rocksdb/*:enable_sse': False,
        'rocksdb/*:lite': False,
        'rocksdb/*:shared': False,
        'rocksdb/*:use_rtti': True,
        'rocksdb/*:with_jemalloc': False,
        'rocksdb/*:with_lz4': True,
        'rocksdb/*:with_snappy': True,
        'snappy/*:shared': False,
        'soci/*:shared': False,
        'soci/*:with_sqlite3': True,
        'soci/*:with_boost': True,
    }

    def set_version(self):
@@ -98,11 +96,28 @@ class Xrpl(ConanFile):
        match = next(m for m in matches if m)
        self.version = match.group(1)

    def build_requirements(self):
        # These provide build tools (protoc, grpc plugins) that run during build
        self.tool_requires('protobuf/3.21.12')
        self.tool_requires('grpc/1.50.1')
        # Explicitly require b2 (e.g. for building from source for glibc compatibility)
        if self.options.tool_requires_b2:
            self.tool_requires('b2/5.3.2')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
            self.options['boost'].visibility = 'global'
            self.options['boost/*'].visibility = 'global'

    def requirements(self):
        # Force sqlite3 version to avoid conflicts with soci
        self.requires('sqlite3/3.42.0', override=True)
        # Force our custom snappy build for all dependencies
        self.requires('snappy/1.1.10@xahaud/stable', override=True)
        # Force boost version for all dependencies to avoid conflicts
        self.requires('boost/1.86.0', override=True)

        if self.options.with_wasmedge:
            self.requires('wasmedge/0.11.2@xahaud/stable')
        if self.options.jemalloc:
            self.requires('jemalloc/5.2.1')
        if self.options.reporting:
@@ -8,4 +8,4 @@ if [[ "$GITHUB_REPOSITORY" == "" ]]; then
fi

echo "Mounting $(pwd)/io in ubuntu and running unit tests"
docker run --rm -i -v $(pwd):/io -e BUILD_CORES=$BUILD_CORES ubuntu sh -c '/io/release-build/xahaud --unittest-jobs $BUILD_CORES -u'
docker run --rm -i -v $(pwd):/io --platform=linux/amd64 -e BUILD_CORES=$BUILD_CORES ubuntu sh -c '/io/release-build/xahaud --unittest-jobs $BUILD_CORES -u'
external/blake3/conandata.yml
@@ -1,10 +0,0 @@
sources:
  "1.5.0":
    url: "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.5.0.tar.gz"
    sha256: "f506140bc3af41d3432a4ce18b3b83b08eaa240e94ef161eb72b2e57cdc94c69"
  "1.4.1":
    url: "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.4.1.tar.gz"
    sha256: "33020ac83a8169b2e847cc6fb1dd38806ffab6efe79fe6c320e322154a3bea2c"
  "1.4.0":
    url: "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.4.0.tar.gz"
    sha256: "658e1c75e2d9bbed9f426385f02d2a188dc19978a39e067ba93e837861e5fe58"
external/blake3/conanfile.py
@@ -1,115 +0,0 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import copy, get
from conan.tools.scm import Version
import os

required_conan_version = ">=1.54.0"


class Blake3Conan(ConanFile):
    name = "blake3"
    version = "1.5.0"
    description = "BLAKE3 cryptographic hash function"
    topics = ("blake3", "hash", "cryptography")
    url = "https://github.com/BLAKE3-team/BLAKE3"
    homepage = "https://github.com/BLAKE3-team/BLAKE3"
    license = "CC0-1.0 OR Apache-2.0"

    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "simd": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "simd": False,  # Default to NO SIMD for testing
    }

    def config_options(self):
        if self.settings.os == 'Windows':
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")
        # BLAKE3 is C code
        self.settings.rm_safe("compiler.cppstd")
        self.settings.rm_safe("compiler.libcxx")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        # BLAKE3's CMake options
        tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
        if not self.options.simd:
            # For v1.5.0, we'll need to manually patch the CMakeLists.txt
            # These flags don't work with the old CMake
            tc.preprocessor_definitions["BLAKE3_USE_NEON"] = "0"
        tc.generate()

    def build(self):
        # Patch CMakeLists.txt if SIMD is disabled
        if not self.options.simd:
            cmake_file = os.path.join(self.source_folder, "c", "CMakeLists.txt")
            # Read the file
            with open(cmake_file, 'r') as f:
                content = f.read()
            # Replace the ARM detection line to never match
            content = content.replace(
                'elseif(CMAKE_SYSTEM_PROCESSOR IN_LIST BLAKE3_ARMv8_NAMES',
                'elseif(FALSE # Disabled by conan simd=False'
            )
            # Write it back
            with open(cmake_file, 'w') as f:
                f.write(content)

        cmake = CMake(self)
        # BLAKE3's C implementation has its CMakeLists.txt in the c/ subdirectory
        cmake.configure(build_script_folder=os.path.join(self.source_folder, "c"))
        cmake.build()

    def package(self):
        # Copy license files
        copy(self, "LICENSE*", src=self.source_folder,
             dst=os.path.join(self.package_folder, "licenses"))
        # Copy header
        copy(self, "blake3.h",
             src=os.path.join(self.source_folder, "c"),
             dst=os.path.join(self.package_folder, "include"))
        # Copy library
        copy(self, "*.a", src=self.build_folder,
             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
        copy(self, "*.lib", src=self.build_folder,
             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
        copy(self, "*.dylib", src=self.build_folder,
             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
        copy(self, "*.so*", src=self.build_folder,
             dst=os.path.join(self.package_folder, "lib"), keep_path=False)
        copy(self, "*.dll", src=self.build_folder,
             dst=os.path.join(self.package_folder, "bin"), keep_path=False)

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "BLAKE3")
        self.cpp_info.set_property("cmake_target_name", "BLAKE3::blake3")

        # IMPORTANT: Explicitly set include directories to fix Conan CMakeDeps generation
        self.cpp_info.includedirs = ["include"]
        self.cpp_info.libs = ["blake3"]

        # System libraries
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs.append("m")
            self.cpp_info.system_libs.append("pthread")

        # TODO: to remove in conan v2 once cmake_find_package* generators removed
        self.cpp_info.names["cmake_find_package"] = "BLAKE3"
        self.cpp_info.names["cmake_find_package_multi"] = "BLAKE3"
external/snappy/conanfile.py
@@ -77,9 +77,14 @@ class SnappyConan(ConanFile):
        self.cpp_info.set_property("cmake_target_name", "Snappy::snappy")
        # TODO: back to global scope in conan v2 once cmake_find_package* generators removed
        self.cpp_info.components["snappylib"].libs = ["snappy"]
        if not self.options.shared:
            if self.settings.os in ["Linux", "FreeBSD"]:
                self.cpp_info.components["snappylib"].system_libs.append("m")
        # The following block is commented out as a workaround for a bug in the
        # Conan 1.x CMakeDeps generator. Including system_libs ("m") here
        # incorrectly triggers a heuristic that adds a dynamic link to `stdc++`
        # (-lstdc++), preventing a fully static build.
        # This behavior is expected to be corrected in Conan 2.
        # if not self.options.shared:
        #     if self.settings.os in ["Linux", "FreeBSD"]:
        #         self.cpp_info.components["snappylib"].system_libs.append("m")

        # TODO: to remove in conan v2 once cmake_find_package* generators removed
        self.cpp_info.names["cmake_find_package"] = "Snappy"
external/soci/conanfile.py
@@ -154,7 +154,7 @@ class SociConan(ConanFile):
        self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix))
        self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)]
        if self.options.with_boost:
            self.cpp_info.components["soci_core"].requires.append("boost::boost")
            self.cpp_info.components["soci_core"].requires.append("boost::headers")

        # soci_empty
        if self.options.empty:
external/wasmedge/conanfile.py
@@ -38,8 +38,15 @@ class WasmedgeConan(ConanFile):
            raise ConanInvalidConfiguration("Binaries for this combination of version/os/arch/compiler are not available")

    def package_id(self):
        del self.info.settings.compiler.version
        self.info.settings.compiler = self._compiler_alias
        # Make binary compatible across compiler versions (since we're downloading prebuilt)
        self.info.settings.rm_safe("compiler.version")
        # Group compilers by their binary compatibility
        # Note: We must use self.info.settings here, not self.settings (forbidden in Conan 2)
        compiler_name = str(self.info.settings.compiler)
        if compiler_name in ["Visual Studio", "msvc"]:
            self.info.settings.compiler = "Visual Studio"
        else:
            self.info.settings.compiler = "gcc"

    def build(self):
        # This is packaging binaries so the download needs to be in build
@@ -1,145 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_keylet_without_hash_options(filepath: Path) -> int:
|
||||
"""Fix keylet calls without hash_options in test files."""
|
||||
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
except Exception as e:
|
||||
print(f"Error reading {filepath}: {e}")
|
||||
return 0
|
||||
|
||||
original_content = content
|
||||
replacements = 0
|
||||
|
||||
# Pattern to match keylet calls without hash_options
|
||||
# E.g., keylet::ownerDir(acct.id()) or keylet::account(alice)
|
||||
keylet_funcs = {
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'offer': 'KEYLET_OFFER',
|
||||
'line': 'KEYLET_TRUSTLINE',
|
||||
'check': 'KEYLET_CHECK',
|
||||
'escrow': 'KEYLET_ESCROW',
|
||||
'payChan': 'KEYLET_PAYCHAN',
|
||||
'depositPreauth': 'KEYLET_DEPOSIT_PREAUTH',
|
||||
'ticket': 'KEYLET_TICKET',
|
||||
'nftoffer': 'KEYLET_NFT_OFFER',
|
||||
'fees': 'KEYLET_FEES',
|
||||
'amendments': 'KEYLET_AMENDMENTS',
|
||||
'negativeUNL': 'KEYLET_NEGATIVE_UNL',
|
||||
'skip': 'KEYLET_SKIP_LIST',
|
||||
'hook': 'KEYLET_HOOK',
|
||||
'hookDefinition': 'KEYLET_HOOK_DEFINITION',
|
||||
'hookState': 'KEYLET_HOOK_STATE',
|
||||
'hookStateDir': 'KEYLET_HOOK_STATE_DIR',
|
||||
'emittedDir': 'KEYLET_EMITTED_DIR',
|
||||
'emittedTxn': 'KEYLET_EMITTED_TXN',
|
||||
'import_vlseq': 'KEYLET_IMPORT_VLSEQ',
|
||||
'unchecked': 'KEYLET_UNCHECKED',
|
||||
'uritoken': 'KEYLET_URI_TOKEN',
|
||||
'nftpage': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_min': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_max': 'KEYLET_NFT_PAGE',
|
||||
'nft_buys': 'KEYLET_NFT_BUYS',
|
||||
'nft_sells': 'KEYLET_NFT_SELLS',
|
||||
'child': 'KEYLET_CHILD',
|
||||
'page': 'KEYLET_DIR_PAGE',
|
||||
'UNLReport': 'KEYLET_UNL_REPORT',
|
||||
'book': 'KEYLET_BOOK'
|
||||
}
|
||||
|
||||
for func, classifier in keylet_funcs.items():
|
||||
# Pattern to match keylet::<func>(...) where the args don't start with hash_options
|
||||
pattern = re.compile(
|
||||
rf'\bkeylet::{re.escape(func)}\s*\(\s*(?!hash_options)',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
matches = list(pattern.finditer(content))
|
||||
|
||||
# Process matches in reverse order to maintain positions
|
||||
for match in reversed(matches):
|
||||
start = match.start()
|
||||
# Find the matching closing parenthesis
|
||||
paren_count = 1
|
||||
pos = match.end()
|
||||
while pos < len(content) and paren_count > 0:
|
||||
if content[pos] == '(':
|
||||
paren_count += 1
|
||||
elif content[pos] == ')':
|
||||
paren_count -= 1
|
||||
pos += 1
|
||||
|
||||
if paren_count == 0:
|
||||
# Extract the full function call
|
||||
full_call = content[start:pos]
|
||||
args_start = match.end()
|
||||
args_end = pos - 1
|
||||
args = content[args_start:args_end]
|
||||
|
||||
# Determine ledger sequence to use
|
||||
ledger_seq = None
|
||||
if 'view' in content[max(0, start-500):start]:
|
||||
if 'view.seq()' in content[max(0, start-500):start]:
|
||||
ledger_seq = '(view.seq())'
|
||||
else:
|
||||
ledger_seq = '0'
|
||||
elif 'env' in content[max(0, start-500):start]:
|
||||
if 'env.current()' in content[max(0, start-500):start]:
|
||||
ledger_seq = '(env.current()->seq())'
|
||||
else:
|
||||
ledger_seq = '0'
|
||||
else:
|
||||
ledger_seq = '0'
|
||||
|
||||
# Build the new call
|
||||
new_call = f'keylet::{func}(hash_options{{{ledger_seq}, {classifier}}}, {args})'
|
||||
|
||||
# Replace in content
|
||||
content = content[:start] + new_call + content[pos:]
|
||||
replacements += 1
|
||||
|
||||
if replacements > 0 and content != original_content:
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
return replacements
|
||||
|
||||
return 0
|
||||
|
||||
def main():
|
||||
project_root = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc")
|
||||
|
||||
# Files to fix
|
||||
test_files = [
|
||||
"src/test/app/URIToken_test.cpp",
|
||||
"src/test/app/Touch_test.cpp",
|
||||
"src/test/app/SetRemarks_test.cpp",
|
||||
"src/test/app/Remit_test.cpp",
|
||||
"src/test/app/HookNegatives_test.cpp",
|
||||
"src/test/app/Hook_test.cpp",
|
||||
"src/test/app/NFTokenBurn_test.cpp",
|
||||
"src/test/app/NFToken_test.cpp",
|
||||
"src/test/app/TxMeta_test.cpp",
|
||||
"src/test/app/AccountTxPaging_test.cpp"
|
||||
]
|
||||
|
||||
total_replacements = 0
|
||||
|
||||
for rel_path in test_files:
|
||||
filepath = project_root / rel_path
|
||||
if filepath.exists():
|
||||
replacements = fix_keylet_without_hash_options(filepath)
|
||||
if replacements > 0:
|
||||
print(f"Fixed {rel_path}: {replacements} replacements")
|
||||
total_replacements += replacements
|
||||
|
||||
print(f"\nTotal replacements: {total_replacements}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,130 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_file(filepath: Path) -> int:
|
||||
"""Fix various hash_options issues in a single file."""
|
||||
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
except Exception as e:
|
||||
print(f"Error reading {filepath}: {e}")
|
||||
return 0
|
||||
|
||||
original_content = content
|
||||
replacements = 0
|
||||
|
||||
# Fix duplicate keylet calls with hash_options
|
||||
# Pattern: keylet::X(keylet::X(hash_options{...}, ...))
|
||||
pattern1 = re.compile(
|
||||
r'keylet::(\w+)\s*\(\s*keylet::\1\s*\(\s*hash_options\s*\{[^}]+\}[^)]*\)\s*\)',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def fix_duplicate(match):
|
||||
nonlocal replacements
|
||||
# Extract just the inner keylet call
|
||||
inner = match.group(0)
|
||||
# Find the position of the second keylet::
|
||||
second_keylet_pos = inner.find('keylet::', 8) # Skip first occurrence
|
||||
if second_keylet_pos != -1:
|
||||
# Extract everything after the second keylet::
|
||||
fixed = 'keylet::' + inner[second_keylet_pos + 8:]
|
||||
# Remove the extra closing paren at the end
|
||||
if fixed.endswith('))'):
|
||||
fixed = fixed[:-1]
|
||||
replacements += 1
|
||||
return fixed
|
||||
return match.group(0)
|
||||
|
||||
content = pattern1.sub(fix_duplicate, content)
|
||||
|
||||
# Fix keylet calls without hash_options (like keylet::ownerDir(acc.id()))
|
||||
# These need hash_options added
|
||||
keylet_funcs = ['ownerDir', 'account', 'signers', 'offer']
|
||||
for func in keylet_funcs:
|
||||
# Pattern to match keylet::func(args) where args doesn't start with hash_options
|
||||
pattern2 = re.compile(
|
||||
rf'keylet::{func}\s*\(\s*(?!hash_options)([^)]+)\)',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def add_hash_options(match):
|
||||
nonlocal replacements
|
||||
args = match.group(1).strip()
|
||||
# Determine the classifier based on function name
|
||||
classifier_map = {
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'offer': 'KEYLET_OFFER'
|
||||
}
|
||||
classifier = classifier_map.get(func, 'LEDGER_INDEX_UNNEEDED')
|
||||
|
||||
# Check if we're in a context where we can get env.current()->seq()
|
||||
# Look back in the content to see if we're in a lambda or function with env
|
||||
pos = match.start()
|
||||
# Simple heuristic: if we see "env" within 500 chars before, use it
|
||||
context = content[max(0, pos-500):pos]
|
||||
if 'env.' in context or 'env)' in context or '&env' in context:
|
||||
replacements += 1
|
||||
return f'keylet::{func}(hash_options{{(env.current()->seq()), {classifier}}}, {args})'
|
||||
else:
|
||||
# Try view instead
|
||||
if 'view' in context or 'ReadView' in context:
|
||||
replacements += 1
|
||||
return f'keylet::{func}(hash_options{{0, {classifier}}}, {args})'
|
||||
return match.group(0)
|
||||
|
||||
content = pattern2.sub(add_hash_options, content)
|
||||
|
||||
# Fix missing closing parenthesis for keylet::account calls
|
||||
pattern3 = re.compile(
|
||||
r'(keylet::account\s*\(\s*hash_options\s*\{[^}]+\}\s*,\s*\w+(?:\.\w+\(\))?\s*)(\);)',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def fix_paren(match):
|
||||
nonlocal replacements
|
||||
replacements += 1
|
||||
return match.group(1) + '));'
|
||||
|
||||
content = pattern3.sub(fix_paren, content)
|
||||
|
||||
if replacements > 0 and content != original_content:
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
return replacements
|
||||
|
||||
return 0
|
||||
|
||||
def main():
|
||||
project_root = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc")
|
||||
|
||||
# Files to fix
|
||||
test_files = [
|
||||
"src/test/app/RCLValidations_test.cpp",
|
||||
"src/test/app/PayStrand_test.cpp",
|
||||
"src/test/app/PayChan_test.cpp",
|
||||
"src/test/app/ClaimReward_test.cpp",
|
||||
"src/test/app/Import_test.cpp",
|
||||
"src/test/app/LedgerReplay_test.cpp",
|
||||
"src/test/app/Offer_test.cpp"
|
||||
]
|
||||
|
||||
total_replacements = 0
|
||||
|
||||
for rel_path in test_files:
|
||||
filepath = project_root / rel_path
|
||||
if filepath.exists():
|
||||
replacements = fix_file(filepath)
|
||||
if replacements > 0:
|
||||
print(f"Fixed {rel_path}: {replacements} replacements")
|
||||
total_replacements += replacements
|
||||
|
||||
print(f"\nTotal replacements: {total_replacements}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,43 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_sethook_ledger_sequences():
|
||||
filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/SetHook_test.cpp")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Fix keylet::hookState calls with hash_options{0, KEYLET_HOOK_STATE}
|
||||
# These are inside test functions where env is available
|
||||
content = re.sub(
|
||||
r'keylet::hookState\(hash_options\{0, KEYLET_HOOK_STATE\}',
|
||||
r'keylet::hookState(hash_options{(env.current()->seq()), KEYLET_HOOK_STATE}',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix keylet::hookStateDir calls with hash_options{0, KEYLET_HOOK_STATE_DIR}
|
||||
content = re.sub(
|
||||
r'keylet::hookStateDir\(hash_options\{0, KEYLET_HOOK_STATE_DIR\}',
|
||||
r'keylet::hookStateDir(hash_options{(env.current()->seq()), KEYLET_HOOK_STATE_DIR}',
|
||||
content
|
||||
)
|
||||
|
||||
# The sha512Half_s and sha512Half calls with LEDGER_INDEX_UNNEEDED are CORRECT
|
||||
# because they're hashing non-ledger data (WASM bytecode, nonces, etc.)
|
||||
# So we leave those alone.
|
||||
|
||||
# The HASH_WASM macro uses are also CORRECT with LEDGER_INDEX_UNNEEDED
|
||||
# because they're computing hashes of WASM bytecode at compile time,
|
||||
# not ledger objects.
|
||||
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Fixed {filepath}")
|
||||
print("Note: sha512Half* calls with LEDGER_INDEX_UNNEEDED are correct (hashing non-ledger data)")
|
||||
print("Note: HASH_WASM macro uses are correct (compile-time WASM bytecode hashing)")
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_sethook_ledger_sequences()
|
||||
@@ -1,40 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_sethook_test():
|
||||
filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/SetHook_test.cpp")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Fix keylet::hook(Account(...).id()) patterns
|
||||
# These are looking up hooks on accounts in the ledger, so need env.current()->seq()
|
||||
content = re.sub(
|
||||
r'env\.le\(keylet::hook\(Account\(([^)]+)\)\.id\(\)\)\)',
|
||||
r'env.le(keylet::hook(hash_options{(env.current()->seq()), KEYLET_HOOK}, Account(\1).id()))',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix other keylet::hook patterns with just an ID
|
||||
content = re.sub(
|
||||
r'env\.le\(keylet::hook\((\w+)\.id\(\)\)\)',
|
||||
r'env.le(keylet::hook(hash_options{(env.current()->seq()), KEYLET_HOOK}, \1.id()))',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix patterns like keylet::hook(alice)
|
||||
content = re.sub(
|
||||
r'env\.le\(keylet::hook\((\w+)\)\)',
|
||||
r'env.le(keylet::hook(hash_options{(env.current()->seq()), KEYLET_HOOK}, \1))',
|
||||
content
|
||||
)
|
||||
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Fixed {filepath}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_sethook_test()
|
||||
@@ -1,47 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_sethook_test():
|
||||
filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/SetHook_test.cpp")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Fix ALL keylet::hookState patterns including those with ripple:: prefix
|
||||
# Pattern 1: ripple::keylet::hookState(id, key, ns)
|
||||
content = re.sub(
|
||||
r'ripple::keylet::hookState\(([^,]+),\s*([^,]+),\s*([^)]+)\)',
|
||||
r'ripple::keylet::hookState(hash_options{(env.current()->seq()), KEYLET_HOOK_STATE}, \1, \2, \3)',
|
||||
content
|
||||
)
|
||||
|
||||
# Pattern 2: keylet::hookState without ripple:: prefix (if not already fixed)
|
||||
content = re.sub(
|
||||
r'(?<!ripple::)keylet::hookState\(([^h][^,]+),\s*([^,]+),\s*([^)]+)\)',
|
||||
r'keylet::hookState(hash_options{(env.current()->seq()), KEYLET_HOOK_STATE}, \1, \2, \3)',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix ripple::keylet::hookStateDir patterns
|
||||
content = re.sub(
|
||||
r'ripple::keylet::hookStateDir\(([^,]+),\s*([^)]+)\)',
|
||||
r'ripple::keylet::hookStateDir(hash_options{(env.current()->seq()), KEYLET_HOOK_STATE_DIR}, \1, \2)',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix ripple::keylet::hook patterns
|
||||
content = re.sub(
|
||||
r'ripple::keylet::hook\(([^)]+)\)(?!\))',
|
||||
r'ripple::keylet::hook(hash_options{(env.current()->seq()), KEYLET_HOOK}, \1)',
|
||||
content
|
||||
)
|
||||
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Fixed {filepath}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_sethook_test()
|
||||
@@ -1,54 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_sethook_test():
|
||||
filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/SetHook_test.cpp")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Fix keylet::hookState - it takes 4 args now (hash_options + 3 original)
|
||||
# Match patterns like: keylet::hookState(Account("alice").id(), key, ns)
|
||||
content = re.sub(
|
||||
r'keylet::hookState\(Account\(([^)]+)\)\.id\(\),\s*([^,]+),\s*([^)]+)\)',
|
||||
r'keylet::hookState(hash_options{0, KEYLET_HOOK_STATE}, Account(\1).id(), \2, \3)',
|
||||
content
|
||||
)
|
||||
|
||||
# Match patterns with variables like: keylet::hookState(alice.id(), key, ns)
|
||||
content = re.sub(
|
||||
r'keylet::hookState\((\w+)\.id\(\),\s*([^,]+),\s*([^)]+)\)',
|
||||
r'keylet::hookState(hash_options{0, KEYLET_HOOK_STATE}, \1.id(), \2, \3)',
|
||||
content
|
||||
)
|
||||
|
||||
# Match patterns with just IDs: keylet::hookState(accid, key, ns)
|
||||
content = re.sub(
|
||||
r'keylet::hookState\((\w+),\s*([^,]+),\s*([^)]+)\)(?!\))',
|
||||
r'keylet::hookState(hash_options{0, KEYLET_HOOK_STATE}, \1, \2, \3)',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix keylet::hookStateDir patterns
|
||||
content = re.sub(
|
||||
r'keylet::hookStateDir\((\w+),\s*([^,]+),\s*([^)]+)\)',
|
||||
r'keylet::hookStateDir(hash_options{0, KEYLET_HOOK_STATE_DIR}, \1, \2, \3)',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix sha512Half_s calls without hash_options
|
||||
content = re.sub(
|
||||
r'sha512Half_s\(ripple::Slice\(',
|
||||
r'sha512Half_s(hash_options{0, LEDGER_INDEX_UNNEEDED}, ripple::Slice(',
|
||||
content
|
||||
)
|
||||
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Fixed {filepath}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_sethook_test()
|
||||
@@ -1,41 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
def fix_sethook_test():
|
||||
filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/SetHook_test.cpp")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Fix keylet::hookStateDir - it takes 3 args now (hash_options + 2 original)
|
||||
# Match patterns like: keylet::hookStateDir(Account("alice").id(), ns)
|
||||
content = re.sub(
|
||||
r'keylet::hookStateDir\(Account\(([^)]+)\)\.id\(\),\s*([^)]+)\)',
|
||||
r'keylet::hookStateDir(hash_options{0, KEYLET_HOOK_STATE_DIR}, Account(\1).id(), \2)',
|
||||
content
|
||||
)
|
||||
|
||||
# Match with variables
|
||||
content = re.sub(
|
||||
r'keylet::hookStateDir\((\w+)\.id\(\),\s*([^)]+)\)',
|
||||
r'keylet::hookStateDir(hash_options{0, KEYLET_HOOK_STATE_DIR}, \1.id(), \2)',
|
||||
content
|
||||
)
|
||||
|
||||
# Fix multiline hookStateDir patterns
|
||||
content = re.sub(
|
||||
r'keylet::hookStateDir\(\s*\n\s*Account\(([^)]+)\)\.id\(\),\s*([^)]+)\)',
|
||||
r'keylet::hookStateDir(hash_options{0, KEYLET_HOOK_STATE_DIR},\n Account(\1).id(), \2)',
|
||||
content,
|
||||
flags=re.MULTILINE
|
||||
)
|
||||
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
|
||||
print(f"Fixed {filepath}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_sethook_test()
|
||||
@@ -1,120 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
def fix_test_files(root_dir: str):
|
||||
"""Fix hash_options in test files by adding appropriate classifiers."""
|
||||
|
||||
# Pattern to match hash_options with only ledger sequence
|
||||
# This will match things like hash_options{(env.current()->seq())}
|
||||
pattern = re.compile(
|
||||
r'(keylet::(\w+)\s*\([^)]*\s*)hash_options\s*\{\s*\(([^}]+)\)\s*\}',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
# Mapping of keylet functions to their classifiers
|
||||
keylet_classifiers = {
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'offer': 'KEYLET_OFFER',
|
||||
'check': 'KEYLET_CHECK',
|
||||
'depositPreauth': 'KEYLET_DEPOSIT_PREAUTH',
|
||||
'escrow': 'KEYLET_ESCROW',
|
||||
'payChan': 'KEYLET_PAYCHAN',
|
||||
'line': 'KEYLET_TRUSTLINE',
|
||||
'ticket': 'KEYLET_TICKET',
|
||||
'hook': 'KEYLET_HOOK',
|
||||
'hookDefinition': 'KEYLET_HOOK_DEFINITION',
|
||||
'hookState': 'KEYLET_HOOK_STATE',
|
||||
'hookStateDir': 'KEYLET_HOOK_STATE_DIR',
|
||||
'child': 'KEYLET_CHILD',
|
||||
'page': 'KEYLET_DIR_PAGE',
|
||||
'nftpage_min': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_max': 'KEYLET_NFT_PAGE',
|
||||
'nftoffer': 'KEYLET_NFT_OFFER',
|
||||
'nft_buys': 'KEYLET_NFT_BUYS',
|
||||
'nft_sells': 'KEYLET_NFT_SELLS',
|
||||
'uritoken': 'KEYLET_URI_TOKEN',
|
||||
}
|
||||
|
||||
files_fixed = 0
|
||||
total_replacements = 0
|
||||
|
||||
# Find all test files
|
||||
test_dir = Path(root_dir) / "src" / "test"
|
||||
|
||||
for filepath in test_dir.rglob("*.cpp"):
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
original_content = f.read()
|
||||
|
||||
content = original_content
|
||||
replacements = 0
|
||||
|
||||
def replacer(match):
|
||||
nonlocal replacements
|
||||
prefix = match.group(1)
|
||||
keylet_func = match.group(2)
|
||||
ledger_expr = match.group(3)
|
||||
|
||||
# Get the classifier for this keylet function
|
||||
classifier = keylet_classifiers.get(keylet_func)
|
||||
if not classifier:
|
||||
print(f"WARNING: No classifier for keylet::{keylet_func} in {filepath}")
|
||||
# Default to a generic one
|
||||
classifier = 'KEYLET_UNCHECKED'
|
||||
|
||||
replacements += 1
|
||||
# Reconstruct with the classifier
|
||||
return f'{prefix}hash_options{{({ledger_expr}), {classifier}}}'
|
||||
|
||||
new_content = pattern.sub(replacer, content)
|
||||
|
||||
# Also fix standalone hash_options calls (not in keylet context)
|
||||
# These are likely in helper functions or direct usage
|
||||
standalone_pattern = re.compile(
|
||||
r'(?<!keylet::\w{1,50}\s{0,10}\([^)]*\s{0,10})hash_options\s*\{\s*\(([^}]+)\)\s*\}',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def standalone_replacer(match):
|
||||
nonlocal replacements
|
||||
ledger_expr = match.group(1)
|
||||
replacements += 1
|
||||
# For standalone ones in tests, use a test context
|
||||
return f'hash_options{{({ledger_expr}), LEDGER_INDEX_UNNEEDED}}'
|
||||
|
||||
new_content = standalone_pattern.sub(standalone_replacer, new_content)
|
||||
|
||||
if replacements > 0:
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
f.write(new_content)
|
||||
|
||||
rel_path = filepath.relative_to(root_dir)
|
||||
print(f"Fixed {rel_path}: {replacements} replacements")
|
||||
files_fixed += 1
|
||||
total_replacements += replacements
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error processing {filepath}: {e}")
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Fixed {files_fixed} test files")
|
||||
print(f"Total replacements: {total_replacements}")
|
||||
|
||||
return files_fixed, total_replacements
|
||||
|
||||
if __name__ == "__main__":
|
||||
project_root = "/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc"
|
||||
|
||||
print("Fixing hash_options in test files...")
|
||||
files_fixed, total_replacements = fix_test_files(project_root)
|
||||
|
||||
if files_fixed > 0:
|
||||
print("\nDone! Now rebuild to see if there are any remaining issues.")
|
||||
else:
|
||||
print("\nNo test files needed fixing.")
|
||||
@@ -1,130 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
def fix_test_files(root_dir: str):
|
||||
"""Fix hash_options in test files by adding appropriate classifiers."""
|
||||
|
||||
# Mapping of keylet functions to their classifiers
|
||||
keylet_classifiers = {
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'offer': 'KEYLET_OFFER',
|
||||
'check': 'KEYLET_CHECK',
|
||||
'depositPreauth': 'KEYLET_DEPOSIT_PREAUTH',
|
||||
'escrow': 'KEYLET_ESCROW',
|
||||
'payChan': 'KEYLET_PAYCHAN',
|
||||
'line': 'KEYLET_TRUSTLINE',
|
||||
'ticket': 'KEYLET_TICKET',
|
||||
'hook': 'KEYLET_HOOK',
|
||||
'hookDefinition': 'KEYLET_HOOK_DEFINITION',
|
||||
'hookState': 'KEYLET_HOOK_STATE',
|
||||
'hookStateDir': 'KEYLET_HOOK_STATE_DIR',
|
||||
'child': 'KEYLET_CHILD',
|
||||
'page': 'KEYLET_DIR_PAGE',
|
||||
'nftpage_min': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_max': 'KEYLET_NFT_PAGE',
|
||||
'nftoffer': 'KEYLET_NFT_OFFER',
|
||||
'nft_buys': 'KEYLET_NFT_BUYS',
|
||||
'nft_sells': 'KEYLET_NFT_SELLS',
|
||||
'uritoken': 'KEYLET_URI_TOKEN',
|
||||
'fees': 'KEYLET_FEES',
|
||||
'amendments': 'KEYLET_AMENDMENTS',
|
||||
'negativeUNL': 'KEYLET_NEGATIVE_UNL',
|
||||
'skip': 'KEYLET_SKIP_LIST',
|
||||
'unchecked': 'KEYLET_UNCHECKED',
|
||||
'import_vlseq': 'KEYLET_IMPORT_VLSEQ',
|
||||
'UNLReport': 'KEYLET_UNL_REPORT',
|
||||
'emittedDir': 'KEYLET_EMITTED_DIR',
|
||||
'emittedTxn': 'KEYLET_EMITTED_TXN',
|
||||
'book': 'KEYLET_BOOK',
|
||||
}
|
||||
|
||||
files_fixed = 0
|
||||
total_replacements = 0
|
||||
|
||||
# Find all test files
|
||||
test_dir = Path(root_dir) / "src" / "test"
|
||||
|
||||
for filepath in test_dir.rglob("*.cpp"):
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
original_content = f.read()
|
||||
|
||||
content = original_content
|
||||
replacements = 0
|
||||
|
||||
# Process line by line for better control
|
||||
lines = content.split('\n')
|
||||
new_lines = []
|
||||
|
||||
for line in lines:
|
||||
modified = False
|
||||
# Look for keylet:: calls with hash_options that only have ledger seq
|
||||
for func_name, classifier in keylet_classifiers.items():
|
||||
# Pattern for keylet::func(...hash_options{(ledger_seq)}...)
|
||||
pattern = f'keylet::{func_name}\\s*\\([^)]*hash_options\\s*\\{{\\s*\\(([^}}]+)\\)\\s*\\}}'
|
||||
|
||||
matches = list(re.finditer(pattern, line))
|
||||
if matches:
|
||||
# Process from end to start to maintain positions
|
||||
for match in reversed(matches):
|
||||
ledger_expr = match.group(1)
|
||||
# Check if it already has a classifier (contains comma)
|
||||
if ',' not in ledger_expr:
|
||||
# Replace with classifier added
|
||||
new_text = f'keylet::{func_name}(' + line[match.start():match.end()].replace(
|
||||
f'hash_options{{({ledger_expr})}}',
|
||||
f'hash_options{{({ledger_expr}), {classifier}}}'
|
||||
)
|
||||
line = line[:match.start()] + new_text + line[match.end():]
|
||||
replacements += 1
|
||||
modified = True
|
||||
|
||||
# Also look for standalone hash_options (not in keylet context)
|
||||
if not modified and 'hash_options{(' in line and '),' not in line:
|
||||
# Simple pattern for standalone hash_options{(expr)}
|
||||
standalone_pattern = r'(?<!keylet::\w{1,30}\([^)]*\s{0,5})hash_options\s*\{\s*\(([^}]+)\)\s*\}'
|
||||
matches = list(re.finditer(standalone_pattern, line))
|
||||
for match in reversed(matches):
|
||||
ledger_expr = match.group(1)
|
||||
if ',' not in ledger_expr:
|
||||
# For standalone ones in tests, use LEDGER_INDEX_UNNEEDED
|
||||
line = line[:match.start()] + f'hash_options{{({ledger_expr}), LEDGER_INDEX_UNNEEDED}}' + line[match.end():]
|
||||
replacements += 1
|
||||
|
||||
new_lines.append(line)
|
||||
|
||||
if replacements > 0:
|
||||
new_content = '\n'.join(new_lines)
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
f.write(new_content)
|
||||
|
||||
rel_path = filepath.relative_to(root_dir)
|
||||
print(f"Fixed {rel_path}: {replacements} replacements")
|
||||
files_fixed += 1
|
||||
total_replacements += replacements
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error processing {filepath}: {e}")
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Fixed {files_fixed} test files")
|
||||
print(f"Total replacements: {total_replacements}")
|
||||
|
||||
return files_fixed, total_replacements
|
||||
|
||||
if __name__ == "__main__":
|
||||
project_root = "/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc"
|
||||
|
||||
print("Fixing hash_options in test files...")
|
||||
files_fixed, total_replacements = fix_test_files(project_root)
|
||||
|
||||
if files_fixed > 0:
|
||||
print("\nDone! Now rebuild to see if there are any remaining issues.")
|
||||
else:
|
||||
print("\nNo test files needed fixing.")
|
||||
@@ -1,133 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Mapping of keylet functions to their specific HashContext classifiers
|
||||
KEYLET_CLASSIFIERS = {
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'amendments': 'KEYLET_AMENDMENTS',
|
||||
'book': 'KEYLET_BOOK',
|
||||
'check': 'KEYLET_CHECK',
|
||||
'child': 'KEYLET_CHILD',
|
||||
'depositPreauth': 'KEYLET_DEPOSIT_PREAUTH',
|
||||
'emittedDir': 'KEYLET_EMITTED_DIR',
|
||||
'emittedTxn': 'KEYLET_EMITTED_TXN',
|
||||
'escrow': 'KEYLET_ESCROW',
|
||||
'fees': 'KEYLET_FEES',
|
||||
'hook': 'KEYLET_HOOK',
|
||||
'hookDefinition': 'KEYLET_HOOK_DEFINITION',
|
||||
'hookState': 'KEYLET_HOOK_STATE',
|
||||
'hookStateDir': 'KEYLET_HOOK_STATE_DIR',
|
||||
'import_vlseq': 'KEYLET_IMPORT_VLSEQ',
|
||||
'line': 'KEYLET_TRUSTLINE',
|
||||
'negativeUNL': 'KEYLET_NEGATIVE_UNL',
|
||||
'nft_buys': 'KEYLET_NFT_BUYS',
|
||||
'nft_sells': 'KEYLET_NFT_SELLS',
|
||||
'nftoffer': 'KEYLET_NFT_OFFER',
|
||||
'nftpage': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_max': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_min': 'KEYLET_NFT_PAGE',
|
||||
'offer': 'KEYLET_OFFER',
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'page': 'KEYLET_DIR_PAGE',
|
||||
'payChan': 'KEYLET_PAYCHAN',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'skip': 'KEYLET_SKIP_LIST',
|
||||
'ticket': 'KEYLET_TICKET',
|
||||
'UNLReport': 'KEYLET_UNL_REPORT',
|
||||
'unchecked': 'KEYLET_UNCHECKED',
|
||||
'uritoken': 'KEYLET_URI_TOKEN',
|
||||
}
|
||||
|
||||
def fix_keylet_calls_in_file(filepath: Path) -> int:
|
||||
"""Fix hash_options in a single file by adding appropriate classifiers."""
|
||||
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
except Exception as e:
|
||||
print(f"Error reading {filepath}: {e}")
|
||||
return 0
|
||||
|
||||
original_content = content
|
||||
replacements = 0
|
||||
|
||||
# Process each keylet function
|
||||
for func_name, classifier in KEYLET_CLASSIFIERS.items():
|
||||
# Pattern to match keylet::<func>(hash_options{<ledger_seq>}, ...)
|
||||
# where ledger_seq doesn't already contain a comma (no classifier yet)
|
||||
pattern = re.compile(
|
||||
rf'keylet::{re.escape(func_name)}\s*\(\s*hash_options\s*\{{\s*\(([^,}}]+)\)\s*\}}',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def replacer(match):
|
||||
nonlocal replacements
|
||||
ledger_seq = match.group(1).strip()
|
||||
replacements += 1
|
||||
# Add the classifier
|
||||
return f'keylet::{func_name}(hash_options{{({ledger_seq}), {classifier}}}'
|
||||
|
||||
content = pattern.sub(replacer, content)
|
||||
|
||||
# Also fix standalone hash_options that aren't in keylet calls
|
||||
# These might be in test helper functions or other places
|
||||
standalone_pattern = re.compile(
|
||||
r'(?<!keylet::\w+\s*\(\s*)hash_options\s*\{\s*\(([^,}]+)\)\s*\}(?!\s*,)',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def standalone_replacer(match):
|
||||
nonlocal replacements
|
||||
ledger_seq = match.group(1).strip()
|
||||
# Skip if it already has a classifier
|
||||
if ',' in ledger_seq:
|
||||
return match.group(0)
|
||||
replacements += 1
|
||||
# For standalone ones in tests, use LEDGER_INDEX_UNNEEDED
|
||||
return f'hash_options{{({ledger_seq}), LEDGER_INDEX_UNNEEDED}}'
|
||||
|
||||
# Apply standalone pattern only if we're in a test file
|
||||
if '/test/' in str(filepath):
|
||||
content = standalone_pattern.sub(standalone_replacer, content)
|
||||
|
||||
if replacements > 0 and content != original_content:
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
return replacements
|
||||
|
||||
return 0
|
||||
|
||||
def main():
|
||||
project_root = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc")
|
||||
|
||||
# Find all test cpp files that might have hash_options
|
||||
test_files = list((project_root / "src" / "test").rglob("*.cpp"))
|
||||
|
||||
print(f"Found {len(test_files)} test files to check...")
|
||||
|
||||
total_files_fixed = 0
|
||||
total_replacements = 0
|
||||
|
||||
for filepath in test_files:
|
||||
replacements = fix_keylet_calls_in_file(filepath)
|
||||
if replacements > 0:
|
||||
rel_path = filepath.relative_to(project_root)
|
||||
print(f"Fixed {rel_path}: {replacements} replacements")
|
||||
total_files_fixed += 1
|
||||
total_replacements += replacements
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Fixed {total_files_fixed} test files")
|
||||
print(f"Total replacements: {total_replacements}")
|
||||
|
||||
if total_files_fixed > 0:
|
||||
print("\nNow rebuild to see if there are any remaining issues.")
|
||||
else:
|
||||
print("\nNo test files needed fixing.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,117 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Mapping of keylet functions to their specific HashContext classifiers
|
||||
KEYLET_CLASSIFIERS = {
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'amendments': 'KEYLET_AMENDMENTS',
|
||||
'book': 'KEYLET_BOOK',
|
||||
'check': 'KEYLET_CHECK',
|
||||
'child': 'KEYLET_CHILD',
|
||||
'depositPreauth': 'KEYLET_DEPOSIT_PREAUTH',
|
||||
'emittedDir': 'KEYLET_EMITTED_DIR',
|
||||
'emittedTxn': 'KEYLET_EMITTED_TXN',
|
||||
'escrow': 'KEYLET_ESCROW',
|
||||
'fees': 'KEYLET_FEES',
|
||||
'hook': 'KEYLET_HOOK',
|
||||
'hookDefinition': 'KEYLET_HOOK_DEFINITION',
|
||||
'hookState': 'KEYLET_HOOK_STATE',
|
||||
'hookStateDir': 'KEYLET_HOOK_STATE_DIR',
|
||||
'import_vlseq': 'KEYLET_IMPORT_VLSEQ',
|
||||
'line': 'KEYLET_TRUSTLINE',
|
||||
'negativeUNL': 'KEYLET_NEGATIVE_UNL',
|
||||
'nft_buys': 'KEYLET_NFT_BUYS',
|
||||
'nft_sells': 'KEYLET_NFT_SELLS',
|
||||
'nftoffer': 'KEYLET_NFT_OFFER',
|
||||
'nftpage': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_max': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_min': 'KEYLET_NFT_PAGE',
|
||||
'offer': 'KEYLET_OFFER',
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'page': 'KEYLET_DIR_PAGE',
|
||||
'payChan': 'KEYLET_PAYCHAN',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'skip': 'KEYLET_SKIP_LIST',
|
||||
'ticket': 'KEYLET_TICKET',
|
||||
'UNLReport': 'KEYLET_UNL_REPORT',
|
||||
'unchecked': 'KEYLET_UNCHECKED',
|
||||
'uritoken': 'KEYLET_URI_TOKEN',
|
||||
}
|
||||
|
||||
def fix_keylet_calls_in_file(filepath: Path) -> int:
|
||||
"""Fix hash_options in a single file by adding appropriate classifiers."""
|
||||
|
||||
try:
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
except Exception as e:
|
||||
print(f"Error reading {filepath}: {e}")
|
||||
return 0
|
||||
|
||||
original_content = content
|
||||
replacements = 0
|
||||
|
||||
# Process each keylet function
|
||||
for func_name, classifier in KEYLET_CLASSIFIERS.items():
|
||||
# Pattern to match keylet::<func>(hash_options{<ledger_seq>}, ...)
|
||||
# where ledger_seq doesn't already contain a comma (no classifier yet)
|
||||
pattern = re.compile(
|
||||
rf'keylet::{re.escape(func_name)}\s*\(\s*hash_options\s*\{{\s*\(([^}}]+)\)\s*\}}',
|
||||
re.MULTILINE
|
||||
)
|
||||
|
||||
def replacer(match):
|
||||
nonlocal replacements
|
||||
ledger_seq = match.group(1).strip()
|
||||
# Check if it already has a classifier (contains comma after the ledger expression)
|
||||
# But be careful - the ledger expression itself might contain commas
|
||||
# Look for a comma followed by a KEYLET_ or other classifier
|
||||
if ', KEYLET_' in match.group(0) or ', LEDGER_' in match.group(0):
|
||||
return match.group(0)
|
||||
replacements += 1
|
||||
# Add the classifier
|
||||
return f'keylet::{func_name}(hash_options{{({ledger_seq}), {classifier}}}'
|
||||
|
||||
content = pattern.sub(replacer, content)
|
||||
|
||||
if replacements > 0 and content != original_content:
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
return replacements
|
||||
|
||||
return 0
|
||||
|
||||
def main():
|
||||
project_root = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc")
|
||||
|
||||
# Find all test cpp files that might have hash_options
|
||||
test_files = list((project_root / "src" / "test").rglob("*.cpp"))
|
||||
|
||||
print(f"Found {len(test_files)} test files to check...")
|
||||
|
||||
total_files_fixed = 0
|
||||
total_replacements = 0
|
||||
|
||||
for filepath in test_files:
|
||||
replacements = fix_keylet_calls_in_file(filepath)
|
||||
if replacements > 0:
|
||||
rel_path = filepath.relative_to(project_root)
|
||||
print(f"Fixed {rel_path}: {replacements} replacements")
|
||||
total_files_fixed += 1
|
||||
total_replacements += replacements
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Fixed {total_files_fixed} test files")
|
||||
print(f"Total replacements: {total_replacements}")
|
||||
|
||||
if total_files_fixed > 0:
|
||||
print("\nNow rebuild to see if there are any remaining issues.")
|
||||
else:
|
||||
print("\nNo test files needed fixing.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,67 +0,0 @@
#!/usr/bin/env python3

import re
from pathlib import Path

def fix_xahau_genesis():
    filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/XahauGenesis_test.cpp")

    with open(filepath, 'r') as f:
        content = f.read()

    # Fix sha512Half_s calls - they now need hash_options
    content = re.sub(
        r'ripple::sha512Half_s\(ripple::Slice\(',
        r'ripple::sha512Half_s(hash_options{0, LEDGER_INDEX_UNNEEDED}, ripple::Slice(',
        content
    )

    # Fix keylet::amendments()
    content = re.sub(
        r'env\.le\(keylet::amendments\(\)\)',
        r'env.le(keylet::amendments(hash_options{(env.current()->seq()), KEYLET_AMENDMENTS}))',
        content
    )

    # Fix keylet::account(id) calls
    content = re.sub(
        r'env\.le\(keylet::account\((\w+)\)\)',
        r'env.le(keylet::account(hash_options{(env.current()->seq()), KEYLET_ACCOUNT}, \1))',
        content
    )

    # Fix keylet::hook(id) calls
    content = re.sub(
        r'env\.le\(keylet::hook\((\w+)\)\)',
        r'env.le(keylet::hook(hash_options{(env.current()->seq()), KEYLET_HOOK}, \1))',
        content
    )

    # Fix keylet::hookDefinition calls
    content = re.sub(
        r'env\.le\(keylet::hookDefinition\(([^)]+)\)\)',
        r'env.le(keylet::hookDefinition(hash_options{(env.current()->seq()), KEYLET_HOOK_DEFINITION}, \1))',
        content
    )

    # Fix keylet::fees() calls
    content = re.sub(
        r'env\.le\(keylet::fees\(\)\)',
        r'env.le(keylet::fees(hash_options{(env.current()->seq()), KEYLET_FEES}))',
        content
    )

    # Fix standalone keylet::account assignments
    content = re.sub(
        r'(\s+auto\s+const\s+\w+Key\s*=\s*)keylet::account\((\w+)\);',
        r'\1keylet::account(hash_options{(env.current()->seq()), KEYLET_ACCOUNT}, \2);',
        content
    )

    with open(filepath, 'w') as f:
        f.write(content)

    print(f"Fixed {filepath}")

if __name__ == "__main__":
    fix_xahau_genesis()
@@ -1,54 +0,0 @@
#!/usr/bin/env python3

import re
from pathlib import Path

def fix_xahau_genesis():
    filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/XahauGenesis_test.cpp")

    with open(filepath, 'r') as f:
        content = f.read()

    # Fix ALL sha512Half_s calls - they now need hash_options
    # Match multi-line patterns too
    content = re.sub(
        r'ripple::sha512Half_s\(',
        r'ripple::sha512Half_s(hash_options{0, LEDGER_INDEX_UNNEEDED}, ',
        content
    )

    # Fix keylet::hookDefinition without hash_options
    content = re.sub(
        r'keylet::hookDefinition\(([^,)]+)\)(?!\.)',
        r'keylet::hookDefinition(hash_options{0, KEYLET_HOOK_DEFINITION}, \1)',
        content
    )

    # Fix env.le(keylet::hookDefinition calls that might have been missed
    content = re.sub(
        r'env\.le\(keylet::hookDefinition\(hash_options\{0, KEYLET_HOOK_DEFINITION\}, ([^)]+)\)\)',
        r'env.le(keylet::hookDefinition(hash_options{(env.current()->seq()), KEYLET_HOOK_DEFINITION}, \1))',
        content
    )

    # Fix keylet::account in view.read() calls
    content = re.sub(
        r'view->read\(keylet::account\((\w+)\)\)',
        r'view->read(keylet::account(hash_options{(view->seq()), KEYLET_ACCOUNT}, \1))',
        content
    )

    # Fix env.current()->read(keylet::account calls
    content = re.sub(
        r'env\.current\(\)->read\(keylet::account\((\w+)\)\)',
        r'env.current()->read(keylet::account(hash_options{(env.current()->seq()), KEYLET_ACCOUNT}, \1))',
        content
    )

    with open(filepath, 'w') as f:
        f.write(content)

    print(f"Fixed {filepath}")

if __name__ == "__main__":
    fix_xahau_genesis()
@@ -1,39 +0,0 @@
#!/usr/bin/env python3

import re
from pathlib import Path


def fix_xahau_genesis():
    filepath = Path("/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc/src/test/app/XahauGenesis_test.cpp")

    with open(filepath, 'r') as f:
        content = f.read()

    # Fix keylet::signers without hash_options
    content = re.sub(
        r'env\.le\(keylet::signers\((\w+)\)\)',
        r'env.le(keylet::signers(hash_options{(env.current()->seq()), KEYLET_SIGNERS}, \1))',
        content
    )

    # Fix keylet::hookState - it takes 4 arguments now (hash_options + 3 original)
    content = re.sub(
        r'keylet::hookState\(\s*([^,]+),\s*([^,]+),\s*([^)]+)\)',
        r'keylet::hookState(hash_options{(env.current()->seq()), KEYLET_HOOK_STATE}, \1, \2, \3)',
        content
    )

    # Fix keylet::negativeUNL
    content = re.sub(
        r'env\.le\(keylet::negativeUNL\(\)\)',
        r'env.le(keylet::negativeUNL(hash_options{(env.current()->seq()), KEYLET_NEGATIVE_UNL}))',
        content
    )

    with open(filepath, 'w') as f:
        f.write(content)

    print(f"Fixed {filepath}")


if __name__ == "__main__":
    fix_xahau_genesis()
@@ -1,263 +0,0 @@
|
||||
|
||||
# **A Performance and Security Analysis of Modern Hash Functions for Small-Input Payloads: Selecting a High-Speed Successor to SHA-512/256**
|
||||
|
||||
## **Executive Summary & Introduction**
|
||||
|
||||
### **The Challenge: The Need for Speed in Small-Payload Hashing**
|
||||
|
||||
In modern computing systems, the performance of cryptographic hash functions is a critical design consideration. While functions from the SHA-2 family, such as SHA-512, are widely deployed and trusted for their robust security, they can represent a significant performance bottleneck in applications that process a high volume of small data payloads.1 Use cases such as the generation of authentication tokens, per-request key derivation, and the indexing of data in secure databases frequently involve hashing inputs of 128 bytes or less. In these scenarios, the computational overhead of legacy algorithms can impede system throughput and increase latency.
|
||||
|
||||
This report addresses the specific challenge of selecting a high-performance, cryptographically secure replacement for sha512\_half, which is formally specified as SHA-512/256. The objective is to identify the fastest hash function that produces a 256-bit digest, thereby providing a 128-bit security level against collision attacks, while being optimized for inputs up to 128 bytes.3 The analysis is conducted within the context of modern 64-bit CPU architectures (x86-64 and ARMv8) and must account for the profound impact of hardware acceleration features, including both general-purpose Single Instruction, Multiple Data (SIMD) extensions and dedicated cryptographic instructions.
|
||||
|
||||
### **The Contenders: Introducing the Candidates**
|
||||
|
||||
To meet these requirements, this analysis will evaluate two leading-edge cryptographic hash functions against the established NIST standard, SHA-512/256, which serves as the performance and security baseline.
|
||||
|
||||
* **The Incumbent (Baseline): SHA-512/256.** As a member of the venerable SHA-2 family, SHA-512/256 is a FIPS-standardized algorithm built upon the Merkle-Damgård construction.3 It leverages 64-bit arithmetic, which historically offered a performance advantage over its 32-bit counterpart, SHA-256, on 64-bit processors.6 A key feature of this truncated variant is its inherent resistance to length-extension attacks, a known vulnerability of SHA-512 and SHA-256.8 Its performance, particularly in the context of hardware acceleration, will serve as the primary benchmark for comparison.
|
||||
* **The Modern Challengers: BLAKE3 and KangarooTwelve.** Two primary candidates have been identified based on their design goals, which explicitly target substantial performance improvements over legacy standards.
|
||||
* **BLAKE3:** Released in 2020, BLAKE3 represents the latest evolution of the BLAKE family of hash functions. It was engineered from the ground up for extreme speed and massive parallelism, utilizing a tree-based structure over a highly optimized compression function derived from ChaCha20.9 It is a single, unified algorithm designed to deliver exceptional performance across a wide array of platforms, from high-end servers to resource-constrained embedded systems.
|
||||
* **KangarooTwelve (K12):** KangarooTwelve is a high-speed eXtendable-Output Function (XOF) derived from the Keccak permutation, the same primitive that underpins the FIPS 202 SHA-3 standard.12 By significantly reducing the number of rounds from 24 (in SHA-3) to 12, K12 achieves a major speedup while leveraging the extensive security analysis of its parent algorithm.12
|
||||
|
||||
### **Scope and Methodology**
|
||||
|
||||
The scope of this report is strictly confined to cryptographic hash functions that provide a minimum 128-bit security level against all standard attack vectors, including collision, preimage, and second-preimage attacks. This focus necessitates the exclusion of non-cryptographic hash functions, despite their often-superior performance. Algorithms such as xxHash are explicitly designed for speed in non-adversarial contexts like hash tables and checksums, and they make no claims of cryptographic security.15
|
||||
|
||||
The case of MeowHash serves as a potent cautionary tale. Designed for extreme speed on systems with AES hardware acceleration, it was initially promoted for certain security-adjacent use cases.18 However, subsequent public cryptanalysis revealed catastrophic vulnerabilities, including a practical key-recovery attack and the ability to generate collisions with probabilities far exceeding theoretical security bounds.19 These findings underscore the profound risks of employing algorithms outside their rigorously defined security context and firmly justify their exclusion from this analysis.
|
||||
|
||||
The methodology employed herein is a multi-faceted evaluation that synthesizes empirical data with theoretical analysis. It comprises three core pillars:
|
||||
|
||||
1. **Algorithmic Design Analysis:** An examination of the underlying construction (e.g., Merkle-Damgård, Sponge, Tree) and core cryptographic primitives of each candidate to understand their intrinsic performance characteristics and security properties.
|
||||
2. **Security Posture Assessment:** A review of the stated security goals, the justification for design choices (such as reduced round counts), and the body of public cryptanalysis for each algorithm.
|
||||
3. **Quantitative Performance Synthesis:** A comprehensive analysis of performance data from reputable, independent sources, including the eBACS/SUPERCOP benchmarking project, peer-reviewed academic papers, and official documentation from the algorithm designers. Performance will be normalized and compared across relevant architectures and input sizes to provide a clear, data-driven conclusion.
|
||||
|
||||
## **Architectural Underpinnings of High-Speed Hashing**
|
||||
|
||||
The performance of a hash function is not merely a product of its internal mathematics but is fundamentally dictated by its high-level construction and its interaction with the underlying CPU architecture. The evolution from serial, iterative designs to highly parallelizable tree structures, combined with the proliferation of hardware acceleration, has created a complex performance landscape.
|
||||
|
||||
### **The Evolution of Hash Constructions: From Serial to Parallel**
|
||||
|
||||
The way a hash function processes an input message is its most defining architectural characteristic, directly influencing its speed, security, and potential for parallelism.
|
||||
|
||||
#### **Merkle-Damgård Construction (SHA-2)**
|
||||
|
||||
The Merkle-Damgård construction is the foundational design of the most widely deployed hash functions, including the entire SHA-2 family.5 Its operation is inherently sequential. The input message is padded and divided into fixed-size blocks. A compression function,
|
||||
|
||||
f, processes these blocks iteratively. The process begins with a fixed initialization vector (IV). For each message block Mi, the compression function computes a new chaining value Hi=f(Hi−1,Mi). The final hash output is derived from the last chaining value, Hn.22
|
||||
|
||||
This iterative dependency, where the input to one step is the output of the previous, makes the construction simple to implement but fundamentally limits parallelism for a single message. The processing of block Mi cannot begin until the processing of Mi−1 is complete. Furthermore, the standard Merkle-Damgård construction is susceptible to length-extension attacks, where an attacker who knows the hash of a message M can compute the hash of M∥P∥Mnew for some padding P without knowing M. This vulnerability is a primary reason why truncated variants like SHA-512/256, which do not expose the full internal state in their output, are recommended for many security protocols.8
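
To make the serial data dependency concrete, the following is a minimal, illustrative sketch of the Merkle-Damgård chaining loop. It is not any particular library's API; `compress` is a toy stand-in for the real compression function f, and the types are simplified.

```cpp
#include <array>
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy stand-in types: a real SHA-512 uses an 8x64-bit state and 128-byte blocks.
using State = std::array<std::uint64_t, 8>;
using Block = std::array<std::uint8_t, 128>;

// Placeholder compression function f; only the chaining pattern matters here.
State compress(State const& prev, Block const& block)
{
    State next = prev;
    for (std::size_t i = 0; i < block.size(); ++i)
        next[i % next.size()] ^= (next[i % next.size()] << 7) ^ block[i];
    return next;
}

// Merkle-Damgård: H0 = IV, Hi = f(H(i-1), Mi). Each step consumes the previous
// chaining value, so the blocks of one message cannot be processed in parallel.
State merkleDamgard(State const& iv, std::vector<Block> const& paddedBlocks)
{
    State h = iv;
    for (Block const& m : paddedBlocks)
        h = compress(h, m);  // strict serial dependency
    return h;
}

int main()
{
    State iv{};
    std::vector<Block> blocks(3, Block{});
    State h = merkleDamgard(iv, blocks);
    std::printf("%016llx\n", static_cast<unsigned long long>(h[0]));
}
```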
|
||||
|
||||
#### **Sponge Construction (SHA-3 & KangarooTwelve)**
|
||||
|
||||
The Sponge construction, standardized with SHA-3, represents a significant departure from the Merkle-Damgård paradigm.13 It operates on a fixed-size internal state,
|
||||
|
||||
S, which is larger than the desired output size. The state is conceptually divided into two parts: an outer part, the *rate* (r), and an inner part, the *capacity* (c). The security of the function is determined by the size of the capacity.
|
||||
|
||||
The process involves two phases 22:
|
||||
|
||||
1. **Absorbing Phase:** The input message is padded and broken into blocks of size r. Each block is XORed into the rate portion of the state, after which a fixed, unkeyed permutation, f, is applied to the entire state. This process is repeated for all message blocks.
|
||||
2. **Squeezing Phase:** Once all input has been absorbed, the output hash is generated. The rate portion of the state is extracted as the first block of output. If more output is required, the permutation f is applied again, and the new rate is extracted as the next block. This can be repeated to produce an output of arbitrary length, a capability known as an eXtendable-Output Function (XOF).24
|
||||
|
||||
This design provides robust immunity to length-extension attacks because the capacity portion of the state is never directly modified by the message blocks nor directly outputted.25 This flexibility and security are central to KangarooTwelve's design.
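
As an illustration of the two phases described above, here is a minimal, non-cryptographic sketch of a sponge with rate r and capacity c. The `permute` function is a trivial stand-in for Keccak-p, and the padding is simplified; only the absorb/squeeze structure is the point.

```cpp
#include <array>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::size_t stateBytes = 200;  // Keccak-p[1600] state is 200 bytes
constexpr std::size_t rateBytes = 168;   // e.g. r = 1344 bits, as in SHAKE128/K12
using State = std::array<std::uint8_t, stateBytes>;

// Stand-in permutation; the real Keccak-p rounds are far more involved.
void permute(State& s)
{
    for (std::size_t i = 0; i < s.size(); ++i)
        s[i] = static_cast<std::uint8_t>(s[i] ^ (s[(i + 1) % s.size()] + 0x9e + i));
}

std::vector<std::uint8_t> sponge(std::vector<std::uint8_t> msg, std::size_t outLen)
{
    msg.push_back(0x07);                       // toy padding only
    while (msg.size() % rateBytes != 0)
        msg.push_back(0x00);

    State s{};  // the capacity portion (bytes r..199) is never touched directly
    // Absorbing: XOR each r-byte block into the rate, then permute the state.
    for (std::size_t off = 0; off < msg.size(); off += rateBytes)
    {
        for (std::size_t i = 0; i < rateBytes; ++i)
            s[i] ^= msg[off + i];
        permute(s);
    }
    // Squeezing: read the rate, permute, repeat until enough output (an XOF).
    std::vector<std::uint8_t> out;
    while (out.size() < outLen)
    {
        for (std::size_t i = 0; i < rateBytes && out.size() < outLen; ++i)
            out.push_back(s[i]);
        permute(s);
    }
    return out;
}

int main()
{
    auto d = sponge({'a', 'b', 'c'}, 32);
    std::printf("%02x%02x...\n", static_cast<unsigned>(d[0]), static_cast<unsigned>(d[1]));
}
```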
|
||||
|
||||
#### **Tree-Based Hashing (BLAKE3 & K12's Parallel Mode)**
|
||||
|
||||
Tree-based hashing is the key innovation enabling the massive throughput of modern hash functions on large inputs.26 Instead of processing a message sequentially, the input is divided into a large number of independent chunks. These chunks form the leaves of a Merkle tree.27 Each chunk can be hashed in parallel, utilizing multiple CPU cores or the multiple "lanes" of a wide SIMD vector. The resulting intermediate hash values are then paired and hashed together to form parent nodes, continuing up the tree until a single root hash is produced.11
|
||||
|
||||
This structure allows for a degree of parallelism limited only by the number of chunks, making it exceptionally well-suited to modern hardware. However, this parallelism comes with a crucial caveat for the use case in question. The tree hashing modes of both BLAKE3 and KangarooTwelve are only activated for inputs that exceed a certain threshold. For BLAKE3, this threshold is 1024 bytes 11; for KangarooTwelve, it is 8192 bytes.24 As the specified maximum input size is 128 bytes, it falls far below these thresholds. Consequently, the widely advertised parallelism advantage of these modern hashes, which is their primary performance driver for large file hashing, is
|
||||
|
||||
**entirely irrelevant** to this specific analysis. The performance competition for small inputs is therefore not about parallelism but about the raw, single-threaded efficiency of the underlying compression function on a single block of data and the algorithm's initialization overhead. This reframes the entire performance evaluation, shifting the focus from architectural parallelism to the micro-architectural efficiency of the core cryptographic permutation.
|
||||
|
||||
### **The Hardware Acceleration Landscape: SIMD and Dedicated Instructions**
|
||||
|
||||
Modern CPUs are not simple scalar processors; they contain specialized hardware to accelerate common computational tasks, including cryptography. Understanding this landscape is critical, as the availability of acceleration for one algorithm but not another can create performance differences of an order of magnitude.
|
||||
|
||||
#### **General-Purpose SIMD (Single Instruction, Multiple Data)**
|
||||
|
||||
SIMD instruction sets allow a single instruction to operate on multiple data elements packed into a wide vector register. Key examples include SSE2, AVX2, and AVX-512 on x86-64 architectures, and NEON on ARMv8.9 Algorithms whose internal operations can be expressed as parallel, independent computations on smaller words (e.g., 32-bit or 64-bit) are ideal candidates for SIMD optimization. Both BLAKE3 and KangarooTwelve are designed to be highly friendly to SIMD implementation, which is the primary source of their speed in software on modern CPUs.32
|
||||
|
||||
#### **Dedicated Cryptographic Extensions**
|
||||
|
||||
In addition to general-purpose SIMD, many CPUs now include instructions specifically designed to accelerate standardized cryptographic algorithms.
|
||||
|
||||
* **Intel SHA Extensions:** Introduced by Intel and adopted by AMD, these instructions provide hardware acceleration for SHA-1 and SHA-256.34 Their availability on a wide range of modern processors, from Intel Ice Lake and Rocket Lake onwards, and all AMD Zen processors, gives SHA-256 a formidable performance advantage over algorithms that must be implemented in software or with general-purpose SIMD.8 Critically, widespread hardware support for SHA-512 is a very recent development, only appearing in Intel's 2024 Arrow Lake and Lunar Lake architectures, and is not present in the vast majority of currently deployed systems.34
|
||||
* **ARMv8 Cryptography Extensions:** The ARMv8 architecture includes optional cryptography extensions. The baseline extensions provide hardware support for AES, SHA-1, and SHA-256.35 Support for SHA-512 and SHA-3 (Keccak) was introduced as a further optional extension in the ARMv8.2-A revision.35 This means that on many ARMv8 devices, SHA-256 is hardware-accelerated while SHA-512 and Keccak-based functions are not. High-performance cores, such as Apple's M-series processors, do implement these advanced extensions, providing acceleration for all three families.12
|
||||
|
||||
This disparity in hardware support creates a significant performance inversion. Historically, SHA-512 was often faster than SHA-256 on 64-bit CPUs because it processes larger 1024-bit blocks using 64-bit native operations, resulting in more data processed per round compared to SHA-256's 512-bit blocks and 32-bit operations.6 However, the introduction of dedicated SHA-256 hardware instructions provides a performance boost that far outweighs the architectural advantage of SHA-512's 64-bit design. On a modern CPU with SHA-256 extensions but no SHA-512 extensions, SHA-256 will be substantially faster.8 This elevates the performance bar for any proposed replacement for SHA-512/256; to be considered a truly "fast" alternative, a candidate must not only outperform software-based SHA-512 but also be competitive with hardware-accelerated SHA-256.
|
||||
|
||||
## **Candidate Deep Dive: BLAKE3**
|
||||
|
||||
BLAKE3 is a state-of-the-art cryptographic hash function designed with the explicit goal of being the fastest secure hash function available, leveraging parallelism at every level of modern CPU architecture.
|
||||
|
||||
### **Algorithm and Design Rationale**
|
||||
|
||||
BLAKE3 is a single, unified algorithm, avoiding the multiple variants of its predecessors (e.g., BLAKE2b, BLAKE2s).37 Its design is an elegant synthesis of two proven components: the BLAKE2s compression function and the Bao verified tree hashing mode.9
|
||||
|
||||
* **Core Components:** The heart of BLAKE3 is its compression function, which is a modified version of the BLAKE2s compression function. BLAKE2s itself is based on the core permutation of the ChaCha stream cipher, an ARX (Add-Rotate-XOR) design known for its exceptional speed in software.11 BLAKE3 operates exclusively on 32-bit words, a deliberate choice that ensures high performance on both 64-bit and 32-bit architectures, from high-end x86 servers to low-power ARM cores.11
|
||||
* **Reduced Round Count:** One of the most significant optimizations in BLAKE3 is the reduction of the number of rounds in its compression function from 10 (in BLAKE2s) to 7\.11 This 30% reduction in the core computational workload provides a direct and substantial increase in speed for processing each block of data.
|
||||
* **Tree Structure:** As established, for the specified input range of up to 128 bytes, the tree structure is trivial. The input constitutes a single chunk, which is processed as the root node of the tree. This design ensures that for small inputs, there is no additional overhead from the tree mode; the performance is purely that of the highly optimized 7-round compression function.39
|
||||
|
||||
### **Security Posture**
|
||||
|
||||
Despite its focus on speed, BLAKE3 is designed to be a fully secure cryptographic hash function, suitable for a wide range of applications including digital signatures and message authentication codes.10
|
||||
|
||||
* **Security Claims:** BLAKE3 targets a 128-bit security level for all standard goals, including collision resistance, preimage resistance, and differentiability.28 This security level is equivalent to that of SHA-256 and makes a 256-bit output appropriate and secure.
|
||||
* **Justification for Reduced Rounds:** The decision to reduce the round count to 7 is grounded in the extensive public cryptanalysis of the BLAKE family. The original BLAKE was a finalist in the NIST SHA-3 competition, and both it and its successor BLAKE2 have been subjected to intense scrutiny.38 The best known attacks on BLAKE2 are only able to break a small fraction of its total rounds, indicating that the original 10 rounds of BLAKE2s already contained a very large security margin.33 The BLAKE3 designers concluded that 7 rounds still provides a comfortable margin of safety against known attack vectors while yielding a significant performance gain.
|
||||
* **Inherent Security Features:** The tree-based mode of operation, even in its trivial form for small inputs, provides inherent immunity to length-extension attacks, a notable advantage over non-truncated members of the SHA-2 family like SHA-256 and SHA-512.9
|
||||
|
||||
### **Performance Profile for Small Inputs**
|
||||
|
||||
BLAKE3 was explicitly designed to excel not only on large, parallelizable inputs but also on the small inputs relevant to this analysis.
|
||||
|
||||
* **Design Intent:** The official BLAKE3 paper and its authors state that performance for inputs of 64 bytes (the internal block size) or shorter is "best in class".28 The paper's benchmarks claim superior single-message throughput compared to SHA-256 for all input sizes.42
|
||||
* **Benchmark Evidence:** While direct, cross-platform benchmarks for very small inputs are scarce, available data points consistently support BLAKE3's speed claims. In optimized Rust benchmarks on an x86-64 machine, hashing a single block with BLAKE3 (using AVX-512) took 43 ns, compared to 77 ns for BLAKE2s (using SSE4.1).43 This demonstrates the raw speed of the 7-round compression function. This is significant because BLAKE2s itself is already benchmarked as being faster than SHA-512 for most input sizes on modern CPUs.43 Therefore, by extension, BLAKE3's improved performance over BLAKE2s solidifies its position as a top contender for small-input speed.
|
||||
|
||||
## **Candidate Deep Dive: KangarooTwelve**
|
||||
|
||||
KangarooTwelve (K12) is a high-speed cryptographic hash function from the designers of Keccak/SHA-3. It aims to provide a much faster alternative to the official FIPS 202 standards while retaining the same underlying security principles and benefiting from the same extensive cryptanalysis.
|
||||
|
||||
### **Algorithm and Design Rationale**
|
||||
|
||||
K12 is best understood as a performance-tuned variant of the SHAKE eXtendable-Output Functions.
|
||||
|
||||
* **Core Components:** The core primitive of K12 is the Keccak-p permutation.12 This is the same Keccak-p permutation used in all SHA-3 and SHAKE functions, but with the number of rounds reduced from 24 to 12\. For inputs up to its parallel threshold of 8192 bytes, K12's operation is a simple, flat sponge construction, functionally equivalent to a round-reduced version of SHAKE128.31 It uses a capacity of 256 bits, targeting a 128-bit security level.41
|
||||
* **Reduced Round Count:** The primary source of K12's significant performance advantage over the standardized SHA-3 functions is the halving of the round count from 24 to 12\.13 This directly cuts the computational work of the core permutation in half, leading to a nearly 2x speedup for short messages compared to SHAKE128, the fastest of the FIPS 202 instances.12
|
||||
|
||||
### **Security Posture**
|
||||
|
||||
The security case for KangarooTwelve is directly inherited from the decade of intense international scrutiny applied to its parent, Keccak.
|
||||
|
||||
* **Security Claims:** K12 targets a 128-bit security level against all standard attacks, including collision and preimage attacks, making it directly comparable to BLAKE3 and SHA-256.24
|
||||
* **Justification for Reduced Rounds:** The decision to use 12 rounds is based on a conservative evaluation of the existing cryptanalysis of the Keccak permutation. At the time of K12's design, the best known practical collision attacks were only applicable up to 6 rounds of the permutation.49 The most powerful theoretical distinguishers could only reach 9 rounds.49 By selecting 12 rounds, the designers established a 100% security margin over the best known collision attacks and a 33% margin over the best theoretical distinguishers, a level they argue is comfortable and well-justified.49
|
||||
|
||||
### **Performance Profile for Small Inputs**
|
||||
|
||||
KangarooTwelve was designed to be fast for both long and short messages, addressing a perceived performance gap in the official SHA-3 standard.
|
||||
|
||||
* **Design Intent:** The explicit goal for short messages was to be approximately twice as fast as SHAKE128.12 This makes it a compelling high-speed alternative for applications that require or prefer a Keccak-based construction.
|
||||
* **Future-Proofing through Hardware Acceleration:** A key strategic advantage of K12 is its direct lineage from SHA-3. As CPU manufacturers increasingly adopt optional hardware acceleration for SHA-3 (as seen in ARMv8.2-A and later), K12 stands to benefit directly from these instructions.36 This provides a potential future performance pathway that is unavailable to algorithms like BLAKE3, which rely on general-purpose SIMD. On an Apple M1 processor, which includes these SHA-3 extensions, K12 is reported to be 1.7 times faster than hardware-accelerated SHA-256 and 3 times faster than hardware-accelerated SHA-512 for long messages, demonstrating the power of this dedicated hardware support.12
|
||||
|
||||
## **Quantitative Performance Showdown**
|
||||
|
||||
To provide a definitive recommendation, it is essential to move beyond theoretical designs and analyze empirical performance data. This section synthesizes results from multiple high-quality sources to build a comparative performance profile of the candidates across relevant architectures and the specified input range.
|
||||
|
||||
### **Benchmarking Methodology and Caveats**
|
||||
|
||||
Obtaining a single, perfectly consistent benchmark that compares all three candidates across all target architectures and input sizes is challenging. Therefore, this analysis relies on a synthesis of data from the eBACS/SUPERCOP project, which provides standardized performance metrics in cycles per byte (cpb) 53, supplemented by figures from the algorithms' design papers and other academic sources. The primary metric for comparison will be
|
||||
|
||||
**single-message latency**, which measures the time required to hash one message from start to finish. This is the most relevant metric for general-purpose applications.
|
||||
|
||||
It is important to distinguish this from multi-message throughput, which measures the aggregate performance when hashing many independent messages in parallel on a single core. As demonstrated in a high-throughput use case for the Solana platform, an optimized, batched implementation of hardware-accelerated SHA-256 can outperform BLAKE3 on small messages due to the simpler scheduling of the Merkle-Damgård construction into SIMD lanes.42 While this is a valid consideration for highly specialized, high-volume workloads, single-message latency remains the more universal measure of a hash function's "speed."
|
||||
|
||||
### **Cross-Architectural Benchmark Synthesis**
|
||||
|
||||
The following table presents a synthesized view of the performance of BLAKE3, KangarooTwelve, and the baseline SHA-512/256 for the specified input sizes. Performance is measured in median cycles per byte (cpb); lower values are better. The data represents estimates derived from a combination of official benchmarks and independent analyses on representative modern CPUs.
|
||||
|
||||
**Comparative Performance of Hash Functions for Small Inputs (Median Cycles/Byte)**
|
||||
|
||||
| Input Size (Bytes) | BLAKE3 | KangarooTwelve |
| :---- | :---- | :---- |
| **16** | \~17 cpb | \~22 cpb |
| **32** | \~10 cpb | \~14 cpb |
| **64** | **\~5 cpb** | \~9 cpb |
| **128** | **\~3 cpb** | \~6 cpb |
| *Long Message (Ref.)* | *\~0.3 cpb* | *\~0.51 cpb* |

Data synthesized from sources.12 Figures shown are for the Intel Cascade Lake-SP (AVX-512) platform; the SHA-512/256 baseline (software/SIMD on Intel, hardware-accelerated on the Apple M1) and the Apple M1 results are discussed in the analysis below. The *Long Message* row is for reference to show peak throughput.
|
||||
|
||||
### **Analysis of Performance Deltas and Architectural Nuances**
|
||||
|
||||
The benchmark data reveals several critical trends that are essential for making an informed decision.
|
||||
|
||||
* **Initialization Overhead:** For all algorithms, the cycles-per-byte metric is significantly higher for the smallest inputs (e.g., 16 bytes) and decreases as the input size grows. This reflects the fixed computational cost of initializing the hash state and performing finalization, which is amortized over a larger number of bytes for longer messages. The algorithm with the lowest fixed overhead will have an advantage on the smallest payloads.
|
||||
* **x86-64 (AVX) Performance:** On the Intel Cascade Lake-SP platform, which lacks dedicated hardware acceleration for any of the candidates, **BLAKE3 demonstrates a clear and decisive performance advantage across the entire input range.** Its ARX-based design, inherited from ChaCha, is exceptionally well-suited to implementation with general-purpose SIMD instruction sets like AVX2 and AVX-512.9 As the input size approaches and fills its 64-byte block, BLAKE3's efficiency becomes particularly pronounced. KangarooTwelve also performs very well, vastly outperforming the SHA-2 baseline, but its Keccak-p permutation is slightly less efficient to implement with general-purpose SIMD than BLAKE3's core. SHA-512/256, relying on a serial software implementation, is an order of magnitude slower.
|
||||
* **ARMv8 Performance:** The performance landscape shifts on the Apple M1 platform, which features dedicated hardware acceleration for both the SHA-2 and SHA-3 families. Here, **KangarooTwelve emerges as the performance leader.** The availability of SHA-3 instructions dramatically accelerates its Keccak-p core, allowing it to edge out the already-fast SIMD implementation of BLAKE3.12 This result highlights a key strategic consideration: K12's performance is intrinsically linked to the presence of these specialized hardware extensions. BLAKE3's performance, while excellent, relies on the universal availability of general-purpose SIMD. The baseline, SHA-512/256, is also significantly more competitive on this platform due to its own hardware acceleration, though it still lags behind the two modern contenders.
|
||||
|
||||
## **Strategic Recommendation and Implementation Guidance**
|
||||
|
||||
The analysis of algorithmic design, security posture, and quantitative performance data leads to a clear primary recommendation, qualified by important contextual considerations for specific deployment environments.
|
||||
|
||||
### **Definitive Recommendation: BLAKE3**
|
||||
|
||||
For the primary objective of identifying the single fastest cryptographic hash function for inputs up to 128 bytes, intended as a replacement for SHA-512/256 on a wide range of modern server and desktop hardware, **BLAKE3 is the definitive choice.**
|
||||
|
||||
This recommendation is based on the following justifications:
|
||||
|
||||
1. **Superior Performance on x86-64:** On the most common server and desktop architecture (x86-64), which largely lacks dedicated hardware acceleration for SHA-512 or SHA-3, BLAKE3's highly optimized SIMD implementation delivers the lowest single-message latency across the entire specified input range.
|
||||
2. **Efficient Core Function:** Its performance advantage stems from a combination of a reduced round count (7 vs. 10 in BLAKE2s) and an ARX-based compression function that is exceptionally well-suited to modern CPU pipelines and SIMD execution.11
|
||||
3. **Zero Overhead for Small Inputs:** The tree-based construction, which is central to its performance on large inputs, is designed to incur zero overhead for inputs smaller than 1024 bytes, ensuring that small-payload performance is not compromised.39
|
||||
4. **Robust Security:** BLAKE3 provides a 128-bit security level, is immune to length-extension attacks, and its reduced round count is justified by extensive public cryptanalysis of its predecessors.33
|
||||
|
||||
### **Contextual Considerations and Alternative Scenarios**
|
||||
|
||||
While BLAKE3 is the best general-purpose choice, specific deployment targets or workload characteristics may favor an alternative.
|
||||
|
||||
* **Scenario A: ARM-Dominant or Future-Proofed Environments.** If the target deployment environment consists exclusively of modern ARMv8.2+ processors that include the optional SHA-3 cryptography extensions (e.g., Apple Silicon-based systems), or if the primary goal is to future-proof an application against the broader adoption of these instructions, **KangarooTwelve is an exceptionally strong and likely faster alternative.** Its ability to leverage dedicated hardware gives it a performance edge in these specific environments.12
|
||||
* **Scenario B: High-Throughput Batch Processing.** If the specific workload involves hashing millions of independent small messages in parallel on a single core, the recommendation becomes more nuanced. As demonstrated by the Solana use case, the simpler scheduling of the Merkle-Damgård construction can allow a highly optimized, multi-message implementation of **hardware-accelerated SHA-256** to achieve higher aggregate throughput.42 In this specialized scenario, the single-message latency advantage of BLAKE3 may not translate to a throughput advantage, and direct, workload-specific benchmarking is essential.
|
||||
* **Library Maturity and Ecosystem Integration:** SHA-512 holds the advantage of being a long-standing FIPS standard, included in virtually every cryptographic library, including OpenSSL and OS-native APIs.38 BLAKE3 has mature, highly optimized official implementations in Rust and C, and is gaining widespread adoption, but may not be present in older, legacy systems.9 KangarooTwelve is the least common of the three, though stable implementations are available from its designers and in libraries like PyCryptodome.24
|
||||
|
||||
### **Implementation Best Practices**
|
||||
|
||||
To successfully deploy a new hash function and realize its performance benefits, the following practices are recommended:
|
||||
|
||||
* **Use Official, Optimized Libraries:** The performance gains of modern algorithms like BLAKE3 are contingent on using implementations that correctly leverage hardware features. It is critical to use the official blake3 Rust crate or the C implementation, which include runtime CPU feature detection to automatically enable the fastest available SIMD instruction set (e.g., SSE2, AVX2, AVX-512).9 Using a generic or unoptimized implementation will fail to deliver the expected speed.
|
||||
* **Avoid Performance Measurement Pitfalls:** The performance of hashing very small inputs is highly susceptible to measurement error caused by the overhead of the calling language or benchmarking framework. As seen in several community benchmarks, measuring performance from a high-level interpreted language like Python can lead to misleading results where the function call overhead dominates the actual hashing time.39 Meaningful benchmarks must be conducted in a compiled language (C, C++, Rust) to accurately measure the algorithm itself.
|
||||
* **Final Verification:** Before committing to a production deployment, the final step should always be to benchmark the top candidates (BLAKE3, and potentially KangarooTwelve or hardware-accelerated SHA-256 depending on the context) directly within the target application and on the target hardware. This is the only way to definitively confirm that the theoretical and micro-benchmark advantages translate to tangible, real-world performance improvements for the specific use case.
|
||||
|
||||
#### **Works cited**
|
||||
|
||||
1. Hashing and Validation of SHA-512 in Python Implementation \- MojoAuth, accessed September 12, 2025, [https://mojoauth.com/hashing/sha-512-in-python/](https://mojoauth.com/hashing/sha-512-in-python/)
|
||||
2. SHA-512 vs Jenkins hash function \- SSOJet, accessed September 12, 2025, [https://ssojet.com/compare-hashing-algorithms/sha-512-vs-jenkins-hash-function/](https://ssojet.com/compare-hashing-algorithms/sha-512-vs-jenkins-hash-function/)
|
||||
3. Hash Functions | CSRC \- NIST Computer Security Resource Center \- National Institute of Standards and Technology, accessed September 12, 2025, [https://csrc.nist.gov/projects/hash-functions](https://csrc.nist.gov/projects/hash-functions)
|
||||
4. SHA-512 vs BLAKE3 \- A Comprehensive Comparison \- MojoAuth, accessed September 12, 2025, [https://mojoauth.com/compare-hashing-algorithms/sha-512-vs-blake3/](https://mojoauth.com/compare-hashing-algorithms/sha-512-vs-blake3/)
|
||||
5. SHA-512 vs BLAKE3 \- SSOJet, accessed September 12, 2025, [https://ssojet.com/compare-hashing-algorithms/sha-512-vs-blake3/](https://ssojet.com/compare-hashing-algorithms/sha-512-vs-blake3/)
|
||||
6. Did you compare performance to SHA512? Despite being a theoretically more secure... | Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=12176915](https://news.ycombinator.com/item?id=12176915)
|
||||
7. SHA-512 faster than SHA-256? \- Cryptography Stack Exchange, accessed September 12, 2025, [https://crypto.stackexchange.com/questions/26336/sha-512-faster-than-sha-256](https://crypto.stackexchange.com/questions/26336/sha-512-faster-than-sha-256)
|
||||
8. If you're familiar with SHA-256 and this is your first encounter with SHA-3 \- Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=33281278](https://news.ycombinator.com/item?id=33281278)
|
||||
9. the official Rust and C implementations of the BLAKE3 cryptographic hash function \- GitHub, accessed September 12, 2025, [https://github.com/BLAKE3-team/BLAKE3](https://github.com/BLAKE3-team/BLAKE3)
|
||||
10. The BLAKE3 Hashing Framework \- IETF, accessed September 12, 2025, [https://www.ietf.org/archive/id/draft-aumasson-blake3-00.html](https://www.ietf.org/archive/id/draft-aumasson-blake3-00.html)
|
||||
11. BLAKE3 \- GitHub, accessed September 12, 2025, [https://raw.githubusercontent.com/BLAKE3-team/BLAKE3-specs/master/blake3.pdf](https://raw.githubusercontent.com/BLAKE3-team/BLAKE3-specs/master/blake3.pdf)
|
||||
12. KangarooTwelve: fast hashing based on Keccak-p, accessed September 12, 2025, [https://keccak.team/kangarootwelve.html](https://keccak.team/kangarootwelve.html)
|
||||
13. SHA-3 \- Wikipedia, accessed September 12, 2025, [https://en.wikipedia.org/wiki/SHA-3](https://en.wikipedia.org/wiki/SHA-3)
|
||||
14. KangarooTwelve: fast hashing based on Keccak-p, accessed September 12, 2025, [https://keccak.team/2016/kangarootwelve.html](https://keccak.team/2016/kangarootwelve.html)
|
||||
15. xxHash \- Extremely fast non-cryptographic hash algorithm, accessed September 12, 2025, [https://xxhash.com/](https://xxhash.com/)
|
||||
16. SHA-256 vs xxHash \- SSOJet, accessed September 12, 2025, [https://ssojet.com/compare-hashing-algorithms/sha-256-vs-xxhash/](https://ssojet.com/compare-hashing-algorithms/sha-256-vs-xxhash/)
|
||||
17. Benchmarks \- xxHash, accessed September 12, 2025, [https://xxhash.com/doc/v0.8.3/index.html](https://xxhash.com/doc/v0.8.3/index.html)
|
||||
18. Meow Hash \- ASecuritySite.com, accessed September 12, 2025, [https://asecuritysite.com/hash/meow](https://asecuritysite.com/hash/meow)
|
||||
19. Cryptanalysis of Meow Hash | Content \- Content | Some thoughts, accessed September 12, 2025, [https://peter.website/meow-hash-cryptanalysis](https://peter.website/meow-hash-cryptanalysis)
|
||||
20. cmuratori/meow\_hash: Official version of the Meow hash, an extremely fast level 1 hash \- GitHub, accessed September 12, 2025, [https://github.com/cmuratori/meow\_hash](https://github.com/cmuratori/meow_hash)
|
||||
21. (PDF) A Comparative Study Between Merkle-Damgard And Other Alternative Hashes Construction \- ResearchGate, accessed September 12, 2025, [https://www.researchgate.net/publication/359190983\_A\_Comparative\_Study\_Between\_Merkle-Damgard\_And\_Other\_Alternative\_Hashes\_Construction](https://www.researchgate.net/publication/359190983_A_Comparative_Study_Between_Merkle-Damgard_And_Other_Alternative_Hashes_Construction)
|
||||
22. Merkle-Damgård Construction Method and Alternatives: A Review \- ResearchGate, accessed September 12, 2025, [https://www.researchgate.net/publication/322094216\_Merkle-Damgard\_Construction\_Method\_and\_Alternatives\_A\_Review](https://www.researchgate.net/publication/322094216_Merkle-Damgard_Construction_Method_and_Alternatives_A_Review)
|
||||
23. Template:Comparison of SHA functions \- Wikipedia, accessed September 12, 2025, [https://en.wikipedia.org/wiki/Template:Comparison\_of\_SHA\_functions](https://en.wikipedia.org/wiki/Template:Comparison_of_SHA_functions)
|
||||
24. KangarooTwelve — PyCryptodome 3.23.0 documentation, accessed September 12, 2025, [https://pycryptodome.readthedocs.io/en/latest/src/hash/k12.html](https://pycryptodome.readthedocs.io/en/latest/src/hash/k12.html)
|
||||
25. Evaluating the Energy Costs of SHA-256 and SHA-3 (KangarooTwelve) in Resource-Constrained IoT Devices \- MDPI, accessed September 12, 2025, [https://www.mdpi.com/2624-831X/6/3/40](https://www.mdpi.com/2624-831X/6/3/40)
|
||||
26. Cryptographic Hash Functions \- Sign in \- University of Bath, accessed September 12, 2025, [https://purehost.bath.ac.uk/ws/files/309274/HashFunction\_Survey\_FINAL\_221011-1.pdf](https://purehost.bath.ac.uk/ws/files/309274/HashFunction_Survey_FINAL_221011-1.pdf)
|
||||
27. What is Blake3 Algorithm? \- CryptoMinerBros, accessed September 12, 2025, [https://www.cryptominerbros.com/blog/what-is-blake3-algorithm/](https://www.cryptominerbros.com/blog/what-is-blake3-algorithm/)
|
||||
28. The BLAKE3 cryptographic hash function | Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=22003315](https://news.ycombinator.com/item?id=22003315)
|
||||
29. Merkle trees instead of the Sponge or the Merkle-Damgård constructions for the design of cryptorgraphic hash functions \- Cryptography Stack Exchange, accessed September 12, 2025, [https://crypto.stackexchange.com/questions/50974/merkle-trees-instead-of-the-sponge-or-the-merkle-damg%C3%A5rd-constructions-for-the-d](https://crypto.stackexchange.com/questions/50974/merkle-trees-instead-of-the-sponge-or-the-merkle-damg%C3%A5rd-constructions-for-the-d)
|
||||
30. kangarootwelve \- crates.io: Rust Package Registry, accessed September 12, 2025, [https://crates.io/crates/kangarootwelve](https://crates.io/crates/kangarootwelve)
|
||||
31. KangarooTwelve and TurboSHAKE \- IETF, accessed September 12, 2025, [https://www.ietf.org/archive/id/draft-irtf-cfrg-kangarootwelve-12.html](https://www.ietf.org/archive/id/draft-irtf-cfrg-kangarootwelve-12.html)
|
||||
32. minio/sha256-simd: Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions for x86 and ARM64 for ARM. On AVX512 it provides an up to 8x improvement (over 3 GB/s per core). SHA Extensions give a performance boost of close to 4x over native. \- GitHub, accessed September 12, 2025, [https://github.com/minio/sha256-simd](https://github.com/minio/sha256-simd)
|
||||
33. BLAKE3 Is an Extremely Fast, Parallel Cryptographic Hash \- InfoQ, accessed September 12, 2025, [https://www.infoq.com/news/2020/01/blake3-fast-crypto-hash/](https://www.infoq.com/news/2020/01/blake3-fast-crypto-hash/)
|
||||
34. SHA instruction set \- Wikipedia, accessed September 12, 2025, [https://en.wikipedia.org/wiki/SHA\_instruction\_set](https://en.wikipedia.org/wiki/SHA_instruction_set)
|
||||
35. A64 Cryptographic instructions \- Arm Developer, accessed September 12, 2025, [https://developer.arm.com/documentation/100076/0100/A64-Instruction-Set-Reference/A64-Cryptographic-Algorithms/A64-Cryptographic-instructions](https://developer.arm.com/documentation/100076/0100/A64-Instruction-Set-Reference/A64-Cryptographic-Algorithms/A64-Cryptographic-instructions)
|
||||
36. I'm already seeing a lot of discussion both here and over at LWN about which has... | Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=22235960](https://news.ycombinator.com/item?id=22235960)
|
||||
37. Speed comparison from the BLAKE3 authors: https://github.com/BLAKE3-team/BLAKE3/... | Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=22022033](https://news.ycombinator.com/item?id=22022033)
|
||||
38. BLAKE (hash function) \- Wikipedia, accessed September 12, 2025, [https://en.wikipedia.org/wiki/BLAKE\_(hash\_function)](https://en.wikipedia.org/wiki/BLAKE_\(hash_function\))
|
||||
39. Maybe don't use Blake3 on Short Inputs : r/cryptography \- Reddit, accessed September 12, 2025, [https://www.reddit.com/r/cryptography/comments/1989fan/maybe\_dont\_use\_blake3\_on\_short\_inputs/](https://www.reddit.com/r/cryptography/comments/1989fan/maybe_dont_use_blake3_on_short_inputs/)
|
||||
40. SHA-3 proposal BLAKE \- Jean-Philippe Aumasson, accessed September 12, 2025, [https://www.aumasson.jp/blake/](https://www.aumasson.jp/blake/)
|
||||
41. KangarooTwelve \- cryptologie.net, accessed September 12, 2025, [https://www.cryptologie.net/article/393/kangarootwelve/](https://www.cryptologie.net/article/393/kangarootwelve/)
|
||||
42. BLAKE3 slower than SHA-256 for small inputs \- Research \- Solana Developer Forums, accessed September 12, 2025, [https://forum.solana.com/t/blake3-slower-than-sha-256-for-small-inputs/829](https://forum.solana.com/t/blake3-slower-than-sha-256-for-small-inputs/829)
|
||||
43. Blake3 and SHA-3's dead-last performance is a bit surprising to me. Me too \- Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=39020081](https://news.ycombinator.com/item?id=39020081)
|
||||
44. \*\>I'm curious about the statement that SHA-3 is slow; \[...\] I wonder how much re... | Hacker News, accessed September 12, 2025, [https://news.ycombinator.com/item?id=14455282](https://news.ycombinator.com/item?id=14455282)
|
||||
45. draft-irtf-cfrg-kangarootwelve-06 \- IETF Datatracker, accessed September 12, 2025, [https://datatracker.ietf.org/doc/draft-irtf-cfrg-kangarootwelve/06/](https://datatracker.ietf.org/doc/draft-irtf-cfrg-kangarootwelve/06/)
|
||||
46. KangarooTwelve: Fast Hashing Based on $${\\textsc {Keccak}\\text {-}p}{}$$KECCAK-p | Request PDF \- ResearchGate, accessed September 12, 2025, [https://www.researchgate.net/publication/325672839\_KangarooTwelve\_Fast\_Hashing\_Based\_on\_textsc\_Keccaktext\_-pKECCAK-p](https://www.researchgate.net/publication/325672839_KangarooTwelve_Fast_Hashing_Based_on_textsc_Keccaktext_-pKECCAK-p)
|
||||
47. KangarooTwelve \- ASecuritySite.com, accessed September 12, 2025, [https://asecuritysite.com/hash/gokang](https://asecuritysite.com/hash/gokang)
|
||||
48. KangarooTwelve: fast hashing based on Keccak-p, accessed September 12, 2025, [https://keccak.team/files/K12atACNS.pdf](https://keccak.team/files/K12atACNS.pdf)
|
||||
49. TurboSHAKE \- Keccak Team, accessed September 12, 2025, [https://keccak.team/files/TurboSHAKE.pdf](https://keccak.team/files/TurboSHAKE.pdf)
|
||||
50. Why does KangarooTwelve only use 12 rounds? \- Cryptography Stack Exchange, accessed September 12, 2025, [https://crypto.stackexchange.com/questions/46523/why-does-kangarootwelve-only-use-12-rounds](https://crypto.stackexchange.com/questions/46523/why-does-kangarootwelve-only-use-12-rounds)
|
||||
51. What advantages does Keccak/SHA-3 have over BLAKE2? \- Cryptography Stack Exchange, accessed September 12, 2025, [https://crypto.stackexchange.com/questions/31674/what-advantages-does-keccak-sha-3-have-over-blake2](https://crypto.stackexchange.com/questions/31674/what-advantages-does-keccak-sha-3-have-over-blake2)
|
||||
52. Comparison between this and KangarooTwelve and M14 · Issue \#19 · BLAKE3-team/BLAKE3 \- GitHub, accessed September 12, 2025, [https://github.com/BLAKE3-team/BLAKE3/issues/19](https://github.com/BLAKE3-team/BLAKE3/issues/19)
|
||||
53. eBASH: ECRYPT Benchmarking of All Submitted Hashes, accessed September 12, 2025, [https://bench.cr.yp.to/ebash.html](https://bench.cr.yp.to/ebash.html)
|
||||
54. SUPERCOP \- eBACS (ECRYPT Benchmarking of Cryptographic Systems), accessed September 12, 2025, [https://bench.cr.yp.to/supercop.html](https://bench.cr.yp.to/supercop.html)
|
||||
55. XKCP/K12: XKCP-extracted code for KangarooTwelve (K12) \- GitHub, accessed September 12, 2025, [https://github.com/XKCP/K12](https://github.com/XKCP/K12)
|
||||
@@ -1,398 +0,0 @@
|
||||
#  BLAKE3 Migration
|
||||
|
||||
---
|
||||
|
||||
## ~~Why touch the cryptographic foundation at all?~~
|
||||
|
||||
~~Performance isn't an academic detail — it's dramatic. On modern hardware, BLAKE3 runs an order of magnitude faster than SHA-512 or SHA-256. For example:~~
|
||||
|
||||
~~In benchmarks, BLAKE3 achieves \~6.8 GiB/s throughput on a single thread, compared to \~0.7 GiB/s for SHA-512. This headroom matters in a ledger system where *every object key is a hash*. Faster hashing reduces CPU load for consensus, verification, and replay. Here, "performance" primarily means faster **keylet** computation (deriving map/index keys from object components) and less compatibility overhead (LUT hits, try‑both‑hashes), **not** improved data locality between neighboring objects.~~
|
||||
|
||||
~~Performance and modern cryptographic hygiene argue strongly for adopting BLAKE3. It's fast, parallelizable, and future-proof. But in this ledger system, the hash is not just a digest: it is the address of every object. Changing the hash function means changing the address of every single entry. This isn't like swapping an internal crypto primitive — it's a rekeying of the entire universe.~~
|
||||
|
||||
## Reality Check: BLAKE3 vs SHA-512 on ARM64 (Sept 2025)
|
||||
|
||||
**TL;DR: BLAKE3 migration complexity isn't justified by the actual performance gains.**
|
||||
|
||||
### Measured Performance (Xahau ledger #16940119)
|
||||
- **Keylets (22-102 bytes)**: BLAKE3 is 0.68x speed of SHA-512 (47% SLOWER)
|
||||
- **Inner nodes (516 bytes)**: BLAKE3 is 0.52x speed of SHA-512 (92% SLOWER)
|
||||
- **Map traversal**: 59-65% of total time (not affected by hash choice)
|
||||
- **Actual hashing**: Only 35-41% of total time
|
||||
|
||||
### Why BLAKE3 Underperforms
|
||||
1. **Small inputs**: Median keylet is 35 bytes; SIMD overhead exceeds benefit
|
||||
2. **2020 software vs 2025 hardware**: BLAKE3 NEON intrinsics vs OpenSSL 3.3.2's optimized SHA-512
|
||||
3. **No parallelism**: Single-threaded SHAMap walks can't use BLAKE3's parallel design
|
||||
4. **SIMD dependency**: Without NEON, BLAKE3 portable C is 2x slower than SHA-512
|
||||
|
||||
### The Verdict
|
||||
With hashing only 35-41% of total time and BLAKE3 actually SLOWER on typical inputs, the migration would:
|
||||
- Increase total validation time by ~10-15%
|
||||
- Add massive complexity (LUTs, heterogeneous trees, compatibility layers)
|
||||
- Risk consensus stability for negative performance gain
|
||||
|
||||
**Recommendation: Abandon BLAKE3 migration. Focus on map traversal optimization instead.**
|
||||
|
||||
## Hashes vs Indexes
|
||||
|
||||
* **Hashes as keys**: Every blob of data in the NodeStore is keyed by a hash of its contents. This makes the hash the *address* for retrieval.
|
||||
* **Hashes as indexes**: In a ShaMap (the Merkle tree that represents ledger state), an `index` is derived by hashing stable identity components (like account ID + other static identifiers). This index determines the path through the tree.
|
||||
* **Takeaway**: Hash = storage key. Index = map position. Both are 256-bit values, but they play different roles.
|
||||
|
||||
*Terminology note*: throughout, **keylet/key** = deterministic map/index key composition from object components; this is unrelated to users’ cryptographic signing keys.
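
To make the distinction concrete, here is a minimal sketch of the two roles a 256-bit value plays. The types are simplified stand-ins (a 64-bit `Digest`, `std::hash` instead of SHA-512Half or BLAKE3), and `HashOptions` only mirrors the `{ledger_index, keylet tag}` shape visible at the call sites elsewhere in this document; none of this is the actual xahaud API.

```cpp
#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>

using Digest = std::uint64_t;  // stand-in for a 256-bit digest

// Placeholder hash (std::hash), NOT SHA-512Half or BLAKE3.
Digest toyHash(std::string const& bytes)
{
    return std::hash<std::string>{}(bytes);
}

struct HashOptions  // assumed shape: ledger index + keylet-type tag
{
    std::uint32_t ledgerIndex;
    std::uint16_t keyletTag;
};

// Role 1: hash as storage key. The NodeStore keys a serialized blob by the
// hash of its contents, so this key changes whenever the contents change.
Digest nodeStoreKey(std::string const& serializedBlob)
{
    return toyHash(serializedBlob);
}

// Role 2: hash as index (keylet). Derived from stable identity components
// (here a keylet tag plus an account ID), so it stays fixed while the
// object's contents change; it determines the path through the SHAMap.
Digest accountIndex(HashOptions const& opts, std::string const& accountId)
{
    std::string preimage;
    preimage += static_cast<char>(opts.keyletTag >> 8);
    preimage += static_cast<char>(opts.keyletTag & 0xff);
    preimage += accountId;
    // opts.ledgerIndex is what would let a hasher switch (SHA-512Half vs
    // BLAKE3) be chosen deterministically per ledger/phase; unused here.
    return toyHash(preimage);
}

int main()
{
    HashOptions opts{16940119, 0x0061};  // illustrative tag value
    std::printf("index=%016llx key=%016llx\n",
        static_cast<unsigned long long>(accountIndex(opts, "rExampleAccount")),
        static_cast<unsigned long long>(nodeStoreKey("serialized-AccountRoot-bytes")));
}
```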
|
||||
|
||||
## LUT at a glance
|
||||
|
||||
A **Lookup Table (LUT)** is an exact‑key alias map used to bridge old and new addressing:
|
||||
|
||||
* **Purpose:** allow lookups by a legacy (old) key to resolve to an object stored under its canonical (new) key — or vice‑versa where strictly necessary.
|
||||
* **Scope:** point lookups only (reads/writes by exact key). Iteration and ordering remain **canonical**; pagination via `next` after a marker requires careful handling (semantics TBD).
|
||||
* **Population:** built during migration and optionally **rebuildable** from per‑SLE cross‑key fields (e.g., `sfLegacyKey` for move, or `sfBlake3Key` for non‑move).
|
||||
* **Directionality in practice:** after the flip you typically need **both directions**, but for different eras:
|
||||
|
||||
* **Pre‑cutover objects (stored at old keys):** maintain **`BLAKE3 → SHA512Half`** so new‑style callers (BLAKE3) can reach old objects.
|
||||
* **Post‑cutover objects (stored at new keys):** optionally offer a grace **`SHA512Half → BLAKE3`** alias so legacy callers can reach new objects. Time‑box this.
|
||||
**Rule of thumb:** annotate the **opposite side of storage** — if storage is **new** (post‑move), annotate **old**; if storage is **old** (non‑move), annotate **new**.
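
The sketch below shows how such a point lookup could consult the two alias directions described above. It assumes a single in-memory state map and two exact-key alias tables; `StateMap`, `AliasLut`, and `readEntry` are illustrative names, not xahaud code.

```cpp
#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

using Key = std::uint64_t;  // stand-in for a 256-bit key
using SLE = std::string;    // stand-in for a serialized ledger entry

struct StateMap             // stand-in for the SHAMap's leaf set
{
    std::unordered_map<Key, SLE> entries;  // keyed by canonical storage key
};

struct AliasLut
{
    // BLAKE3 -> SHA512Half: new-style callers reaching pre-cutover objects.
    std::unordered_map<Key, Key> blake3ToLegacy;
    // SHA512Half -> BLAKE3: grace-window alias for legacy callers reaching
    // post-cutover objects (time-boxed).
    std::unordered_map<Key, Key> legacyToBlake3;
};

std::optional<SLE> readEntry(StateMap const& state, AliasLut const& lut, Key requested)
{
    // 1. Try the key exactly as requested (canonical hit).
    if (auto it = state.entries.find(requested); it != state.entries.end())
        return it->second;

    // 2. New-style caller, old object: BLAKE3 -> SHA512Half.
    if (auto a = lut.blake3ToLegacy.find(requested); a != lut.blake3ToLegacy.end())
        if (auto it = state.entries.find(a->second); it != state.entries.end())
            return it->second;

    // 3. Legacy caller, new object: SHA512Half -> BLAKE3 (grace window only).
    if (auto a = lut.legacyToBlake3.find(requested); a != lut.legacyToBlake3.end())
        if (auto it = state.entries.find(a->second); it != state.entries.end())
            return it->second;

    return std::nullopt;  // true miss; iteration and markers never use aliases
}

int main()
{
    StateMap state;
    state.entries[0x1111] = "old object stored at its legacy key";
    AliasLut lut;
    lut.blake3ToLegacy[0xAAAA] = 0x1111;  // new-style key aliases the old one
    return readEntry(state, lut, 0xAAAA) ? 0 : 1;
}
```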
|
||||
|
||||
## What actually breaks if you “just change the hash”?!
|
||||
|
||||
Every ledger entry’s key changes. That cascades into:
|
||||
|
||||
* **State tree**: SHAMap nodes are keyed by hash; every leaf and inner node address moves.
|
||||
* **Directories**: owner dirs, book dirs, hook state dirs, NFT pages — all are lists of hashes, all must be rebuilt.
|
||||
* **Order and proofs**: Succession, iteration, and proof-of-inclusion semantics all rely on canonical ordering of keys. Mixing old and new hashes destroys proof integrity.
|
||||
* **Caches and history**: Node sharing between ledgers ceases to work; replay and verification of past data must know which hash function was active when.
|
||||
|
||||
## Lazy vs Big Bang
|
||||
|
||||
If you update tree hashes incrementally as state changes, you are effectively doing a **lazy migration**: slowly moving to the new hashing system over time. That implies heterogeneous trees and ongoing complexity. By contrast, a **big bang** migration rekeys everything in a single, well-defined event. Since roughly 50% of hashing compute goes into creating these keys, most of the performance win from BLAKE3 arrives when the generated keys for a given object are used. This can be achieved if the object is **in place at its new key**, **moved within the tree**, or is **reachable via an exact‑key LUT that aliases old→new**.
|
||||
|
||||
*Note:* LUT specifics belong in **Move vs Non‑Move** below. At a high level: aliasing can bridge old/new lookups; iteration/pagination semantics are TBD here and treated separately.
|
||||
|
||||
### Pros and Cons
|
||||
|
||||
**Lazy migration**
|
||||
|
||||
* **Pros**: Less disruptive; avoids one massive compute spike; spreads risk over time.
|
||||
* **Cons**: Creates heterogeneous trees; complicates proofs and historical verification; requires bidirectional LUTs forever; analysts and tools must support mixed keyspaces.
|
||||
|
||||
**Big bang migration**
|
||||
|
||||
* **Pros**: Clean cutover at a known ledger; easier for analysts and tooling; no need to support mixed proofs long-term; maximizes BLAKE3 performance benefits immediately.
|
||||
* **Cons**: One heavy compute event; requires strict consensus choreography; higher risk if validators drift or fail mid-migration.
|
||||
|
||||
It’s important to distinguish between lazy vs big bang, and also between keys (addresses/indexes) vs hashes (content identifiers).
|
||||
|
||||
## Move vs Non‑Move (what does “migrate” change?)
|
||||
|
||||
**Non‑Move (annotate‑only):** objects stay at old SHA512‑Half keys; add `sfBlake3Key` (or similar) recording the would‑be BLAKE3 address; alias lookups via **new→old** LUT; iteration/proofs remain in old key order; minimal compute now, **permanent heterogeneity** and LUT dependence; little perf/ordering win.
|
||||
|
||||
**Move (rekey):** objects are physically rewritten under BLAKE3 keys either **on‑touch** (per‑tx or at **BuildLedger** end) or **all at once** (Big‑Bang). Requires **old→new** LUT for compatibility; choose a place/time (per‑tx vs BuildLedger vs Big‑Bang) and define iteration contract (prefer canonical‑only).
|
||||
|
||||
**Implications to weigh:**
|
||||
|
||||
* **LUT shape:** non‑move needs **new→old** (often also old→new for markers); move prefers **old→new** (temporary). Sunsetting is only realistic in the Big‑Bang case; lazy variants may never fully converge.
|
||||
* **Iteration/pagination:** canonical‑only iteration keeps proofs stable; translating legacy markers implies **bi‑LUT** and more hot‑path complexity.
|
||||
* **Replay:** both need `hash_options{rules(), ledger_index, phase}`; move policies must be consensus‑deterministic.
|
||||
* **Compute/ops:** non‑move is cheap now but never converges; move concentrates work (per‑tx, per‑ledger, or one Big‑Bang) and actually delivers BLAKE3’s **iteration/ordering** and **keylet‑compute** benefits (not data‑locality).
|
||||
|
||||
### Choice axes (what / when / how)
|
||||
|
||||
* **What:** *Move* the object under BLAKE3 **or** *leave in place* and annotate (`sfBlake3Key`).
|
||||
* **When:** at **end of txn** or in **BuildLedger** (alongside `updateNegativeUNL()` / `updateSkipList()`), or **all at once** (Big‑Bang).
|
||||
* **How:** *All at once* requires special network conditions (quiet window + consensus hash); *on modification* spreads risk but prolongs heterogeneity.
|
||||
* **Blob verification note:** a dual‑hash “verify on link” walk works for mixed trees, but you need the same `rules()+phase` plumbing either way, so it doesn’t materially change the engineering lift.
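
As a sketch of the dual-hash "verify on link" idea for a mixed tree: when attaching a fetched child blob to its parent, accept the link if the blob matches the expected key under either hash, and note which era it belongs to. The hash functions and types below are illustrative stand-ins; the real SHAMap verification is considerably more involved.

```cpp
#include <cstdint>
#include <functional>
#include <string>

using Key = std::uint64_t;

// Stand-ins for the two content hashes; not real SHA-512Half / BLAKE3.
Key sha512HalfOf(std::string const& blob) { return std::hash<std::string>{}(blob); }
Key blake3Of(std::string const& blob)     { return std::hash<std::string>{}("b3:" + blob); }

// During the transition, a parent's stored child key may have been produced
// by either hash function. Accept the child if either digest matches.
enum class Era { Legacy, Blake3, Mismatch };

Era verifyOnLink(Key expectedChildKey, std::string const& childBlob)
{
    if (sha512HalfOf(childBlob) == expectedChildKey)
        return Era::Legacy;
    if (blake3Of(childBlob) == expectedChildKey)
        return Era::Blake3;
    return Era::Mismatch;  // corrupt, or keyed under rules we do not know
}

int main()
{
    std::string blob = "serialized inner node";
    return verifyOnLink(blake3Of(blob), blob) == Era::Blake3 ? 0 : 1;
}
```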
|
||||
|
||||
### Client compatibility & new entries
|
||||
|
||||
* **Reality:** flipping keylets changes what clients compute. Old clients may still derive SHA512‑Half; new clients may derive BLAKE3.
|
||||
* **Lazy non‑move (annotate‑only):**
|
||||
|
||||
* **Reads/updates:** accept BLAKE3 via **new→old LUT**; legacy SHA512 keys keep working.
|
||||
* **Creates (policy choice):**
|
||||
|
||||
* **Create‑at‑new (heterogeneous by design):** store under **BLAKE3** (the natural post‑flip behavior). For **legacy callers**, provide a grace alias **`SHA512Half → BLAKE3`** for *new* entries; stamp `sfLegacyKey` (old) on creation so the alias can be rebuilt by a leaf scan.
|
||||
* *Create‑at‑old (alternative until swap):* store under **old** to keep the map homogeneous; if the request included a BLAKE3 key, treat it as a descriptor and translate. *Optional annotation:* add `sfBlake3Key` (new) to make a later `new→old` LUT rebuild trivial. *(In the **move**/post‑swap case, annotate the opposite side: `sfLegacyKey` = old.)*
|
||||
* *Create‑via‑old‑only:* require old keys for creates until swap (simpler server), and document it for SDKs.
|
||||
* *Note:* a LUT alone can’t route a brand‑new create — there’s no mapping yet — so the server must compute the storage key from identity (old or new, per the policy) and record the opposite‑side annotation for future aliasing (sketched below).
|
||||
* **Big‑Bang (move):** creates immediately use **BLAKE3** as canonical; provide **`SHA512Half → BLAKE3`** grace alias for new objects; **old→new** LUT supports stragglers reading old objects by legacy keys.
|
||||
* **Bottom line:** you still need **`rules()` + phase** plumbing and an explicit **create policy**; don’t pick a strategy based purely on “less plumbing”.
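A minimal sketch of the create path under these policies, using stand-in types and hypothetical helpers (`computeLegacyKey`, `computeBlake3Key`) rather than the real keylet API; the point is only that both candidate keys are derived from identity, and the non-storage key is stamped on the new SLE for later aliasing:

```cpp
#include <array>
#include <cstdint>
#include <string>

// Sketch only: Key256/Identity stand in for uint256 and the entry's identity
// components; the two compute* helpers stand in for the real keylet code.
using Key256 = std::array<std::uint8_t, 32>;
struct Identity { std::string account; std::uint32_t seq = 0; };

Key256 computeLegacyKey(Identity const&) { return {}; }  // placeholder: SHA512-Half keylet
Key256 computeBlake3Key(Identity const&) { return {}; }  // placeholder: BLAKE3 keylet

enum class CreatePolicy { CreateAtOld, CreateAtNew };

struct NewEntryKeys
{
    Key256 storageKey;  // key the new SLE is written under
    Key256 annotation;  // opposite-side key stamped on it (sfLegacyKey / sfBlake3Key)
};

// A LUT cannot route a brand-new create (no mapping exists yet), so both keys
// are derived from identity and the unused one is recorded for future aliasing.
NewEntryKeys
chooseCreateKeys(CreatePolicy policy, Identity const& id)
{
    Key256 const oldKey = computeLegacyKey(id);
    Key256 const newKey = computeBlake3Key(id);

    if (policy == CreatePolicy::CreateAtNew)
        return {newKey, oldKey};  // store under BLAKE3; stamp sfLegacyKey = old
    return {oldKey, newKey};      // store under old key; stamp sfBlake3Key = new
}
```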
|
||||
|
||||
### Post‑cutover lookup policy (directional LUT by era)
|
||||
|
||||
* **Old objects (pre‑cutover, stored at old keys):** new‑style callers use **BLAKE3** keys → resolve via **`BLAKE3 → SHA512Half`** (keep as long as needed; deprecate when safe).
|
||||
* **New objects (post‑cutover, stored at new keys):** legacy callers may supply **SHA512‑Half** → resolve via **`SHA512Half → BLAKE3`** *during a grace window*; plan a TTL/deprecation for this path.
|
||||
* **Iteration/pagination:** always return the **canonical storage key** of the era (old for old objects, new for new objects). Document that markers are era‑canonical; aliases are for **point lookups** only.
|
||||
|
||||
### Lazy non‑move: LUT requirements (immediate and ongoing)
|
||||
|
||||
* If keylets emit **BLAKE3** keys before a physical swap, you must have a complete **`new→old`** LUT available at flip time. A cold‑start empty LUT will cause immediate misses because objects still live at old addresses.
|
||||
* The LUT must be **built during a quiet window** by walking the full state and computing BLAKE3 addresses; you cannot populate it “on demand” without global scans.
|
||||
* **Persist the LUT**: typically a sidecar DB keyed by `BLAKE3 → SHA512Half`, or rely on per‑SLE **new‑side annotation** (`sfBlake3Key`) so any node can rebuild the LUT deterministically by a leaf scan. `sfBlake3Key` helps you rebuild; it does **not** remove the need for a ready‑to‑query LUT at flip.
|
||||
* Expect to **carry the LUT indefinitely** in non‑move. Its hit‑rate may drop over time only if you later migrate objects (or switch to Big‑Bang).
|
||||
|
||||
## Heterogeneous vs Homogeneous state trees
|
||||
|
||||
**Homogeneous** means a single canonical keyspace and ordering (one hash algorithm across the whole state tree). **Heterogeneous** means mixed keys/hashes coexisting (some SHA512‑Half, some BLAKE3), even if reads are made to “work.”
|
||||
|
||||
**Why this matters**
|
||||
|
||||
* **Proofs & ordering**: Homogeneous trees keep proofs simple and iteration stable. Heterogeneous trees complicate inclusion proofs and `succ()`/pagination semantics.
|
||||
* **Read path**: With heterogeneity, you either guess (dual‑hash walk), add **hints** (local "unused" nodestore bytes), or introduce **new prefixes** (network‑visible). All add complexity.
|
||||
* **Replay & determinism**: Homogeneous trees let `rules()`+`ledger_index` fully determine hashing. Heterogeneous trees force policy state (when/where items moved) to be consensus‑deterministic and reproduced in replay.
|
||||
* **Caches & sharing**: Node sharing across ledgers is cleaner in a homogeneous regime; heterogeneity reduces reuse and increases compute.
|
||||
* **Operational risk**: Mixed eras inflate your attack and bug surface (LUT correctness, marker translation, proof ambiguity).
|
||||
|
||||
**How you end up heterogeneous**
|
||||
|
||||
* Lazy hashing or “annotate‑only” lazy keys (non‑move).
|
||||
* Staged moves (on‑touch) that never reach full coverage.
|
||||
* Introducing new prefixes and treating both spaces as first‑class for long periods.
|
||||
|
||||
**How to avoid it**
|
||||
|
||||
* **Big‑Bang** swap in `BuildLedger`, then canonical‑only iteration under BLAKE3.
|
||||
* Keep a narrow **old→new** LUT as a safety net (rebuildable from `sfLegacyKey`), and plan deprecation.
|
||||
|
||||
**If you must tolerate heterogeneity (temporarily)**
|
||||
|
||||
* Use **context‑bound hashing** (`hash_options{rules(), ledger_index, phase, classifier}`) everywhere.
|
||||
* Consider **local hint bytes** or **prefixes** only to remove guesswork; define a strict marker policy (normalize to canonical outputs) and accept perf overhead.
|
||||
|
||||
## Options matrix — migration + keylet policies
|
||||
|
||||
### 1) Migration strategy (what physically moves when)
|
||||
|
||||
| Strategy | What moves & when | Tree heterogeneity | LUT needs | Iteration / pagination | Replay & hashing context | Operational risk | Pros | Cons |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------- | ------------------ | -------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------- | -------------------------------------------------- | ------------------------------------------------------------------ | ---------------------------------------------------------------------------------- |
|
||||
| **Big‑Bang (swap in one ledger, in BuildLedger)** | All SLEs rekeyed in a single, quiet, consensus‑gated ledger; stamp `sfLegacyKey` | None after swap | **old→new** only (temporary; rebuildable from `sfLegacyKey`) | Immediately canonical under BLAKE3; simple markers | Straightforward (`rules()`, `ledger_index`, `phase` flip once) | One heavy compute event; needs strict choreography | Clean proofs & ordering; simplest for tools; fast path to perf win | Requires quiet period + consensus hash; “all‑eggs‑one‑basket” ledger |
|
||||
| **Lazy keys — moved, per‑tx** | Touched SLEs are **moved** to BLAKE3 keys during tx commit | Long‑lived | **old→new** and often **new→old** (for markers) | Mixed keys; must normalize or translate; highest complexity | Hardest: movement timing is per‑tx; requires full `hash_options` everywhere | Low per‑ledger spike, but constant complexity | Spreads compute over time | Permanent heterogeneity; iterator/marker headaches; error‑prone |
|
||||
| **Lazy keys — *not* moved, per‑tx (annotate only)** | No SLEs move; touched entries get `sfBlake3Key` / annotation only | Permanent | **new→old** (lookups by BLAKE3 must alias to old), often also **old→new** if you normalize outputs | Iteration remains in **old** key order unless you add translation; markers inconsistent without bi‑LUT | Hard: you never converge; replay must honor historic “no‑move” semantics | Low per‑ledger spike | Zero relocation churn; simplest writes | You never get canonical BLAKE3 ordering/proofs; LUT forever; limited perf win |
|
||||
| **Lazy keys — moved, BuildLedger** | Touched SLEs are **moved** at end of ledger in BuildLedger | Medium‑lived | **old→new** (likely) and sometimes **new→old** (if you want legacy markers to resume cleanly) | Still mixed; easier to normalize to canonical at ledger boundary | Moderate: movement is per‑ledger; still need `hash_options` | Lower spike than Big‑Bang; simpler than per‑tx | Centralized move step; cleaner tx metadata | Still heterogeneous until coverage is high; LUT on hot paths |
|
||||
| **Lazy keys — *not* moved, BuildLedger (annotate only)** | No SLEs move; annotate touched entries in BuildLedger only | Permanent | **new→old** (and possibly **old→new** if you normalize) | Iteration stays in **old** order; translation needed for consistency | Moderate: policy is per‑ledger but never converges | Lowest spike | Cleanest ops; no relocation diffs | Same drawbacks as per‑tx annotate‑only: permanent heterogeneity and LUT dependence |
|
||||
|
||||
**Notes:**
|
||||
|
||||
* Prefer **canonical‑only iteration** (return new keys) and accept legacy markers as input → reduces need for bidirectional LUT.
|
||||
* If you insist on round‑tripping legacy markers, you’ll need **bi‑directional LUT** and iterator translation.
|
||||
* For **annotate‑only (non‑move)** variants: if you choose **Policy C (flip globally at ledger n)**, you **must** prebuild a complete `new→old` LUT for the entire tree before the flip. To avoid this empty‑LUT hazard, choose **Policy A (flip at swap)** until the physical move occurs.
|
||||
|
||||
#### 1a) Big‑Bang — non‑move (alias‑only) at a glance
|
||||
|
||||
| What moves & when | Tree heterogeneity | LUT needs | Iteration/pagination | Pros | Cons |
|
||||
| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **No storage move at cutover; global keylet flip; annotate all SLEs with `sfBlake3Key`; full `new→old` LUT ready or rebuildable by leaf scan** | Ledger map: **old** for legacy, **new** for new objects; NodeStore blobs: **full‑tree rewrite** (choose a single blob‑hash algo post‑cutover to avoid guessing) | Permanent `new→old`; **rebuildable from `sfBlake3Key` by optimized leaf parser** | Old‑order; document marker policy (no translation) | No **map index** relocation; flip is clean; **LUT always accessible**; rollback = behavior flip only if LUT retained | Proofs/ordering stay old; permanent LUT; **one‑time I/O spike** from full‑tree rewrite (mitigated by preflushing background tree); no homogeneous BLAKE3 tree |
|
||||
|
||||
### 2) Keylet flip policy (what keylets *emit*)
|
||||
|
||||
| Policy | What keylets return | Empty‑LUT risk | Need global LUT upfront? | Client‑visible behavior | Pros | Cons |
|
||||
| ----------------------------------- | --------------------------------------- | --------------------- | ------------------------------- | --------------------------------------- | -------------------------------------- | ---------------------------------------------------------- |
|
||||
| **A. Flip at swap only** | Old keys pre‑swap; new keys post‑swap | None | No | Single flip; stable semantics | Simplest; no prep LUT window | Requires Big‑Bang or near‑equivalent swap moment |
|
||||
| **B. Flip per‑SLE (when migrated)** | New for migrated entries; old otherwise | None | No | Mixed outputs; must normalize iteration | No global LUT build; smoother ramp | Clients see mixture unless normalized; still heterogeneous |
|
||||
| **C. Flip globally at ledger n** | New everywhere from n | **High** if LUT empty | **Yes** (build in quiet period) | Clean switch for clients | Global behavior is uniform immediately | Must precompute `new→old` LUT; higher prep complexity |
|
||||
|
||||
### 3) Hashing decision representation (perf & memory)
|
||||
|
||||
| Option | What changes | Memory/Perf impact | ABI impact | Benefit |
|
||||
| -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- | --------------- | -------------------------------------------------- |
|
||||
| **0. Context‑bound keylets (recommended default)** | Keep returning 32‑byte keys; keylets choose SHA512‑Half vs BLAKE3 using a small `HashCtx` (`rules()`, `ledger_index`, `phase`, `classifier`) | Tiny branch; no heap; cache optional per‑View | None | Avoids empty‑LUT trap; simplest to roll out |
|
||||
| **1. Thin symbolic descriptors (stack‑only)** | Keylets can return a small descriptor; callers `resolve(desc, ctx)` immediately | Minimal; POD structs; optional tiny cache | None externally | Centralizes decision; testable; still light‑weight |
|
||||
| **2. Full symbolic (iterators/markers only)** | Iterators carry `{desc, resolved}` to re‑resolve under different contexts | Small per‑iterator cache | None externally | Makes pagination/replay robust without broad churn |
|
||||
|
||||
### 4) NodeStore hinting for heterogeneous reads (only if you *must* support mixed trees)
|
||||
|
||||
| Approach | Scope | Pros | Cons |
|
||||
| ------------------------------------------- | --------------- | -------------------------------------------------- | ---------------------------------------------------------------------- |
|
||||
| **No hints (dual‑hash walk)** | Network‑safe | Simple to reason about; no store changes | Costly: try‑both‑hashes while walking; awkward |
|
||||
| **Local hint bytes (use 8–9 unused bytes)** | Local only | Eliminates guesswork on a node; cheap to implement | Not portable; doesn’t show up in proofs; still need amendment plumbing |
|
||||
| **New hash prefixes in blobs** | Network‑visible | Clear namespace separation; easier debugging | Prefix explosion; code churn; proof/back‑compat complexity |
|
||||
|
||||
### 5) Recommended defaults
|
||||
|
||||
* **Migration**: Big‑Bang in `BuildLedger` with quiet period + consensus hash; stamp `sfLegacyKey`.
|
||||
* **Keylets**: Policy **A** (flip at swap) or **B** if you insist on staging; normalize iteration to canonical.
|
||||
* **LUT**: **old→new** exact‑key alias as a temporary safety net; rebuildable from `sfLegacyKey`.
|
||||
* **Hashing decision**: **Option 0 (context‑bound keylets)**; reserve symbolics for iterators only if needed.
|
||||
|
||||
## Heterogeneous trees and possible NodeStore tweaks
|
||||
|
||||
When loading from the NodeStore with a root hash, in principle you could walk down the tree and try hashing each blob’s contents to check whether it matches. At each link, you verify the blob by recomputing its hash. In theory you could even try both SHA-512 Half and BLAKE3 until the structure links up. This would eventually work, but it is inefficient.
|
||||
|
||||
To avoid that inefficiency, one idea is to tweak the NodeStore blobs themselves. There are 8–9 unused bytes (currently stored as zeros) that could be repurposed as a hint. Another option is to change the stored hash prefixes, which would act as explicit namespace markers separating SHA-512 and BLAKE3 content. With the ledger index also available, heuristics could guide which algorithm to use. But none of this removes the need for amendment plumbing — you still have to know if the cutover has occurred.
|
||||
|
||||
### Versioned prefixes (use the spare byte)
|
||||
|
||||
**Goal:** eliminate guessing in mixed/historical contexts by making the blob self‑describing.
|
||||
|
||||
* **Design:** keep the 3‑letter class tag and use the 4th byte as an **algorithm version**.
|
||||
|
||||
```cpp
|
||||
enum class HashPrefix : std::uint32_t {
|
||||
innerNode_v0 = detail::make_hash_prefix('M','I','N', 0x00), // SHA512Half
|
||||
innerNode_v1 = detail::make_hash_prefix('M','I','N', 0x01), // BLAKE3
|
||||
leafNode_v0 = detail::make_hash_prefix('M','L','N', 0x00),
|
||||
leafNode_v1 = detail::make_hash_prefix('M','L','N', 0x01),
|
||||
// add tx/dir variants only if their blob hashing changes too
|
||||
};
|
||||
```
|
||||
|
||||
* **Read path:** fetch by hash as usual; after you read the blob, the prefix **discriminates** the hashing algorithm used to produce that key. No dual‑hash trial needed to verify/link.
|
||||
* **Write path:** when (re)serializing a node, choose the version byte from `hash_options.rules()/phase`; parent/child content stays consistent because each node carries its own version.
|
||||
* **Pros:** zero‑guess verification; offline tools can parse blobs without external context; makes mixed eras debuggable.
|
||||
* **Cons:** network‑visible change (new prefixes); code churn where prefixes are assumed fixed; doesn’t solve keylet/index aliasing or iteration semantics — it only removes blob‑hash guessing.
|
||||
|
||||
**Note:** you can also avoid guessing entirely by keeping **one blob‑hash algorithm per ledger** (homogeneous per‑ledger eras). Then `rules()+ledger_index` suffices. Versioned prefixes mainly help offline tools and any design that tolerates intra‑ledger mixing.
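As a minimal sketch, one way the `detail::make_hash_prefix` helper assumed by the enum above could pack the tag and version byte, together with the read-path discrimination; the packing order and the helper itself are assumptions for illustration, not the current rippled `HashPrefix` scheme:

```cpp
#include <cstdint>

namespace detail {
// One possible packing: 3-letter class tag in the high bytes, algorithm
// version in the low byte (assumption for illustration only).
constexpr std::uint32_t
make_hash_prefix(char a, char b, char c, std::uint8_t version)
{
    return (static_cast<std::uint32_t>(a) << 24) |
        (static_cast<std::uint32_t>(b) << 16) |
        (static_cast<std::uint32_t>(c) << 8) | version;
}
}  // namespace detail

enum class BlobAlgo { sha512Half, blake3 };

// Read path: after fetching a blob by key, the version byte of its stored
// prefix says which algorithm produced that key; no dual-hash trial needed.
constexpr BlobAlgo
algoFromPrefix(std::uint32_t prefix)
{
    return (prefix & 0xFFu) == 0x00 ? BlobAlgo::sha512Half : BlobAlgo::blake3;
}

static_assert(
    algoFromPrefix(detail::make_hash_prefix('M', 'I', 'N', 0x00)) ==
        BlobAlgo::sha512Half,
    "v0 prefixes verify as SHA512-Half");
static_assert(
    algoFromPrefix(detail::make_hash_prefix('M', 'L', 'N', 0x01)) ==
        BlobAlgo::blake3,
    "v1 prefixes verify as BLAKE3");
```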
|
||||
|
||||
### Lazy migration headaches
|
||||
|
||||
If you attempt a lazy migration, you must decide how keys are rehashed. Is it done during metadata creation at the end of transactions? Do you rely on a LUT to map between new and old indexes? If so, where is this LUT state stored? Another idea is to embed a `LedgerIndexBlake3` in entries, so that keylet helpers can create new indexes while CRUD operations translate through a LUT. But this complicates pagination markers and functions like `ReadView::succ()` that return natural keys. You risk situations where the system must be aware of multiple keys per entry.
|
||||
|
||||
Questions like pagination markers and `ReadView::succ()` make this even thornier. One approach might be to encode the hash type in the LUT, and maintain it bidirectionally, so when iteration returns a canonical key it can be translated back to the old form if needed. But this doubles the complexity and still forces every path to be LUT‑aware.
|
||||
|
||||
By contrast, in the **Big Bang** version the LUT is just a safety net, handling things that could not be automatically rewritten. This is simpler for analysts and avoids perpetual cross-key complexity.
|
||||
|
||||
### Why it feels like a headache
|
||||
|
||||
Trying to lazily migrate keys means constantly juggling questions:
|
||||
|
||||
* Do you move items immediately when the amendment is enabled, or only on first touch?
|
||||
* If you move them, when exactly: during metadata creation, during BuildLedger along with the SkipList?
|
||||
* How do you keep CRUD ops working while also updating LUT state?
|
||||
* How do you handle pagination markers and `succ()` consistently if multiple keys exist? You would need a bidirectional LUT.
|
||||
|
||||
Every option adds complexity, requires bidirectional LUTs, and forces awareness of dual keyspaces everywhere. This is why the lazy path feels like a perpetual headache, while the Big Bang keeps the pain contained to one well‑known cutover.
|
||||
|
||||
## The Big Bang
|
||||
|
||||
From here onward, we focus on the **Big‑Bang** approach (one‑ledger atomic rekey). Lazy/staged variants are summarized above.
|
||||
|
||||
### Why Big‑Bang is preferred here
|
||||
|
||||
* **Homogeneous immediately:** one canonical keyspace the very next ledger → simple proofs, stable iteration/pagination, no dual‑key semantics.
|
||||
* **No empty‑LUT window:** keylets flip at the swap; the LUT is **old→new** only, narrow in scope, and realistically deprecable.
|
||||
* **Deterministic & replay‑friendly:** a single, well‑known cutover ledger anchors tooling and historical verification.
|
||||
* **Operationally contained risk:** compute is concentrated into the quiet window with explicit consensus checkpoints (single or double), not smeared across months.
|
||||
* **Cleaner dev/ops surface:** fewer code paths need LUT/translation logic; easier to reason about `succ()`/markers and caches.
|
||||
|
||||
### Variant: Big‑Bang “non‑move” (alias‑only swap)
|
||||
|
||||
**What it is:** at the cutover ledger, **annotate the entire state tree** by stamping every SLE with its BLAKE3 address (e.g., `sfBlake3Key`). **Do not** rewrite storage keys. During the quiet window, prebuild a complete `new→old` LUT **or** rely on the new field so any node can rebuild the LUT deterministically by scanning leaves with an optimized parser. Flip keylets to emit BLAKE3. Optionally commit a small on‑ledger **annotation/LUT commitment hash** in `MigrationState` so operators can verify their sidecar.
|
||||
|
||||
**How it behaves:** point lookups by BLAKE3 resolve via the LUT; writes/erases resolve to the canonical **old** storage key before touching disk; **new objects** are stored under **BLAKE3** keys (post‑flip); legacy callers may be served by a grace **`SHA512Half → BLAKE3`** alias for *new* objects. Iteration/pagination remain in the old order for legacy entries (document marker policy).
|
||||
|
||||
**I/O reality & mitigation:**
|
||||
|
||||
* Annotating every leaf **changes its bytes**, forcing a **full‑tree NodeStore rewrite** (leaf blob hashes change; inner nodes update). This is a **mass write**, even though map indexes don’t relocate.
|
||||
* Mitigate the spike by **streaming/staged flush** of the staging tree during BuildLedger (chunked passes), back‑pressure on caches, and rate‑limited node writes; total bytes remain ~“rewrite the tree once.”
|
||||
|
||||
**LUT reconstruction paths:**
|
||||
|
||||
* **From annotation (fastest):** for each leaf, read `sfBlake3Key` and the current (old) key; record `BLAKE3 → old` (see the sketch after this list).
|
||||
* **From recompute (belt‑and‑suspenders):** recompute BLAKE3 via keylet helpers from identity components and pair with the observed old key.
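A minimal sketch of the annotation-driven rebuild, with stand-ins for `uint256` and the SHAMap leaf walk; the real implementation would read `sfBlake3Key` through the SLE API:

```cpp
#include <array>
#include <cstdint>
#include <functional>
#include <map>
#include <optional>

// Sketch only: stand-ins for uint256 and the SLE/SHAMap APIs.
using Key256 = std::array<std::uint8_t, 32>;

struct LeafView
{
    Key256 oldKey;                    // key the leaf is currently stored under
    std::optional<Key256> blake3Key;  // sfBlake3Key annotation, if stamped
};

// Placeholder for a full state-tree walk (e.g. a SHAMap leaf visitor).
using LeafWalker = std::function<void(std::function<void(LeafView const&)>)>;

// Rebuild the new→old LUT deterministically from the annotated tree.
std::map<Key256, Key256>
rebuildNewToOldLUT(LeafWalker const& forEachLeaf)
{
    std::map<Key256, Key256> lut;
    forEachLeaf([&](LeafView const& leaf) {
        if (leaf.blake3Key)  // annotated during the cutover pass
            lut.emplace(*leaf.blake3Key, leaf.oldKey);
        // The belt-and-suspenders variant would also recompute the BLAKE3 key
        // from the leaf's identity and cross-check it against the annotation.
    });
    return lut;
}
```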
|
||||
|
||||
**Pros:** no **map index** relocation for legacy entries; minimal end‑user surprise; clean flip semantics; **LUT always reconstructible** from the annotated tree; **rollback is behavioral‑only if the LUT is retained**.
|
||||
|
||||
**Cons:** ordering/proofs remain old indefinitely; LUT becomes permanent; you forgo a homogeneous BLAKE3 tree and its simplifications; **full‑tree NodeStore rewrite** (leaf annotation changes bytes → new blob hashes → inner nodes update) causing a one‑time I/O spike.
|
||||
|
||||
**Rollback reality:** Once clients rely on BLAKE3 keys on the wire, a “rollback” without a LUT breaks them. Practical rollback means flipping keylet behavior back to SHA512‑Half **and** continuing to serve BLAKE3 lookups via the LUT indefinitely (or performing a reverse‑migration). In other words, rollback is only “easy” if you accept a **permanent LUT**.
|
||||
|
||||
**When to pick:** you want Big‑Bang’s clean flip and operational containment, but can’t (or don’t want to) rewrite the entire state tree; you still want a deterministic, cheap way to rebuild the LUT by scanning.
|
||||
|
||||
### How to message this (without scaring users)
|
||||
|
||||
**Elevator pitch**
|
||||
|
||||
> We’re flipping key derivation to BLAKE3 for *new* addresses, but we’re **not relocating existing entries**. We annotate the tree in a maintenance window, so old data stays where it is, new data goes to BLAKE3 addresses, and both key forms work via an alias. Transactions, TxIDs, and signatures don’t change.
|
||||
|
||||
**What users/operators should expect**
|
||||
|
||||
* **No surprise breakage:** Old clients that synthesize SHA512‑Half keys still read old objects; new clients can use BLAKE3 keys everywhere (old objects resolve via alias).
|
||||
* **New vs old objects:** Old objects remain at their old locations; **new objects** are stored at **BLAKE3** locations. A **grace alias** can accept SHA512‑Half for *new* objects for a limited time.
|
||||
* **Ordering/proofs unchanged for old entries:** Iteration order and proofs remain canonical‑old for legacy entries. No bidirectional iteration translation.
|
||||
* **TxIDs & signing stay the same:** Transaction IDs and signing digests are **unchanged**; do **not** hand‑derive ledger indexes—use keylet APIs.
|
||||
* **One‑time write spike (planned):** Annotating every leaf causes a **single full‑tree blob rewrite** during the quiet window; we stage/stream this as part of `BuildLedger`.
|
||||
|
||||
**Soundbite**
|
||||
|
||||
> *“Not a scary rekey-everything rewrite.”* It’s a one‑time annotation and an API flip: old stays reachable, new is faster, and we give legacy callers a grace window.
|
||||
|
||||
### Decision & next steps (short list)
|
||||
|
||||
1. **Amendment & timing:** finalize `featureBlake3Migration`, `MIGRATION_TIME`, and quiet‑period length.
|
||||
2. **BuildLedger swap/annotate pass:** implement two‑pass **rekey** (plan → commit), **or** two‑pass **annotate** (stamp `sfBlake3Key` on all SLEs). For rekey, stamp `sfLegacyKey` and materialize **old→new** LUT; for non‑move, stamp `sfBlake3Key` and materialize **new→old** LUT (both rebuildable by leaf scan).
|
||||
3. **API rules:** reads/writes = canonical‑first, LUT‑on‑miss (point lookups only); **iteration is canonical‑only**; document marker semantics.
|
||||
4. **Hash context plumbing:** ensure `hash_options{rules(), ledger_index, phase, classifier}` are available down to `SHAMap::getHash()` and relevant callers.
|
||||
5. **Consensus choreography:** pick **single** vs **double** hash checkpoint; wire pseudo‑tx for the pre‑hash if using two‑step.
|
||||
6. **Telemetry & deprecation:** ship metrics for LUT hit‑rate and schedule a sunset once hits are negligible.
|
||||
7. **Test plan:** simulate slow validators, partial LUT rebuilds, replay across the swap, and hook workloads with hardcoded keys.
|
||||
|
||||
## Governance first: permission to cut over
|
||||
|
||||
Such a migration cannot be unilateral. An amendment (`featureBlake3Migration`) acts as the governance switch, enabling the network to authorize the cutover. This amendment does not itself rekey the world, but it declares consensus intent: from a certain point, ledgers may be rebuilt under the new rules.
|
||||
|
||||
A pseudo-transaction (e.g. `ttHASH_MIGRATION`) provides the on-ledger coordination. It marks the trigger point, updates the migration state SLE, and ensures every validator knows exactly *when* and *what* to execute.
|
||||
|
||||
## Why not just do it in the pseudo-transaction?
|
||||
|
||||
A naive attempt to treat the entire migration as a simple pseudo-transaction — a one-off entry applied like any other — would explode into metadata churn, duplicate entries, and lost referential integrity. The scale of rekeying every SLE makes it unsuitable for a normal transaction context; it has to run in a special execution venue like `BuildLedger` to remain atomic and manageable.
|
||||
|
||||
## Choose the battlefield: BuildLedger
|
||||
|
||||
The right place to run the migration is inside `BuildLedger` — after applying the (quiet) transaction set, and before finalization. This avoids flooding transaction metadata with millions of deletes and creates, and guarantees atomicity: one ledger before = SHA-512 Half; one ledger after = BLAKE3.
|
||||
|
||||
This is also exactly where other ledger-maintenance updates happen: for example `updateNegativeUNL()` runs when processing a flag ledger if the feature is enabled, and `updateSkipList()` is invoked just before flushing SHAMap nodes to the NodeStore. By piggybacking the migration here, it integrates cleanly into the existing lifecycle:
|
||||
|
||||
```cpp
|
||||
if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL))
|
||||
{
|
||||
built->updateNegativeUNL();
|
||||
}
|
||||
|
||||
OpenView accum(&*built);
|
||||
applyTxs(accum, built);
|
||||
accum.apply(*built);
|
||||
|
||||
built->updateSkipList();
|
||||
|
||||
// Flush modified SHAMap nodes to NodeStore
|
||||
built->stateMap().flushDirty(hotACCOUNT_NODE);
|
||||
built->txMap().flushDirty(hotTRANSACTION_NODE);
|
||||
built->unshare();
|
||||
```
|
||||
|
||||
Inserting the BLAKE3 migration pass into this sequence lets it run atomically alongside the skip-list and NegativeUNL updates, ensuring the new canonical tree is finalized consistently.
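A hedged sketch of the call site, continuing the fragment above: `featureBlake3Migration` is the amendment discussed later in this document, while `migrationIsDue()` and `migrateToBlake3()` are hypothetical helpers named here only to mark where the pass would run.

```cpp
// Sketch only: hypothetical insertion point inside BuildLedger, after the
// transaction set is applied and before dirty SHAMap nodes are flushed.
built->updateSkipList();

if (built->rules().enabled(featureBlake3Migration) && migrationIsDue(*built))
{
    // Two-pass rekey (or annotate-only pass); see the sketch further below.
    migrateToBlake3(*built);
}

built->stateMap().flushDirty(hotACCOUNT_NODE);
built->txMap().flushDirty(hotTRANSACTION_NODE);
built->unshare();
```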
|
||||
|
||||
## Hashing and consensus choreography
|
||||
|
||||
It may make sense to stretch the choreography into more than one consensus checkpoint, especially given the amount of compute involved. A possible flow:
|
||||
|
||||
* **Quiet period** — block transactions so everyone is aligned.
|
||||
* **Phase 1: Hash the static tree** — compute a BLAKE3 hash of the ledger state, excluding churny structures like skip lists and the migration state.
|
||||
* **Consensus** — validators agree on this static-hash checkpoint.
|
||||
* **Phase 2: Hash the full tree** — compute the full state tree hash under BLAKE3.
|
||||
* **Consensus** — converge again on the complete view.
|
||||
* **Atomic swap** — only after both steps succeed, rewrite the ledger under new keys.
|
||||
|
||||
This extra step could make it easier for validators to stay in sync without network drift, because they checkpoint on a smaller, stable hash before tackling the full-tree rebuild. It reduces wasted compute if things diverge. The downside is protocol complexity: two ballots instead of one. But given the gnarliness of concurrent full-tree rekeying, a double consensus phase could be safer in practice.
|
||||
|
||||
Supporting this implies the hash function must be aware of more than just `ledger_index`; it also needs `rules()` (to know if the amendment is enabled) and an explicit state flag indicating whether the swap is pending, in progress, or complete. To safely support background builds of multiple tree variants, `hash_options` must be plumbed everywhere — from `SHAMap::getHash()` down into all call sites, and even up into callers.
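A minimal sketch of what that plumbed context could look like; the field names follow how `hash_options{rules(), ledger_index, phase, classifier}` is written throughout this document, and the real struct may well carry `rules()` directly instead of a precomputed flag:

```cpp
#include <cstdint>

// Sketch only: a small, trivially copyable context passed to every hashing
// decision point (keylet helpers, SHAMap::getHash() callers, iterators).
enum class MigrationPhase : std::uint8_t { PreSwap, QuietPeriod, PostSwap };

struct HashCtx
{
    bool blake3Enabled;         // e.g. rules().enabled(featureBlake3Migration)
    std::uint32_t ledgerIndex;  // ledger being built or replayed
    MigrationPhase phase;       // swap pending, in progress, or complete
    std::uint16_t classifier;   // e.g. KEYLET_ACCOUNT, KEYLET_HOOK_STATE, ...
};

enum class KeyAlgo { sha512Half, blake3 };

// Single decision point: callers never hard-code an algorithm, so replay of
// pre-swap ledgers and background builds of both tree variants stay correct.
constexpr KeyAlgo
chooseKeyAlgo(HashCtx const& ctx)
{
    return (ctx.blake3Enabled && ctx.phase == MigrationPhase::PostSwap)
        ? KeyAlgo::blake3
        : KeyAlgo::sha512Half;
}
```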
|
||||
|
||||
## Two-pass rekey with a safety rope
|
||||
|
||||
* **Pass 1 (plan)**: Walk the state tree, compute new BLAKE3 keys, build an in-memory LUT (old→new), and stamp each SLE with its legacy key (`sfLegacyKey`).
|
||||
* **Pass 2 (commit)**: Rebuild the SHAMap with BLAKE3 keys, rewrite all directories and secondary structures from the LUT, and finalize the new canonical tree.
|
||||
|
||||
This two-pass structure ensures determinism and lets every validator converge on the same new map without risk of divergence.
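A hedged sketch of the two passes over stand-in types (the real code walks the SHAMap and rewrites directories through the LUT); it shows the shape of the work, plan then commit, not the actual interfaces:

```cpp
#include <array>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Sketch only: stand-ins for uint256, serialized SLEs, and the state tree.
using Key256 = std::array<std::uint8_t, 32>;
using Blob = std::vector<std::uint8_t>;
struct Leaf { Key256 oldKey; Blob data; };

Key256 computeBlake3Key(Leaf const&) { return {}; }             // placeholder: BLAKE3 keylet from identity
Blob stampLegacyKey(Blob data, Key256 const&) { return data; }  // placeholder: add sfLegacyKey to the SLE

struct MigrationPlan
{
    std::map<Key256, Key256> oldToNew;             // temporary safety-net LUT
    std::vector<std::pair<Key256, Blob>> rekeyed;  // new key -> stamped blob
};

// Pass 1 (plan): read-only walk. Compute every new key, build the LUT,
// and prepare the stamped blobs, without touching the live tree.
MigrationPlan
planRekey(std::vector<Leaf> const& leaves)
{
    MigrationPlan plan;
    for (auto const& leaf : leaves)
    {
        Key256 const newKey = computeBlake3Key(leaf);
        plan.oldToNew.emplace(leaf.oldKey, newKey);
        plan.rekeyed.emplace_back(newKey, stampLegacyKey(leaf.data, leaf.oldKey));
    }
    return plan;
}

// Pass 2 (commit): insert everything under BLAKE3 keys into a fresh map.
// Directories and other cross-references are rewritten via plan.oldToNew.
std::map<Key256, Blob>
commitRekey(MigrationPlan const& plan)
{
    std::map<Key256, Blob> rebuilt;
    for (auto const& [key, blob] : plan.rekeyed)
        rebuilt.emplace(key, blob);
    return rebuilt;
}
```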
|
||||
|
||||
## Keep consensus boring during the scary bit
|
||||
|
||||
Migration must not race against normal transaction flow. The procedure anchors on **network time**, not ledger index. Once a ledger closes with `closeTime ≥ MIGRATION_TIME`, the network enters a quiet period: all user and pseudo-transactions are blocked, only trivial skip list mechanics advance. During this window, everyone builds the same hash in the background.
|
||||
|
||||
When consensus converges on the special BLAKE3 hash (excluding skip lists and migration state), it appears in a validated ledger. In the next ledger, the atomic swap happens — one big bang, then back to normal life.
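A minimal sketch of the time-anchored gate, assuming a `MIGRATION_TIME` constant and a completion flag read from the migration state SLE; both are assumptions of this document rather than existing code:

```cpp
#include <cstdint>

// Sketch only. Times are seconds since the Ripple epoch; MIGRATION_TIME is an
// assumed, consensus-agreed constant baked into the amendment logic.
constexpr std::uint32_t MIGRATION_TIME = 800000000;

enum class TxGate { Accept, RejectQuietPeriod };

// Once a ledger closes at or after MIGRATION_TIME, block user and pseudo
// transactions until the swap ledger has validated and the migration state
// reports completion; only trivial ledger mechanics advance in between.
TxGate
gateTransaction(std::uint32_t parentCloseTime, bool migrationComplete)
{
    if (parentCloseTime >= MIGRATION_TIME && !migrationComplete)
        return TxGate::RejectQuietPeriod;
    return TxGate::Accept;
}
```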
|
||||
|
||||
## Owning the ugly edges (hooks and hardcoded keys)
|
||||
|
||||
Hooks may carry hardcoded 32-byte constants. Detecting them with static analysis is brittle; runtime tracing is too heavy. Instead, the LUT strategy provides a compatibility shim: lookups can still resolve old keys, while all new creations require canonical BLAKE3 keys. Over time, policy can deprecate this fallback.
|
||||
|
||||
---
|
||||
@@ -1,99 +0,0 @@
|
||||
# BLAKE3 vs SHA-512 Performance Analysis for Ripple Data Structures
|
||||
|
||||
## Executive Summary
|
||||
|
||||
This document presents empirical performance comparisons between BLAKE3 and SHA-512 (specifically SHA512Half) when hashing Ripple/Xahau blockchain data structures. Tests were conducted on Apple Silicon (M-series) hardware using real-world data distributions.
|
||||
|
||||
## Test Environment
|
||||
|
||||
- **Platform**: Apple Silicon (ARM64)
|
||||
- **OpenSSL Version**: 1.1.1u (likely without ARMv8.2 SHA-512 hardware acceleration)
|
||||
- **BLAKE3**: C reference implementation with NEON optimizations
|
||||
- **Test Data**: Production ledger #16940119 from Xahau network
|
||||
|
||||
## Results by Data Type
|
||||
|
||||
### 1. Keylet Lookups (22-102 bytes, 35-byte weighted average)
|
||||
|
||||
Keylets are namespace discriminators used for ledger lookups. The SHAMap requires high-entropy keys for balanced tree structure, necessitating cryptographic hashing even for small inputs.
|
||||
|
||||
**Distribution:**
|
||||
- 76,478 ACCOUNT lookups (22 bytes)
|
||||
- 41,740 HOOK lookups (22 bytes)
|
||||
- 19,939 HOOK_STATE_DIR lookups (54 bytes)
|
||||
- 17,587 HOOK_DEFINITION lookups (34 bytes)
|
||||
- 17,100 HOOK_STATE lookups (86 bytes)
|
||||
- Other types: ~15,000 operations (22-102 bytes)
|
||||
|
||||
**Performance (627,131 operations):**
|
||||
- **BLAKE3**: 128 ns/hash, 7.81M hashes/sec
|
||||
- **SHA-512**: 228 ns/hash, 4.38M hashes/sec
|
||||
- **Speedup**: 1.78x
|
||||
|
||||
### 2. Leaf Node Data (167-byte average)
|
||||
|
||||
Leaf nodes contain serialized ledger entries (accounts, trustlines, offers, etc.). These represent the actual state data in the ledger.
|
||||
|
||||
**Distribution:**
|
||||
- 626,326 total leaf nodes
|
||||
- 104.6 MB total data
|
||||
- Types: AccountRoot (145k), DirectoryNode (118k), RippleState (115k), HookState (124k), URIToken (114k)
|
||||
|
||||
**Performance (from production benchmark):**
|
||||
- **SHA-512**: 446 ns/hash, 357 MB/s (measured)
|
||||
- **BLAKE3**: ~330 ns/hash, 480 MB/s (projected)
|
||||
- **Expected Speedup**: ~1.35x
|
||||
|
||||
### 3. Inner Nodes (516 bytes exactly)
|
||||
|
||||
Inner nodes contain 16 child hashes (32 bytes each) plus a 4-byte prefix. These form the Merkle tree structure enabling cryptographic proofs.
|
||||
|
||||
**Distribution:**
|
||||
- 211,364 inner nodes
|
||||
- 104.1 MB total data (nearly equal to leaf data volume)
|
||||
|
||||
**Performance (211,364 operations):**
|
||||
- **BLAKE3**: 898 ns/hash, 548 MB/s
|
||||
- **SHA-512**: 1081 ns/hash, 455 MB/s
|
||||
- **Speedup**: 1.20x
|
||||
|
||||
## Overall Impact Analysis
|
||||
|
||||
### Current System Profile
|
||||
|
||||
From production measurements, the ledger validation process shows:
|
||||
- **Map traversal**: 47% of time
|
||||
- **SHA-512 hashing**: 53% of time
|
||||
|
||||
Within the hashing time specifically:
|
||||
- **Keylet lookups**: ~50% of hashing time
|
||||
- **Leaf/inner nodes**: ~50% of hashing time
|
||||
|
||||
### Projected Improvement with BLAKE3
|
||||
|
||||
Given the measured speedups:
|
||||
- Keylet operations: 1.78x faster → 28% time reduction
|
||||
- Leaf operations: 1.35x faster → 26% time reduction
|
||||
- Inner operations: 1.20x faster → 17% time reduction
|
||||
|
||||
**Net improvement**: ~20-25% reduction in total hashing time, or **10-13% reduction in overall validation time**.
|
||||
|
||||
## Key Observations
|
||||
|
||||
1. **Small Input Performance**: BLAKE3 shows its greatest advantage (1.78x) on small keylet inputs where function call overhead dominates.
|
||||
|
||||
2. **Diminishing Returns**: As input size increases to SHA-512's block size (128 bytes) and multiples thereof, the performance gap narrows significantly.
|
||||
|
||||
3. **Architectural Constraint**: The SHAMap design requires cryptographic hashing for all operations to maintain high-entropy keys, preventing optimization through non-cryptographic alternatives.
|
||||
|
||||
4. **Implementation Effort**: Transitioning from SHA-512 to BLAKE3 would require:
|
||||
- Updating all hash generation code
|
||||
- Maintaining backward compatibility
|
||||
- Extensive testing of consensus-critical code
|
||||
- Potential network upgrade coordination
|
||||
|
||||
## Conclusion
|
||||
|
||||
BLAKE3 offers measurable performance improvements over SHA-512 for Ripple data structures, particularly for small inputs. However, the gains are modest (1.2-1.78x depending on input size) rather than revolutionary. With map traversal consuming nearly half the total time, even perfect hashing would only double overall performance.
|
||||
|
||||
For keylet operations specifically, the 1.78x speedup is significant given that keylet hashing accounts for approximately 50% of all hashing time. However, the measured improvements must be weighed against the engineering effort and risk of modifying consensus-critical cryptographic primitives. A 10-13% overall performance gain may not justify the migration complexity unless combined with other architectural improvements.
|
||||
@@ -48,4 +48,4 @@
|
||||
#define TOO_MANY_STATE_MODIFICATIONS -44
|
||||
#define TOO_MANY_NAMESPACES -45
|
||||
#define HOOK_ERROR_CODES
|
||||
#endif //HOOK_ERROR_CODES
|
||||
#endif //HOOK_ERROR_CODES
|
||||
|
||||
@@ -12,8 +12,6 @@ accept(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
extern int64_t
|
||||
rollback(uint32_t read_ptr, uint32_t read_len, int64_t error_code);
|
||||
|
||||
// UTIL
|
||||
|
||||
extern int64_t
|
||||
util_raddr(
|
||||
uint32_t write_ptr,
|
||||
@@ -56,8 +54,6 @@ util_keylet(
|
||||
uint32_t e,
|
||||
uint32_t f);
|
||||
|
||||
// STO
|
||||
|
||||
extern int64_t
|
||||
sto_validate(uint32_t tread_ptr, uint32_t tread_len);
|
||||
|
||||
@@ -85,8 +81,6 @@ sto_erase(
|
||||
uint32_t read_len,
|
||||
uint32_t field_id);
|
||||
|
||||
// EMITTED TXN
|
||||
|
||||
extern int64_t
|
||||
etxn_burden(void);
|
||||
|
||||
@@ -112,8 +106,6 @@ emit(
|
||||
uint32_t read_ptr,
|
||||
uint32_t read_len);
|
||||
|
||||
// FLOAT
|
||||
|
||||
extern int64_t
|
||||
float_set(int32_t exponent, int64_t mantissa);
|
||||
|
||||
@@ -174,8 +166,6 @@ float_log(int64_t float1);
|
||||
extern int64_t
|
||||
float_root(int64_t float1, uint32_t n);
|
||||
|
||||
// LEDGER
|
||||
|
||||
extern int64_t
|
||||
fee_base(void);
|
||||
|
||||
@@ -200,8 +190,6 @@ ledger_keylet(
|
||||
uint32_t hread_ptr,
|
||||
uint32_t hread_len);
|
||||
|
||||
// HOOK
|
||||
|
||||
extern int64_t
|
||||
hook_account(uint32_t write_ptr, uint32_t write_len);
|
||||
|
||||
@@ -233,8 +221,6 @@ hook_skip(uint32_t read_ptr, uint32_t read_len, uint32_t flags);
|
||||
extern int64_t
|
||||
hook_pos(void);
|
||||
|
||||
// SLOT
|
||||
|
||||
extern int64_t
|
||||
slot(uint32_t write_ptr, uint32_t write_len, uint32_t slot);
|
||||
|
||||
@@ -262,8 +248,6 @@ slot_type(uint32_t slot_no, uint32_t flags);
|
||||
extern int64_t
|
||||
slot_float(uint32_t slot_no);
|
||||
|
||||
// STATE
|
||||
|
||||
extern int64_t
|
||||
state_set(
|
||||
uint32_t read_ptr,
|
||||
@@ -300,8 +284,6 @@ state_foreign(
|
||||
uint32_t aread_ptr,
|
||||
uint32_t aread_len);
|
||||
|
||||
// TRACE
|
||||
|
||||
extern int64_t
|
||||
trace(
|
||||
uint32_t mread_ptr,
|
||||
@@ -316,8 +298,6 @@ trace_num(uint32_t read_ptr, uint32_t read_len, int64_t number);
|
||||
extern int64_t
|
||||
trace_float(uint32_t read_ptr, uint32_t read_len, int64_t float1);
|
||||
|
||||
// OTXN
|
||||
|
||||
extern int64_t
|
||||
otxn_burden(void);
|
||||
|
||||
@@ -346,9 +326,8 @@ otxn_param(
|
||||
extern int64_t
|
||||
meta_slot(uint32_t slot_no);
|
||||
|
||||
// featureHooks1
|
||||
|
||||
extern int64_t xpop_slot(uint32_t, uint32_t);
|
||||
extern int64_t
|
||||
xpop_slot(uint32_t slot_no_tx, uint32_t slot_no_meta);
|
||||
|
||||
#define HOOK_EXTERN
|
||||
#endif // HOOK_EXTERN
|
||||
|
||||
58
hook/generate_error.sh
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
set -eu
|
||||
|
||||
SCRIPT_DIR=$(dirname "$0")
|
||||
SCRIPT_DIR=$(cd "$SCRIPT_DIR" && pwd)
|
||||
|
||||
ENUM_FILE="$SCRIPT_DIR/../src/ripple/app/hook/Enum.h"
|
||||
|
||||
echo '// For documentation please see: https://xrpl-hooks.readme.io/reference/'
|
||||
echo '// Generated using generate_error.sh'
|
||||
echo '#ifndef HOOK_ERROR_CODES'
|
||||
sed -n '/enum hook_return_code/,/};/p' "$ENUM_FILE" |
|
||||
awk '
|
||||
function ltrim(s) { sub(/^[[:space:]]+/, "", s); return s }
|
||||
function rtrim(s) { sub(/[[:space:]]+$/, "", s); return s }
|
||||
function trim(s) { return rtrim(ltrim(s)) }
|
||||
function emit(entry) {
|
||||
entry = trim(entry)
|
||||
if (entry == "")
|
||||
return
|
||||
gsub(/,[[:space:]]*$/, "", entry)
|
||||
split(entry, parts, "=")
|
||||
if (length(parts) < 2)
|
||||
return
|
||||
name = trim(parts[1])
|
||||
value = trim(parts[2])
|
||||
if (name == "" || value == "")
|
||||
return
|
||||
printf "#define %s %s\n", name, value
|
||||
}
|
||||
|
||||
{
|
||||
line = $0
|
||||
if (line ~ /enum[[:space:]]+hook_return_code/)
|
||||
next
|
||||
if (line ~ /^[[:space:]]*\{/)
|
||||
next
|
||||
|
||||
sub(/\/\/.*$/, "", line)
|
||||
|
||||
if (line ~ /^[[:space:]]*\};/) {
|
||||
emit(buffer)
|
||||
exit
|
||||
}
|
||||
|
||||
if (line ~ /^[[:space:]]*$/)
|
||||
next
|
||||
|
||||
buffer = buffer line " "
|
||||
|
||||
if (line ~ /,[[:space:]]*$/) {
|
||||
emit(buffer)
|
||||
buffer = ""
|
||||
}
|
||||
}
|
||||
'
|
||||
echo '#define HOOK_ERROR_CODES'
|
||||
echo '#endif //HOOK_ERROR_CODES'
|
||||
145
hook/generate_extern.sh
Executable file
@@ -0,0 +1,145 @@
|
||||
#!/bin/bash
|
||||
set -eu
|
||||
|
||||
SCRIPT_DIR=$(dirname "$0")
|
||||
SCRIPT_DIR=$(cd "$SCRIPT_DIR" && pwd)
|
||||
|
||||
APPLY_HOOK="$SCRIPT_DIR/../src/ripple/app/hook/applyHook.h"
|
||||
|
||||
{
|
||||
echo '// For documentation please see: https://xrpl-hooks.readme.io/reference/'
|
||||
echo '// Generated using generate_extern.sh'
|
||||
echo '#include <stdint.h>'
|
||||
echo '#ifndef HOOK_EXTERN'
|
||||
echo
|
||||
awk '
|
||||
function trim(s) {
|
||||
sub(/^[[:space:]]+/, "", s);
|
||||
sub(/[[:space:]]+$/, "", s);
|
||||
return s;
|
||||
}
|
||||
|
||||
function emit(ret, name, argc, argt, argn) {
|
||||
attr = (name == "_g") ? " __attribute__((noduplicate))" : "";
|
||||
if (!first)
|
||||
printf("\n");
|
||||
first = 0;
|
||||
printf("extern %s%s\n", ret, attr);
|
||||
if (argc == 0) {
|
||||
printf("%s(void);\n", name);
|
||||
return;
|
||||
}
|
||||
if (argc <= 3) {
|
||||
line = argt[1] " " argn[1];
|
||||
for (i = 2; i <= argc; ++i)
|
||||
line = line ", " argt[i] " " argn[i];
|
||||
printf("%s(%s);\n", name, line);
|
||||
return;
|
||||
}
|
||||
printf("%s(\n", name);
|
||||
for (i = 1; i <= argc; ++i) {
|
||||
sep = (i < argc) ? "," : ");";
|
||||
printf(" %s %s%s\n", argt[i], argn[i], sep);
|
||||
}
|
||||
}
|
||||
|
||||
function process(buffer, kind, payload, parts, n, i, arg, tokens, argc, argt, argn) {
|
||||
if (kind == "func")
|
||||
sub(/^DECLARE_HOOK_FUNCTION[[:space:]]*\(/, "", buffer);
|
||||
else
|
||||
sub(/^DECLARE_HOOK_FUNCNARG[[:space:]]*\(/, "", buffer);
|
||||
buffer = trim(buffer);
|
||||
sub(/\)[[:space:]]*$/, "", buffer);
|
||||
n = split(buffer, parts, ",");
|
||||
for (i = 1; i <= n; ++i)
|
||||
parts[i] = trim(parts[i]);
|
||||
ret = parts[1];
|
||||
name = parts[2];
|
||||
argc = 0;
|
||||
delete argt;
|
||||
delete argn;
|
||||
for (i = 3; i <= n; ++i) {
|
||||
arg = parts[i];
|
||||
if (arg == "")
|
||||
continue;
|
||||
split(arg, tokens, /[[:space:]]+/);
|
||||
if (length(tokens) < 2)
|
||||
continue;
|
||||
++argc;
|
||||
argt[argc] = tokens[1];
|
||||
argn[argc] = tokens[2];
|
||||
}
|
||||
emit(ret, name, argc, argt, argn);
|
||||
}
|
||||
|
||||
BEGIN {
|
||||
first = 1;
|
||||
in_block = 0;
|
||||
in_macro = 0;
|
||||
}
|
||||
|
||||
{
|
||||
line = $0;
|
||||
if (in_block) {
|
||||
if (line ~ /\*\//) {
|
||||
sub(/.*\*\//, "", line);
|
||||
in_block = 0;
|
||||
}
|
||||
else
|
||||
next;
|
||||
}
|
||||
while (line ~ /\/\*/) {
|
||||
if (line ~ /\/\*.*\*\//) {
|
||||
gsub(/\/\*.*\*\//, "", line);
|
||||
}
|
||||
else {
|
||||
sub(/\/\*.*/, "", line);
|
||||
in_block = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
sub(/\/\/.*$/, "", line);
|
||||
line = trim(line);
|
||||
if (line == "")
|
||||
next;
|
||||
|
||||
if (!in_macro && line ~ /^DECLARE_HOOK_FUNCTION\(/) {
|
||||
buffer = line;
|
||||
kind = "func";
|
||||
if (line ~ /\);[[:space:]]*$/) {
|
||||
sub(/\);[[:space:]]*$/, "", buffer);
|
||||
process(buffer, kind);
|
||||
}
|
||||
else
|
||||
in_macro = 1;
|
||||
next;
|
||||
}
|
||||
if (!in_macro && line ~ /^DECLARE_HOOK_FUNCNARG\(/) {
|
||||
buffer = line;
|
||||
kind = "narg";
|
||||
if (line ~ /\);[[:space:]]*$/) {
|
||||
sub(/\);[[:space:]]*$/, "", buffer);
|
||||
process(buffer, kind);
|
||||
}
|
||||
else
|
||||
in_macro = 1;
|
||||
next;
|
||||
}
|
||||
if (in_macro) {
|
||||
buffer = buffer " " line;
|
||||
if (line ~ /\);[[:space:]]*$/) {
|
||||
sub(/\);[[:space:]]*$/, "", buffer);
|
||||
process(buffer, kind);
|
||||
in_macro = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
END {
|
||||
printf("\n");
|
||||
}
|
||||
' "$APPLY_HOOK"
|
||||
|
||||
echo '#define HOOK_EXTERN'
|
||||
echo '#endif // HOOK_EXTERN'
|
||||
}
|
||||
@@ -1,8 +1,13 @@
|
||||
#/bin/bash
|
||||
RIPPLED_ROOT="../src/ripple"
|
||||
#!/bin/bash
|
||||
set -eu
|
||||
|
||||
SCRIPT_DIR=$(dirname "$0")
|
||||
SCRIPT_DIR=$(cd "$SCRIPT_DIR" && pwd)
|
||||
|
||||
RIPPLED_ROOT="$SCRIPT_DIR/../src/ripple"
|
||||
echo '// For documentation please see: https://xrpl-hooks.readme.io/reference/'
|
||||
echo '// Generated using generate_sfcodes.sh'
|
||||
cat $RIPPLED_ROOT/protocol/impl/SField.cpp | grep -E '^CONSTRUCT_' |
|
||||
cat "$RIPPLED_ROOT/protocol/impl/SField.cpp" | grep -E '^CONSTRUCT_' |
|
||||
sed 's/UINT16,/1,/g' |
|
||||
sed 's/UINT32,/2,/g' |
|
||||
sed 's/UINT64,/3,/g' |
|
||||
|
||||
38
hook/generate_tts.sh
Executable file
@@ -0,0 +1,38 @@
|
||||
#!/bin/bash
|
||||
set -eu
|
||||
|
||||
SCRIPT_DIR=$(dirname "$0")
|
||||
SCRIPT_DIR=$(cd "$SCRIPT_DIR" && pwd)
|
||||
|
||||
RIPPLED_ROOT="$SCRIPT_DIR/../src/ripple"
|
||||
TX_FORMATS="$RIPPLED_ROOT/protocol/TxFormats.h"
|
||||
|
||||
echo '// For documentation please see: https://xrpl-hooks.readme.io/reference/'
|
||||
echo '// Generated using generate_tts.sh'
|
||||
sed -n '/enum TxType/,/};/p' "$TX_FORMATS" |
|
||||
awk '
|
||||
function ltrim(s) { sub(/^[[:space:]]+/, "", s); return s }
|
||||
function rtrim(s) { sub(/[[:space:]]+$/, "", s); return s }
|
||||
function trim(s) { return rtrim(ltrim(s)) }
|
||||
|
||||
/^[ \t]*tt[A-Z0-9_]+/ {
|
||||
line = $0
|
||||
deprecated = (line ~ /\[\[deprecated/)
|
||||
gsub(/\[\[deprecated[^]]*\]\]/, "", line)
|
||||
sub(/\/\/.*$/, "", line)
|
||||
gsub(/,[[:space:]]*$/, "", line)
|
||||
|
||||
split(line, parts, "=")
|
||||
if (length(parts) < 2)
|
||||
next
|
||||
|
||||
name = trim(parts[1])
|
||||
value = trim(parts[2])
|
||||
if (name == "" || value == "")
|
||||
next
|
||||
|
||||
prefix = deprecated ? "// " : ""
|
||||
postfix = deprecated ? " // deprecated" : ""
|
||||
printf "%s#define %s %s%s\n", prefix, name, value, postfix
|
||||
}
|
||||
'
|
||||
@@ -37,6 +37,7 @@
|
||||
#define KEYLET_NFT_OFFER 23
|
||||
#define KEYLET_HOOK_DEFINITION 24
|
||||
#define KEYLET_HOOK_STATE_DIR 25
|
||||
#define KEYLET_CRON 26
|
||||
|
||||
#define COMPARE_EQUAL 1U
|
||||
#define COMPARE_LESS 2U
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#define sfHookEmitCount ((1U << 16U) + 18U)
|
||||
#define sfHookExecutionIndex ((1U << 16U) + 19U)
|
||||
#define sfHookApiVersion ((1U << 16U) + 20U)
|
||||
#define sfHookStateScale ((1U << 16U) + 21U)
|
||||
#define sfNetworkID ((2U << 16U) + 1U)
|
||||
#define sfFlags ((2U << 16U) + 2U)
|
||||
#define sfSourceTag ((2U << 16U) + 3U)
|
||||
@@ -62,6 +63,9 @@
|
||||
#define sfEmitGeneration ((2U << 16U) + 46U)
|
||||
#define sfLockCount ((2U << 16U) + 49U)
|
||||
#define sfFirstNFTokenSequence ((2U << 16U) + 50U)
|
||||
#define sfStartTime ((2U << 16U) + 93U)
|
||||
#define sfRepeatCount ((2U << 16U) + 94U)
|
||||
#define sfDelaySeconds ((2U << 16U) + 95U)
|
||||
#define sfXahauActivationLgrSeq ((2U << 16U) + 96U)
|
||||
#define sfImportSequence ((2U << 16U) + 97U)
|
||||
#define sfRewardTime ((2U << 16U) + 98U)
|
||||
@@ -105,6 +109,7 @@
|
||||
#define sfEmitParentTxnID ((5U << 16U) + 11U)
|
||||
#define sfEmitNonce ((5U << 16U) + 12U)
|
||||
#define sfEmitHookHash ((5U << 16U) + 13U)
|
||||
#define sfObjectID ((5U << 16U) + 14U)
|
||||
#define sfBookDirectory ((5U << 16U) + 16U)
|
||||
#define sfInvoiceID ((5U << 16U) + 17U)
|
||||
#define sfNickname ((5U << 16U) + 18U)
|
||||
@@ -129,6 +134,8 @@
|
||||
#define sfGovernanceFlags ((5U << 16U) + 99U)
|
||||
#define sfGovernanceMarks ((5U << 16U) + 98U)
|
||||
#define sfEmittedTxnID ((5U << 16U) + 97U)
|
||||
#define sfHookCanEmit ((5U << 16U) + 96U)
|
||||
#define sfCron ((5U << 16U) + 95U)
|
||||
#define sfAmount ((6U << 16U) + 1U)
|
||||
#define sfBalance ((6U << 16U) + 2U)
|
||||
#define sfLimitAmount ((6U << 16U) + 3U)
|
||||
@@ -173,6 +180,8 @@
|
||||
#define sfHookParameterName ((7U << 16U) + 24U)
|
||||
#define sfHookParameterValue ((7U << 16U) + 25U)
|
||||
#define sfBlob ((7U << 16U) + 26U)
|
||||
#define sfRemarkValue ((7U << 16U) + 98U)
|
||||
#define sfRemarkName ((7U << 16U) + 99U)
|
||||
#define sfAccount ((8U << 16U) + 1U)
|
||||
#define sfOwner ((8U << 16U) + 2U)
|
||||
#define sfDestination ((8U << 16U) + 3U)
|
||||
@@ -212,6 +221,7 @@
|
||||
#define sfHookDefinition ((14U << 16U) + 22U)
|
||||
#define sfHookParameter ((14U << 16U) + 23U)
|
||||
#define sfHookGrant ((14U << 16U) + 24U)
|
||||
#define sfRemark ((14U << 16U) + 97U)
|
||||
#define sfGenesisMint ((14U << 16U) + 96U)
|
||||
#define sfActiveValidator ((14U << 16U) + 95U)
|
||||
#define sfImportVLKey ((14U << 16U) + 94U)
|
||||
@@ -232,8 +242,9 @@
|
||||
#define sfHookExecutions ((15U << 16U) + 18U)
|
||||
#define sfHookParameters ((15U << 16U) + 19U)
|
||||
#define sfHookGrants ((15U << 16U) + 20U)
|
||||
#define sfRemarks ((15U << 16U) + 97U)
|
||||
#define sfGenesisMints ((15U << 16U) + 96U)
|
||||
#define sfActiveValidators ((15U << 16U) + 95U)
|
||||
#define sfImportVLKeys ((15U << 16U) + 94U)
|
||||
#define sfHookEmissions ((15U << 16U) + 93U)
|
||||
#define sfAmounts ((15U << 16U) + 92U)
|
||||
#define sfAmounts ((15U << 16U) + 92U)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
// For documentation please see: https://xrpl-hooks.readme.io/reference/
|
||||
// Generated using generate_tts.sh
|
||||
#define ttPAYMENT 0
|
||||
#define ttESCROW_CREATE 1
|
||||
#define ttESCROW_FINISH 2
|
||||
@@ -8,6 +9,7 @@
|
||||
// #define ttNICKNAME_SET 6 // deprecated
|
||||
#define ttOFFER_CREATE 7
|
||||
#define ttOFFER_CANCEL 8
|
||||
// #define ttCONTRACT 9 // deprecated
|
||||
#define ttTICKET_CREATE 10
|
||||
// #define ttSPINAL_TAP 11 // deprecated
|
||||
#define ttSIGNER_LIST_SET 12
|
||||
@@ -26,11 +28,15 @@
|
||||
#define ttNFTOKEN_CREATE_OFFER 27
|
||||
#define ttNFTOKEN_CANCEL_OFFER 28
|
||||
#define ttNFTOKEN_ACCEPT_OFFER 29
|
||||
#define ttCLAWBACK 30
|
||||
#define ttURITOKEN_MINT 45
|
||||
#define ttURITOKEN_BURN 46
|
||||
#define ttURITOKEN_BUY 47
|
||||
#define ttURITOKEN_CREATE_SELL_OFFER 48
|
||||
#define ttURITOKEN_CANCEL_SELL_OFFER 49
|
||||
#define ttCRON 92
|
||||
#define ttCRON_SET 93
|
||||
#define ttREMARKS_SET 94
|
||||
#define ttREMIT 95
|
||||
#define ttGENESIS_MINT 96
|
||||
#define ttIMPORT 97
|
||||
@@ -40,4 +46,4 @@
|
||||
#define ttFEE 101
|
||||
#define ttUNL_MODIFY 102
|
||||
#define ttEMIT_FAILURE 103
|
||||
#define ttUNL_REPORT 104
|
||||
#define ttUNL_REPORT 104
|
||||
|
||||
@@ -1,246 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from collections import defaultdict
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
# Mapping of keylet functions to their specific HashContext classifiers
|
||||
KEYLET_CLASSIFIERS = {
|
||||
'account': 'KEYLET_ACCOUNT',
|
||||
'amendments': 'KEYLET_AMENDMENTS',
|
||||
'book': 'KEYLET_BOOK',
|
||||
'check': 'KEYLET_CHECK',
|
||||
'child': 'KEYLET_CHILD',
|
||||
'depositPreauth': 'KEYLET_DEPOSIT_PREAUTH',
|
||||
'emittedDir': 'KEYLET_EMITTED_DIR',
|
||||
'emittedTxn': 'KEYLET_EMITTED_TXN',
|
||||
'escrow': 'KEYLET_ESCROW',
|
||||
'fees': 'KEYLET_FEES',
|
||||
'hook': 'KEYLET_HOOK',
|
||||
'hookDefinition': 'KEYLET_HOOK_DEFINITION',
|
||||
'hookState': 'KEYLET_HOOK_STATE',
|
||||
'hookStateDir': 'KEYLET_HOOK_STATE_DIR',
|
||||
'import_vlseq': 'KEYLET_IMPORT_VLSEQ',
|
||||
'line': 'KEYLET_TRUSTLINE',
|
||||
'negativeUNL': 'KEYLET_NEGATIVE_UNL',
|
||||
'nft_buys': 'KEYLET_NFT_BUYS',
|
||||
'nft_sells': 'KEYLET_NFT_SELLS',
|
||||
'nftoffer': 'KEYLET_NFT_OFFER',
|
||||
'nftpage': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_max': 'KEYLET_NFT_PAGE',
|
||||
'nftpage_min': 'KEYLET_NFT_PAGE',
|
||||
'offer': 'KEYLET_OFFER',
|
||||
'ownerDir': 'KEYLET_OWNER_DIR',
|
||||
'page': 'KEYLET_DIR_PAGE',
|
||||
'payChan': 'KEYLET_PAYCHAN',
|
||||
'signers': 'KEYLET_SIGNERS',
|
||||
'skip': 'KEYLET_SKIP_LIST',
|
||||
'ticket': 'KEYLET_TICKET',
|
||||
'UNLReport': 'KEYLET_UNL_REPORT',
|
||||
'unchecked': 'KEYLET_UNCHECKED',
|
||||
'uritoken': 'KEYLET_URI_TOKEN',
|
||||
}
|
||||
|
||||
def add_classifiers_to_digest_h(digest_h_path: str, dry_run: bool = True) -> bool:
|
||||
"""Add the new KEYLET_ classifiers to digest.h if they don't exist."""
|
||||
|
||||
# Read the file
|
||||
with open(digest_h_path, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Check if we already have KEYLET_ classifiers
|
||||
if 'KEYLET_ACCOUNT' in content:
|
||||
print("KEYLET classifiers already exist in digest.h")
|
||||
return True
|
||||
|
||||
# Find the end of the HashContext enum (before the closing brace and semicolon)
|
||||
pattern = r'(enum HashContext[^{]*\{[^}]*)(HOOK_DEFINITION\s*=\s*\d+,?)([^}]*\};)'
|
||||
|
||||
match = re.search(pattern, content, re.DOTALL)
|
||||
if not match:
|
||||
print("ERROR: Could not find HashContext enum in digest.h")
|
||||
return False
|
||||
|
||||
# Build the new classifiers text
|
||||
new_classifiers = []
|
||||
|
||||
# Get the last number used (HOOK_DEFINITION = 17)
|
||||
last_num = 17
|
||||
|
||||
# Add all KEYLET classifiers
|
||||
unique_classifiers = sorted(set(KEYLET_CLASSIFIERS.values()))
|
||||
for i, classifier in enumerate(unique_classifiers, start=1):
|
||||
new_classifiers.append(f" {classifier} = {last_num + i},")
|
||||
|
||||
# Join them with newlines
|
||||
new_text = '\n'.join(new_classifiers)
|
||||
|
||||
# Create the replacement
|
||||
replacement = match.group(1) + match.group(2) + ',\n\n // Keylet-specific hash contexts\n' + new_text + match.group(3)
|
||||
|
||||
# Replace in content
|
||||
new_content = content[:match.start()] + replacement + content[match.end():]
|
||||
|
||||
if dry_run:
|
||||
print("=" * 80)
|
||||
print("WOULD ADD TO digest.h:")
|
||||
print("=" * 80)
|
||||
print(new_text)
|
||||
print("=" * 80)
|
||||
else:
|
||||
with open(digest_h_path, 'w') as f:
|
||||
f.write(new_content)
|
||||
print(f"Updated {digest_h_path} with KEYLET classifiers")
|
||||
|
||||
return True
|
||||
|
||||
def migrate_keylet_call(content: str, func_name: str, dry_run: bool = True) -> Tuple[str, int]:
    """
    Migrate keylet calls from single ledger_index to ledger_index + classifier.
    Returns (modified_content, number_of_replacements)
    """

    classifier = KEYLET_CLASSIFIERS.get(func_name)
    if not classifier:
        print(f"WARNING: No classifier mapping for keylet::{func_name}")
        return content, 0

    # Pattern to match keylet::<func>(hash_options{<ledger_seq>}, ...)
    # where ledger_seq doesn't already contain a comma (no classifier yet)
    pattern = re.compile(
        rf'keylet::{re.escape(func_name)}\s*\(\s*hash_options\s*\{{\s*([^,}}]+)\s*\}}',
        re.MULTILINE
    )

    count = 0

    def replacer(match):
        nonlocal count
        ledger_seq = match.group(1).strip()
        # Check if it already has a classifier (contains comma)
        if ',' in ledger_seq:
            return match.group(0)  # Already migrated

        count += 1
        # Add the classifier
        return f'keylet::{func_name}(hash_options{{{ledger_seq}, {classifier}}}'

    new_content = pattern.sub(replacer, content)

    return new_content, count

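# --- Illustrative sketch, not part of the original migration script ---
# Example of the rewrite performed by migrate_keylet_call(): a call such as
#     keylet::offer(hash_options{view.seq()}, account, seq)
# would become
#     keylet::offer(hash_options{view.seq(), KEYLET_OFFER}, account, seq)
# The helper below demonstrates this on a sample string; it is never called.
def _demo_migrate_offer() -> None:
    sample = 'auto k = keylet::offer(hash_options{view.seq()}, account, seq);'
    migrated, n = migrate_keylet_call(sample, 'offer')
    print(f"{n} replacement(s): {migrated}")
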
def process_file(filepath: str, dry_run: bool = True) -> int:
    """Process a single file. Returns number of replacements made."""

    with open(filepath, 'r', encoding='utf-8') as f:
        original_content = f.read()

    content = original_content
    total_replacements = 0
    replacements_by_func = {}

    # Process each keylet function
    for func_name in KEYLET_CLASSIFIERS.keys():
        new_content, count = migrate_keylet_call(content, func_name, dry_run)
        if count > 0:
            content = new_content
            total_replacements += count
            replacements_by_func[func_name] = count

    if total_replacements > 0:
        if dry_run:
            print(f"Would modify {filepath}: {total_replacements} replacements")
            for func, count in sorted(replacements_by_func.items()):
                print(f"  - keylet::{func}: {count}")
        else:
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(content)
            print(f"Modified {filepath}: {total_replacements} replacements")
            for func, count in sorted(replacements_by_func.items()):
                print(f"  - keylet::{func}: {count}")

    return total_replacements

def main():
    parser = argparse.ArgumentParser(
        description='Migrate keylet calls to use HashContext classifiers'
    )
    parser.add_argument(
        '--dry-run',
        action='store_true',
        default=True,
        help='Show what would be changed without modifying files (default: True)'
    )
    parser.add_argument(
        '--apply',
        action='store_true',
        help='Actually apply the changes (disables dry-run)'
    )
    parser.add_argument(
        '--file',
        help='Process a specific file only'
    )
    parser.add_argument(
        '--add-classifiers',
        action='store_true',
        help='Add KEYLET_ classifiers to digest.h'
    )

    args = parser.parse_args()

    if args.apply:
        args.dry_run = False

    project_root = "/Users/nicholasdudfield/projects/xahaud-worktrees/xahaud-map-stats-rpc"

    # First, optionally add classifiers to digest.h
    if args.add_classifiers:
        digest_h = os.path.join(project_root, "src/ripple/protocol/digest.h")
        if not add_classifiers_to_digest_h(digest_h, args.dry_run):
            return 1
        print()

    # Process files
    if args.file:
        # Process single file
        filepath = os.path.join(project_root, args.file)
        if not os.path.exists(filepath):
            print(f"ERROR: File not found: {filepath}")
            return 1

        process_file(filepath, args.dry_run)
    else:
        # Process all files
        total_files = 0
        total_replacements = 0

        print(f"{'DRY RUN: ' if args.dry_run else ''}Processing files in {project_root}/src/ripple")
        print("=" * 80)

        for root, dirs, files in os.walk(Path(project_root) / "src" / "ripple"):
            dirs[:] = [d for d in dirs if d not in ['.git', 'build', '__pycache__']]

            for file in files:
                if file.endswith(('.cpp', '.h', '.hpp')):
                    filepath = os.path.join(root, file)
                    count = process_file(filepath, args.dry_run)
                    if count > 0:
                        total_files += 1
                        total_replacements += count

        print("=" * 80)
        print(f"{'Would modify' if args.dry_run else 'Modified'} {total_files} files")
        print(f"Total replacements: {total_replacements}")

        if args.dry_run:
            print("\nTo apply these changes, run with --apply flag:")
            print(f"  python3 {sys.argv[0]} --apply")
            print("\nTo first add classifiers to digest.h:")
            print(f"  python3 {sys.argv[0]} --add-classifiers --apply")

if __name__ == "__main__":
    sys.exit(main())

@@ -1,8 +1,9 @@
#!/bin/bash -u
#!/bin/bash
# We use set -e and bash with -u to bail on first non zero exit code of any
# processes launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.

set -ex

echo "START BUILDING (HOST)"
@@ -14,7 +15,7 @@ BUILD_CORES=$(echo "scale=0 ; `nproc` / 1.337" | bc)

if [[ "$GITHUB_REPOSITORY" == "" ]]; then
#Default
BUILD_CORES=8
BUILD_CORES=${BUILD_CORES:-8}
fi

# Ensure still works outside of GH Actions by setting these to /dev/null
@@ -46,29 +47,192 @@ fi

STATIC_CONTAINER=$(docker ps -a | grep $CONTAINER_NAME |wc -l)

CACHE_VOLUME_NAME="xahau-release-builder-cache"

# if [[ "$STATIC_CONTAINER" -gt "0" && "$GITHUB_REPOSITORY" != "" ]]; then
if false; then
echo "Static container, execute in static container to have max. cache"
docker start $CONTAINER_NAME
docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -x /io/build-core.sh "$GITHUB_REPOSITORY" "$GITHUB_SHA" "$BUILD_CORES" "$GITHUB_RUN_NUMBER"
docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && bash -x /io/build-core.sh '$GITHUB_REPOSITORY' '$GITHUB_SHA' '$BUILD_CORES' '$GITHUB_RUN_NUMBER'"
docker stop $CONTAINER_NAME
else
echo "No static container, build on temp container"
rm -rf release-build;
mkdir -p release-build;

docker volume create $CACHE_VOLUME_NAME

# Create inline Dockerfile with environment setup for build-full.sh
DOCKERFILE_CONTENT=$(cat <<'DOCKERFILE_EOF'
FROM ghcr.io/phusion/holy-build-box:4.0.1-amd64

ARG BUILD_CORES=8

# Enable repositories and install dependencies
RUN /hbb_exe/activate-exec bash -c "dnf install -y epel-release && \
dnf config-manager --set-enabled powertools || dnf config-manager --set-enabled crb && \
dnf install -y --enablerepo=devel \
wget git \
gcc-toolset-11-gcc-c++ gcc-toolset-11-binutils gcc-toolset-11-libatomic-devel \
lz4 lz4-devel \
ncurses-static ncurses-devel \
snappy snappy-devel \
zlib zlib-devel zlib-static \
libasan \
python3 python3-pip \
ccache \
ninja-build \
patch \
glibc-devel glibc-static \
libxml2-devel \
autoconf \
automake \
texinfo \
libtool \
llvm14-static llvm14-devel && \
dnf clean all"

# Install Conan 2 and CMake
RUN /hbb_exe/activate-exec pip3 install "conan>=2.0,<3.0" && \
/hbb_exe/activate-exec wget -q https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz -O cmake.tar.gz && \
mkdir cmake && \
tar -xzf cmake.tar.gz --strip-components=1 -C cmake && \
rm cmake.tar.gz

# Dual Boost configuration in HBB environment:
# - Manual Boost in /usr/local (minimal: for WasmEdge which is pre-built in Docker)
# - Conan Boost (full: for the application and all dependencies via toolchain)
#
# Install minimal Boost 1.86.0 for WasmEdge only (filesystem and its dependencies)
# The main application will use Conan-provided Boost for all other components
# IMPORTANT: Understanding Boost linking options:
# - link=static: Creates static Boost libraries (.a files) instead of shared (.so files)
# - runtime-link=shared: Links Boost libraries against shared libc (glibc)
# WasmEdge only needs boost::filesystem and boost::system
RUN /hbb_exe/activate-exec bash -c "echo 'Boost cache bust: v5-minimal' && \
rm -rf /usr/local/lib/libboost* /usr/local/include/boost && \
cd /tmp && \
wget -q https://archives.boost.io/release/1.86.0/source/boost_1_86_0.tar.gz -O boost.tar.gz && \
mkdir boost && \
tar -xzf boost.tar.gz --strip-components=1 -C boost && \
cd boost && \
./bootstrap.sh && \
./b2 install \
link=static runtime-link=shared -j${BUILD_CORES} \
--with-filesystem --with-system && \
cd /tmp && \
rm -rf boost boost.tar.gz"

ENV CMAKE_EXE_LINKER_FLAGS="-static-libstdc++"

ENV LLVM_DIR=/usr/lib64/llvm14/lib/cmake/llvm
ENV WasmEdge_LIB=/usr/local/lib64/libwasmedge.a

ENV CC='ccache gcc'
ENV CXX='ccache g++'

# Install LLD
RUN /hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && \
cd /tmp && \
wget -q https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.3/lld-14.0.3.src.tar.xz && \
wget -q https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.3/libunwind-14.0.3.src.tar.xz && \
tar -xf lld-14.0.3.src.tar.xz && \
tar -xf libunwind-14.0.3.src.tar.xz && \
cp -r libunwind-14.0.3.src/include libunwind-14.0.3.src/src lld-14.0.3.src/ && \
cd lld-14.0.3.src && \
mkdir -p build && cd build && \
cmake .. \
-DLLVM_LIBRARY_DIR=/usr/lib64/llvm14/lib/ \
-DCMAKE_INSTALL_PREFIX=/usr/lib64/llvm14/ \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_EXE_LINKER_FLAGS=\"\$CMAKE_EXE_LINKER_FLAGS\" && \
make -j${BUILD_CORES} install && \
ln -s /usr/lib64/llvm14/lib/include/lld /usr/include/lld && \
cp /usr/lib64/llvm14/lib/liblld*.a /usr/local/lib/ && \
cd /tmp && rm -rf lld-* libunwind-*"

# Build and install WasmEdge (static version)
# Note: Conan only provides WasmEdge with shared library linking.
# For a fully static build, we need to manually install:
# * Boost: Static C++ libraries for filesystem and system operations (built from source above)
# * LLVM: Static LLVM libraries for WebAssembly compilation (installed via llvm14-static package)
# * LLD: Static linker to produce the final static binary (built from source above)
# These were installed above to enable WASMEDGE_LINK_LLVM_STATIC=ON
RUN cd /tmp && \
( wget -nc -q https://github.com/WasmEdge/WasmEdge/archive/refs/tags/0.11.2.zip; unzip -o 0.11.2.zip; ) && \
cd WasmEdge-0.11.2 && \
( mkdir -p build; echo "" ) && \
cd build && \
/hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && \
ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ar /usr/bin/ar && \
ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ranlib /usr/bin/ranlib && \
echo '=== Binutils version check ===' && \
ar --version | head -1 && \
ranlib --version | head -1 && \
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr/local \
-DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DWASMEDGE_BUILD_SHARED_LIB=OFF \
-DWASMEDGE_BUILD_STATIC_LIB=ON \
-DWASMEDGE_BUILD_AOT_RUNTIME=ON \
-DWASMEDGE_FORCE_DISABLE_LTO=ON \
-DWASMEDGE_LINK_LLVM_STATIC=ON \
-DWASMEDGE_BUILD_PLUGINS=OFF \
-DWASMEDGE_LINK_TOOLS_STATIC=ON \
-DBoost_NO_BOOST_CMAKE=ON \
-DCMAKE_EXE_LINKER_FLAGS=\"\$CMAKE_EXE_LINKER_FLAGS\" \
&& \
make -j${BUILD_CORES} install" && \
cp -r include/api/wasmedge /usr/include/ && \
cd /tmp && rm -rf WasmEdge* 0.11.2.zip

# Set environment variables
ENV PATH=/usr/local/bin:$PATH

# Configure ccache and Conan 2
# NOTE: Using echo commands instead of heredocs because heredocs in Docker RUN commands are finicky
RUN /hbb_exe/activate-exec bash -c "ccache -M 10G && \
ccache -o cache_dir=/cache/ccache && \
ccache -o compiler_check=content && \
mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \
echo 'core.cache:storage_path=/cache/conan2' > ~/.conan2/global.conf && \
echo 'core.download:download_cache=/cache/conan2_download' >> ~/.conan2/global.conf && \
echo 'core.sources:download_cache=/cache/conan2_sources' >> ~/.conan2/global.conf && \
conan profile detect --force && \
echo '[settings]' > ~/.conan2/profiles/default && \
echo 'arch=x86_64' >> ~/.conan2/profiles/default && \
echo 'build_type=Release' >> ~/.conan2/profiles/default && \
echo 'compiler=gcc' >> ~/.conan2/profiles/default && \
echo 'compiler.cppstd=20' >> ~/.conan2/profiles/default && \
echo 'compiler.libcxx=libstdc++11' >> ~/.conan2/profiles/default && \
echo 'compiler.version=11' >> ~/.conan2/profiles/default && \
echo 'os=Linux' >> ~/.conan2/profiles/default && \
echo '' >> ~/.conan2/profiles/default && \
echo '[conf]' >> ~/.conan2/profiles/default && \
echo '# Force building from source for packages with binary compatibility issues' >> ~/.conan2/profiles/default && \
echo '*:tools.system.package_manager:mode=build' >> ~/.conan2/profiles/default"

DOCKERFILE_EOF
)

# Build custom Docker image
IMAGE_NAME="xahaud-builder:latest"
echo "Building custom Docker image with dependencies..."
echo "$DOCKERFILE_CONTENT" | docker build --build-arg BUILD_CORES="$BUILD_CORES" -t "$IMAGE_NAME" - || exit 1

if [[ "$GITHUB_REPOSITORY" == "" ]]; then
# Non GH, local building
echo "Non-GH runner, local building, temp container"
docker run -i --user 0:$(id -g) --rm -v /data/builds:/data/builds -v `pwd`:/io --network host ghcr.io/foobarwidget/holy-build-box-x64 /hbb_exe/activate-exec bash -x /io/build-full.sh "$GITHUB_REPOSITORY" "$GITHUB_SHA" "$BUILD_CORES" "$GITHUB_RUN_NUMBER"
docker run -i --user 0:$(id -g) --rm -v /data/builds:/data/builds -v `pwd`:/io -v "$CACHE_VOLUME_NAME":/cache --network host "$IMAGE_NAME" /hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && bash -x /io/build-full.sh '$GITHUB_REPOSITORY' '$GITHUB_SHA' '$BUILD_CORES' '$GITHUB_RUN_NUMBER'"
else
# GH Action, runner
echo "GH Action, runner, clean & re-create persistent container"
docker rm -f $CONTAINER_NAME
echo "echo 'Stopping container: $CONTAINER_NAME'" >> "$JOB_CLEANUP_SCRIPT"
echo "docker stop --time=15 \"$CONTAINER_NAME\" || echo 'Failed to stop container or container not running'" >> "$JOB_CLEANUP_SCRIPT"
docker run -di --user 0:$(id -g) --name $CONTAINER_NAME -v /data/builds:/data/builds -v `pwd`:/io --network host ghcr.io/foobarwidget/holy-build-box-x64 /hbb_exe/activate-exec bash
docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -x /io/build-full.sh "$GITHUB_REPOSITORY" "$GITHUB_SHA" "$BUILD_CORES" "$GITHUB_RUN_NUMBER"
docker run -di --user 0:$(id -g) --name $CONTAINER_NAME -v /data/builds:/data/builds -v `pwd`:/io -v "$CACHE_VOLUME_NAME":/cache --network host "$IMAGE_NAME" /hbb_exe/activate-exec bash
docker exec -i $CONTAINER_NAME /hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && bash -x /io/build-full.sh '$GITHUB_REPOSITORY' '$GITHUB_SHA' '$BUILD_CORES' '$GITHUB_RUN_NUMBER'"
docker stop $CONTAINER_NAME
fi
fi

@@ -864,8 +864,7 @@ RCLConsensus::Adaptor::validate(
auto const serialized = v->getSerialized();

// suppress it if we receive it
app_.getHashRouter().addSuppression(
sha512Half(hash_options{PEER_VALIDATION_HASH}, makeSlice(serialized)));
app_.getHashRouter().addSuppression(sha512Half(makeSlice(serialized)));

handleNewValidation(app_, v, "local");

Some files were not shown because too many files have changed in this diff.