Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-24 20:45:50 +00:00)

Compare commits: 7 commits on `actions-ca...nd-use-git`
Commits in this compare (author and date columns were empty in the snapshot):

- 430155587e
- 49c1e337e8
- c5972875c4
- f8bdb57f2e
- 4a65401448
- 8bcebdea42
- 4cc63c028a
.github/actions/xahau-ga-build/action.yml (vendored, 68 changed lines)

@@ -59,12 +59,6 @@ inputs:
     description: 'How to check compiler for changes'
     required: false
     default: 'content'
-  aws-access-key-id:
-    description: 'AWS Access Key ID for S3 cache storage'
-    required: true
-  aws-secret-access-key:
-    description: 'AWS Secret Access Key for S3 cache storage'
-    required: true

 runs:
   using: 'composite'
@@ -77,37 +71,56 @@ runs:
         SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
         echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT

-    - name: Restore ccache directory
+    - name: Restore ccache directory for main branch
       if: inputs.ccache_enabled == 'true'
       id: ccache-restore
       uses: ./.github/actions/xahau-ga-cache-restore
       with:
-        path: ~/.ccache
+        path: ~/.ccache-main
         key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
         restore-keys: |
           ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
           ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
+        cache-type: ccache-main
+
+    - name: Restore ccache directory for current branch
+      if: inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
+      id: ccache-restore-current-branch
+      uses: ./.github/actions/xahau-ga-cache-restore
+      with:
+        path: ~/.ccache-current
+        key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
+        restore-keys: |
+          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
+          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
+          ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
+        cache-type: ccache-current

     - name: Configure ccache
       if: inputs.ccache_enabled == 'true'
       shell: bash
       run: |
-        # Use ccache's default cache_dir (~/.ccache) - don't override it
-        # This avoids tilde expansion issues when setting it explicitly
-        # Create cache directory using ccache's default
-        mkdir -p ~/.ccache
+        # Create cache directories
+        mkdir -p ~/.ccache-main ~/.ccache-current

-        # Configure ccache settings (but NOT cache_dir - use default)
-        # This overwrites any cached config to ensure fresh configuration
+        # Configure ccache settings AFTER cache restore (prevents stale cached config)
         ccache --set-config=max_size=${{ inputs.ccache_max_size }}
         ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
         ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}

-        # Note: Not setting CCACHE_DIR - let ccache use its default (~/.ccache)
+        # Determine if we're on the main branch
+        if [ "${{ steps.safe-branch.outputs.name }}" = "${{ inputs.main_branch }}" ]; then
+          # Main branch: use main branch cache only
+          ccache --set-config=cache_dir="$HOME/.ccache-main"
+          echo "CCACHE_DIR=$HOME/.ccache-main" >> $GITHUB_ENV
+          echo "📦 Main branch: using ~/.ccache-main"
+        else
+          # Feature branch: use current branch cache with main as secondary (read-only fallback)
+          ccache --set-config=cache_dir="$HOME/.ccache-current"
+          ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
+          echo "CCACHE_DIR=$HOME/.ccache-current" >> $GITHUB_ENV
+          echo "📦 Feature branch: using ~/.ccache-current with ~/.ccache-main as secondary"
+        fi

         # Print config for verification
         echo "=== ccache configuration ==="
@@ -223,11 +236,16 @@ runs:
       shell: bash
       run: ccache -s

-    - name: Save ccache directory
-      if: success() && inputs.ccache_enabled == 'true'
-      uses: ./.github/actions/xahau-ga-cache-save
+    - name: Save ccache directory for main branch
+      if: success() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
+      uses: actions/cache/save@v4
       with:
-        path: ~/.ccache
-        key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
+        path: ~/.ccache-main
+        key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
+
+    - name: Save ccache directory for current branch
+      if: success() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
+      uses: actions/cache/save@v4
+      with:
+        path: ~/.ccache-current
+        key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}
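The two restore steps and the if/else above give each build a branch-scoped ccache: main-branch runs read and write only `~/.ccache-main`, while feature branches write to `~/.ccache-current` and fall back to main's objects for hits. A minimal local sketch of that behaviour (assuming ccache 4.4+, where `secondary_storage` is available; branch names and paths are illustrative):

```bash
#!/usr/bin/env bash
# Sketch: branch-scoped ccache with fallback to a shared main-branch cache.
# Assumes ccache >= 4.4 (secondary_storage); names here are illustrative.
set -euo pipefail

BRANCH="${1:-feature-x}"
MAIN_BRANCH="dev"

mkdir -p ~/.ccache-main ~/.ccache-current

if [ "$BRANCH" = "$MAIN_BRANCH" ]; then
  # Main branch reads and writes only its own cache directory.
  ccache --set-config=cache_dir="$HOME/.ccache-main"
else
  # Feature branches write to their own directory but still get hits on
  # objects already compiled for main via the secondary storage lookup.
  ccache --set-config=cache_dir="$HOME/.ccache-current"
  ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
fi

ccache -p | grep -E 'cache_dir|secondary_storage'  # verify the layout
```

Because the two directories are saved under different keys (see the save steps above), feature-branch objects are never written back into the main branch's cache entry.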
.github/actions/xahau-ga-cache-restore/action.yml (vendored, 247 changed lines)

@@ -1,10 +1,9 @@
-name: 'Xahau Cache Restore (S3)'
-bump: 1
-description: 'Drop-in replacement for actions/cache/restore using S3 storage'
+name: 'Cache Restore'
+description: 'Restores cache with optional clearing based on commit message tags'

 inputs:
   path:
-    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
+    description: 'A list of files, directories, and wildcard patterns to cache'
     required: true
   key:
     description: 'An explicit key for restoring the cache'
@@ -13,14 +12,10 @@ inputs:
     description: 'An ordered list of prefix-matched keys to use for restoring stale cache if no cache hit occurred for key'
     required: false
     default: ''
-  s3-bucket:
-    description: 'S3 bucket name for cache storage'
-    required: false
-    default: 'xahaud-github-actions-cache-niq'
-  s3-region:
-    description: 'S3 region'
-    required: false
-    default: 'us-east-1'
+  cache-type:
+    description: 'Type of cache (for logging purposes, e.g., "ccache-main", "Conan")'
+    required: false
+    default: 'cache'
   fail-on-cache-miss:
     description: 'Fail the workflow if cache entry is not found'
     required: false
@@ -29,20 +24,17 @@ inputs:
     description: 'Check if a cache entry exists for the given input(s) without downloading it'
     required: false
     default: 'false'
-  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
-  aws-access-key-id:
-    description: 'AWS Access Key ID for S3 access'
-    required: true
-  aws-secret-access-key:
-    description: 'AWS Secret Access Key for S3 access'
-    required: true
+  additional-clear-keys:
+    description: 'Additional cache keys to clear (newline separated)'
+    required: false
+    default: ''

 outputs:
   cache-hit:
     description: 'A boolean value to indicate an exact match was found for the primary key'
     value: ${{ steps.restore-cache.outputs.cache-hit }}
   cache-primary-key:
-    description: 'The key that was used to restore the cache (may be from restore-keys)'
+    description: 'The key that was used to restore the cache'
     value: ${{ steps.restore-cache.outputs.cache-primary-key }}
   cache-matched-key:
     description: 'The key that was used to restore the cache (exact or prefix match)'
@@ -51,58 +43,18 @@ outputs:
 runs:
   using: 'composite'
   steps:
-    - name: Restore cache from S3
-      id: restore-cache
+    - name: Clear cache if requested via commit message
       shell: bash
       env:
-        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
-        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
-        S3_BUCKET: ${{ inputs.s3-bucket }}
-        S3_REGION: ${{ inputs.s3-region }}
-        CACHE_KEY: ${{ inputs.key }}
-        RESTORE_KEYS: ${{ inputs.restore-keys }}
-        TARGET_PATH: ${{ inputs.path }}
-        FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
-        LOOKUP_ONLY: ${{ inputs.lookup-only }}
+        GH_TOKEN: ${{ github.token }}
       run: |
         set -euo pipefail

         echo "=========================================="
-        echo "Xahau Cache Restore (S3)"
+        echo "${{ inputs.cache-type }} cache clear tag detection"
         echo "=========================================="
-        echo "Target path: ${TARGET_PATH}"
-        echo "Cache key: ${CACHE_KEY}"
-        echo "S3 bucket: s3://${S3_BUCKET}"
-        echo ""
-
-        # Normalize target path (expand tilde)
-        if [[ "${TARGET_PATH}" == ~* ]]; then
-          TARGET_PATH="${HOME}${TARGET_PATH:1}"
-        fi
-
-        # Canonicalize path (Linux only - macOS realpath doesn't support -m)
-        if [[ "$OSTYPE" == "linux-gnu"* ]]; then
-          TARGET_PATH=$(realpath -m "${TARGET_PATH}")
-        fi
-
-        echo "Normalized target path: ${TARGET_PATH}"
-        echo ""
-
-        # Debug: Show commit message
-        echo "=========================================="
-        echo "DEBUG: Cache clear tag detection"
-        echo "=========================================="
         echo "Raw commit message:"
         echo "${XAHAU_GA_COMMIT_MSG}"
         echo ""
         echo "Searching for: [ci-ga-clear-cache] or [ci-ga-clear-cache:*]"
         echo ""

         # Check for [ci-ga-clear-cache] tag in commit message (with optional search terms)
         # Examples:
         #   [ci-ga-clear-cache]           - Clear this job's cache
         #   [ci-ga-clear-cache:ccache]    - Clear only if key contains "ccache"
         #   [ci-ga-clear-cache:gcc Debug] - Clear only if key contains both "gcc" AND "Debug"
+        CACHE_KEY="${{ inputs.key }}"

         # Extract search terms if present (e.g., "ccache" from "[ci-ga-clear-cache:ccache]")
         SEARCH_TERMS=$(echo "${XAHAU_GA_COMMIT_MSG}" | grep -o '\[ci-ga-clear-cache:[^]]*\]' | sed 's/\[ci-ga-clear-cache://;s/\]//' || echo "")
@@ -139,153 +91,56 @@ runs:
         elif echo "${XAHAU_GA_COMMIT_MSG}" | grep -q '\[ci-ga-clear-cache\]'; then
           # No search terms - always clear this job's cache
           echo "🗑️ [ci-ga-clear-cache] detected in commit message"
-          echo "Clearing cache for key: ${CACHE_KEY}"
+          echo "Clearing ${{ inputs.cache-type }} cache for key: ${CACHE_KEY}"
           SHOULD_CLEAR=true
         fi

         if [ "${SHOULD_CLEAR}" = "true" ]; then
           echo ""
+          echo "Deleting ${{ inputs.cache-type }} caches via GitHub API..."

-          # Delete base layer
-          S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"
-          if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
-            echo "Deleting base layer: ${S3_BASE_KEY}"
-            aws s3 rm "${S3_BASE_KEY}" --region "${S3_REGION}" 2>/dev/null || true
-            echo "✓ Base layer deleted"
+          # Delete primary cache key
+          echo "Checking for cache: ${CACHE_KEY}"
+          if gh cache list --key "${CACHE_KEY}" --json key --jq '.[].key' | grep -q "${CACHE_KEY}"; then
+            echo "  Deleting: ${CACHE_KEY}"
+            gh cache delete "${CACHE_KEY}" || true
+            echo "  ✓ Deleted"
           else
-            echo "ℹ️ No base layer found to delete"
+            echo "  ℹ️ Not found"
           fi

-          echo ""
-          echo "✅ Cache cleared successfully"
-          echo "Build will proceed from scratch (bootstrap mode)"
-          echo ""
-        fi
-
-        # Function to try restoring a cache key
-        try_restore_key() {
-          local key=$1
-          local s3_key="s3://${S3_BUCKET}/${key}-base.tar.zst"
-
-          echo "Checking for key: ${key}"
-
-          if aws s3 ls "${s3_key}" --region "${S3_REGION}" >/dev/null 2>&1; then
-            echo "✓ Found cache: ${s3_key}"
-            return 0
-          else
-            echo "✗ Not found: ${key}"
-            return 1
-          fi
-        }
-
-        # Try exact match first
-        MATCHED_KEY=""
-        EXACT_MATCH="false"
-
-        if try_restore_key "${CACHE_KEY}"; then
-          MATCHED_KEY="${CACHE_KEY}"
-          EXACT_MATCH="true"
-          echo ""
-          echo "🎯 Exact cache hit for key: ${CACHE_KEY}"
-        else
-          # Try restore-keys (prefix matching)
-          if [ -n "${RESTORE_KEYS}" ]; then
-            echo ""
-            echo "Primary key not found, trying restore-keys..."
-
-            while IFS= read -r restore_key; do
-              [ -z "${restore_key}" ] && continue
-              restore_key=$(echo "${restore_key}" | xargs)
-
-              if try_restore_key "${restore_key}"; then
-                MATCHED_KEY="${restore_key}"
-                EXACT_MATCH="false"
-                echo ""
-                echo "✓ Cache restored from fallback key: ${restore_key}"
-                break
-              fi
-            done <<< "${RESTORE_KEYS}"
-          fi
-        fi
-
-        # Check if we found anything
-        if [ -z "${MATCHED_KEY}" ]; then
-          echo ""
-          echo "❌ No cache found for key: ${CACHE_KEY}"
-
-          if [ "${FAIL_ON_MISS}" = "true" ]; then
-            echo "fail-on-cache-miss is enabled, failing workflow"
-            exit 1
-          fi
-
-          # Set outputs for cache miss
-          echo "cache-hit=false" >> $GITHUB_OUTPUT
-          echo "cache-primary-key=" >> $GITHUB_OUTPUT
-          echo "cache-matched-key=" >> $GITHUB_OUTPUT
-
-          # Create empty cache directory
-          mkdir -p "${TARGET_PATH}"
-
-          echo ""
-          echo "=========================================="
-          echo "Cache restore completed (bootstrap mode)"
-          echo "Created empty cache directory: ${TARGET_PATH}"
-          echo "=========================================="
-          exit 0
-        fi
-
-        # If lookup-only, we're done
-        if [ "${LOOKUP_ONLY}" = "true" ]; then
-          echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
-          echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
-          echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT
-
-          echo ""
-          echo "=========================================="
-          echo "Cache lookup completed (lookup-only mode)"
-          echo "Cache exists: ${MATCHED_KEY}"
-          echo "=========================================="
-          exit 0
-        fi
-
-        # Download and extract cache
-        S3_KEY="s3://${S3_BUCKET}/${MATCHED_KEY}-base.tar.zst"
-        TEMP_TARBALL="/tmp/xahau-cache-restore-$$.tar.zst"
-
-        echo ""
-        echo "Downloading cache..."
-        aws s3 cp "${S3_KEY}" "${TEMP_TARBALL}" --region "${S3_REGION}" --no-progress
-
-        TARBALL_SIZE=$(du -h "${TEMP_TARBALL}" | cut -f1)
-        echo "✓ Downloaded: ${TARBALL_SIZE}"
-
-        # Create parent directory if needed
-        mkdir -p "$(dirname "${TARGET_PATH}")"
-
-        # Remove existing target if it exists
-        if [ -e "${TARGET_PATH}" ]; then
-          echo "Removing existing target: ${TARGET_PATH}"
-          rm -rf "${TARGET_PATH}"
-        fi
-
-        # Create target directory and extract
-        mkdir -p "${TARGET_PATH}"
-        echo ""
-        echo "Extracting cache..."
-        zstd -d -c "${TEMP_TARBALL}" | tar -xf - -C "${TARGET_PATH}"
-        echo "✓ Cache extracted to: ${TARGET_PATH}"
-
-        # Cleanup
-        rm -f "${TEMP_TARBALL}"
-
-        # Set outputs
-        echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
-        echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
-        echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT
-
-        echo ""
-        echo "=========================================="
-        echo "Cache restore completed successfully"
-        echo "Cache hit: ${EXACT_MATCH}"
-        echo "Matched key: ${MATCHED_KEY}"
-        echo "=========================================="
+          # Delete additional keys if provided
+          if [ -n "${{ inputs.additional-clear-keys }}" ]; then
+            echo "Checking additional keys..."
+            while IFS= read -r key; do
+              [ -z "${key}" ] && continue
+              echo "Checking for cache: ${key}"
+              if gh cache list --key "${key}" --json key --jq '.[].key' | grep -q "${key}"; then
+                echo "  Deleting: ${key}"
+                gh cache delete "${key}" || true
+                echo "  ✓ Deleted"
+              else
+                echo "  ℹ️ Not found"
+              fi
+            done <<< "${{ inputs.additional-clear-keys }}"
+          fi
+
+          echo ""
+          echo "✅ ${{ inputs.cache-type }} cache cleared successfully"
+          echo "Build will proceed from scratch"
+        else
+          echo "ℹ️ No ${{ inputs.cache-type }} cache clear requested"
+        fi
+
+    - name: Restore cache
+      id: restore-cache
+      uses: actions/cache/restore@v4
+      with:
+        path: ${{ inputs.path }}
+        key: ${{ inputs.key }}
+        restore-keys: ${{ inputs.restore-keys }}
+        fail-on-cache-miss: ${{ inputs.fail-on-cache-miss }}
+        lookup-only: ${{ inputs.lookup-only }}
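Per the comments above, `[ci-ga-clear-cache:term1 term2]` is meant to clear a job's cache only when every term appears in that job's key. The middle of the matching script is elided in this diff, so the sketch below is an illustrative reconstruction of those documented semantics rather than the action's exact code:

```bash
#!/usr/bin/env bash
# Sketch of the documented tag semantics: every term after the colon must
# appear in the cache key (AND match). Message and key are example values.
set -euo pipefail

COMMIT_MSG='fix build [ci-ga-clear-cache:gcc Debug]'
CACHE_KEY='Linux-ccache-v1-gcc-13-Debug-dev'

# Same extraction the action uses: pull "gcc Debug" out of the tag, if any.
SEARCH_TERMS=$(echo "${COMMIT_MSG}" \
  | grep -o '\[ci-ga-clear-cache:[^]]*\]' \
  | sed 's/\[ci-ga-clear-cache://;s/\]//' || echo "")

SHOULD_CLEAR=false
if [ -n "${SEARCH_TERMS}" ]; then
  SHOULD_CLEAR=true
  for term in ${SEARCH_TERMS}; do
    # Every term must appear somewhere in the key.
    case "${CACHE_KEY}" in *"${term}"*) ;; *) SHOULD_CLEAR=false; break ;; esac
  done
elif echo "${COMMIT_MSG}" | grep -q '\[ci-ga-clear-cache\]'; then
  SHOULD_CLEAR=true  # bare tag: always clear this job's cache
fi

echo "SHOULD_CLEAR=${SHOULD_CLEAR}"  # -> true for this example key
```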
.github/actions/xahau-ga-cache-save/action.yml (vendored, 110 changed lines, file deleted)

@@ -1,110 +0,0 @@
-name: 'Xahau Cache Save (S3)'
-description: 'Drop-in replacement for actions/cache/save using S3 storage'
-
-inputs:
-  path:
-    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
-    required: true
-  key:
-    description: 'An explicit key for saving the cache'
-    required: true
-  s3-bucket:
-    description: 'S3 bucket name for cache storage'
-    required: false
-    default: 'xahaud-github-actions-cache-niq'
-  s3-region:
-    description: 'S3 region'
-    required: false
-    default: 'us-east-1'
-  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
-  aws-access-key-id:
-    description: 'AWS Access Key ID for S3 access'
-    required: true
-  aws-secret-access-key:
-    description: 'AWS Secret Access Key for S3 access'
-    required: true
-
-runs:
-  using: 'composite'
-  steps:
-    - name: Save cache to S3
-      shell: bash
-      env:
-        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
-        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
-        S3_BUCKET: ${{ inputs.s3-bucket }}
-        S3_REGION: ${{ inputs.s3-region }}
-        CACHE_KEY: ${{ inputs.key }}
-        TARGET_PATH: ${{ inputs.path }}
-      run: |
-        set -euo pipefail
-
-        echo "=========================================="
-        echo "Xahau Cache Save (S3)"
-        echo "=========================================="
-        echo "Target path: ${TARGET_PATH}"
-        echo "Cache key: ${CACHE_KEY}"
-        echo "S3 bucket: s3://${S3_BUCKET}"
-        echo ""
-
-        # Normalize target path (expand tilde and resolve to absolute path)
-        if [[ "${TARGET_PATH}" == ~* ]]; then
-          TARGET_PATH="${HOME}${TARGET_PATH:1}"
-        fi
-        echo "Normalized target path: ${TARGET_PATH}"
-        echo ""
-
-        # Check if target directory exists
-        if [ ! -d "${TARGET_PATH}" ]; then
-          echo "⚠️ Target directory does not exist: ${TARGET_PATH}"
-          echo "Skipping cache save."
-          exit 0
-        fi
-
-        # Use static base name (one base per key, immutable)
-        S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"
-
-        # Check if base already exists (immutability - first write wins)
-        if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
-          echo "⚠️ Cache already exists: ${S3_BASE_KEY}"
-          echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
-          echo ""
-          echo "=========================================="
-          echo "Cache save completed (already exists)"
-          echo "=========================================="
-          exit 0
-        fi
-
-        # Create tarball
-        BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
-
-        echo "Creating cache tarball..."
-        tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"
-
-        BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
-        echo "✓ Cache tarball created: ${BASE_SIZE}"
-        echo ""
-
-        # Upload to S3
-        echo "Uploading cache to S3..."
-        echo "  Key: ${CACHE_KEY}-base.tar.zst"
-
-        aws s3api put-object \
-          --bucket "${S3_BUCKET}" \
-          --key "${CACHE_KEY}-base.tar.zst" \
-          --body "${BASE_TARBALL}" \
-          --tagging 'type=base' \
-          --region "${S3_REGION}" \
-          >/dev/null 2>&1
-
-        echo "✓ Uploaded: ${S3_BASE_KEY}"
-
-        # Cleanup
-        rm -f "${BASE_TARBALL}"
-
-        echo ""
-        echo "=========================================="
-        echo "Cache save completed successfully"
-        echo "Cache size: ${BASE_SIZE}"
-        echo "Cache key: ${CACHE_KEY}"
-        echo "=========================================="
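For reference, the deleted action implemented a "first write wins" model: once a tarball existed for a key it was never overwritten, mirroring how GitHub's hosted cache treats keys as immutable. A condensed sketch of that round trip (bucket, key, and path are placeholders; needs awscli and zstd):

```bash
#!/usr/bin/env bash
# Sketch of the deleted save flow: pack the directory, then upload only if
# the key has never been written. All names here are placeholders.
set -euo pipefail

TARGET_PATH="$HOME/.ccache"
CACHE_KEY="example-key"
S3_BUCKET="example-bucket"
S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"

# Immutability check: first write wins, later saves are no-ops.
if aws s3 ls "${S3_BASE_KEY}" >/dev/null 2>&1; then
  echo "Cache already exists: ${S3_BASE_KEY} (skipping upload)"
  exit 0
fi

TARBALL="/tmp/cache-save-$$.tar.zst"
tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${TARBALL}"
aws s3 cp "${TARBALL}" "${S3_BASE_KEY}" --no-progress
rm -f "${TARBALL}"
```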
.github/actions/xahau-ga-dependencies/action.yml (vendored, 20 changed lines)

@@ -54,12 +54,6 @@ inputs:
     options:
       - libstdcxx
       - libcxx
-  aws-access-key-id:
-    description: 'AWS Access Key ID for S3 cache storage'
-    required: true
-  aws-secret-access-key:
-    description: 'AWS Secret Access Key for S3 cache storage'
-    required: true

 outputs:
   cache-hit:
@@ -69,7 +63,6 @@ outputs:
 runs:
   using: 'composite'
   steps:
-
     - name: Restore Conan cache
       if: inputs.cache_enabled == 'true'
       id: cache-restore-conan
@@ -77,12 +70,11 @@ runs:
       with:
         path: ~/.conan2
         # Note: compiler-id format is compiler-version-stdlib[-gccversion]
-        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
+        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-${{ inputs.configuration }}
         restore-keys: |
-          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
+          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.py') }}-
           ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
+        cache-type: Conan

     - name: Configure Conan
       shell: bash
@@ -161,9 +153,7 @@

     - name: Save Conan cache
       if: success() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
-      uses: ./.github/actions/xahau-ga-cache-save
+      uses: actions/cache/save@v4
       with:
         path: ~/.conan2
-        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
-        aws-access-key-id: ${{ inputs.aws-access-key-id }}
-        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
+        key: ${{ steps.cache-restore-conan.outputs.cache-primary-key }}
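Narrowing `hashFiles('**/conanfile.txt', '**/conanfile.py')` to `hashFiles('**/conanfile.py')` means only edits to `conanfile.py` now invalidate the Conan cache. A rough local approximation of how the key is assembled (note that the real `hashFiles()` hashes each matched file and then hashes the digests, so the concatenation below is only an approximation; the field values are illustrative):

```bash
#!/usr/bin/env bash
# Approximate the cache key outside Actions. The concatenate-then-hash step
# is a simplification of hashFiles(); field values are made up.
set -euo pipefail

CACHE_VERSION=1
COMPILER_ID="gcc-13-libstdcxx"
CONFIGURATION="Release"

# Only conanfile.py feeds the hash now; a conanfile.txt edit no longer
# produces a different key.
FILES_HASH=$(find . -name 'conanfile.py' -type f -print0 | sort -z \
  | xargs -0 cat | sha256sum | cut -d' ' -f1)

echo "$(uname -s)-conan-v${CACHE_VERSION}-${COMPILER_ID}-${FILES_HASH}-${CONFIGURATION}"
```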
.github/workflows/xahau-ga-macos.yml (vendored, 6 changed lines)

@@ -20,7 +20,7 @@ jobs:
           - Ninja
         configuration:
           - Debug
-    runs-on: macos-15
+    runs-on: macos-15-xlarge
     env:
       build_dir: .build
       # Bump this number to invalidate all caches globally.
@@ -118,8 +118,6 @@ jobs:
           compiler: apple-clang
           compiler_version: ${{ steps.detect-compiler.outputs.compiler_version }}
           stdlib: libcxx
-          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

       - name: Build
         uses: ./.github/actions/xahau-ga-build
@@ -131,8 +129,6 @@ jobs:
           cache_version: ${{ env.CACHE_VERSION }}
           main_branch: ${{ env.MAIN_BRANCH_NAME }}
           stdlib: libcxx
-          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

       - name: Test
         run: |
.github/workflows/xahau-ga-nix.yml (vendored, 6 changed lines)

@@ -2,7 +2,7 @@ name: Nix - GA Runner

 on:
   push:
-    branches: ["dev", "candidate", "release", "nd-experiment-overlayfs-2025-10-29"]
+    branches: ["dev", "candidate", "release"]
   pull_request:
     branches: ["dev", "candidate", "release"]
   schedule:
@@ -264,8 +264,6 @@ jobs:
           cc: ${{ matrix.cc }}
           cxx: ${{ matrix.cxx }}
           stdlib: ${{ matrix.stdlib }}
-          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

       - name: Build
         uses: ./.github/actions/xahau-ga-build
@@ -280,8 +278,6 @@ jobs:
           main_branch: ${{ env.MAIN_BRANCH_NAME }}
           stdlib: ${{ matrix.stdlib }}
           clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
-          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

       - name: Set artifact name
         id: set-artifact-name
@@ -1769,7 +1769,7 @@ pool.ntp.org
 # Unless an absolute path is specified, it will be considered relative to the
 # folder in which the xahaud.cfg file is located.
 [validators_file]
-validators.txt
+validators-xahau.txt

 # Turn down default logging to save disk space in the long run.
 # Valid values here are trace, debug, info, warning, error, and fatal
@@ -1482,9 +1482,13 @@ TxQ::accept(Application& app, OpenView& view)
     {
         uint32_t currentTime =
             view.parentCloseTime().time_since_epoch().count();
-        uint256 klStart = keylet::cron(0, AccountID(beast::zero)).key;
-        uint256 const klEnd =
-            keylet::cron(currentTime + 1, AccountID(beast::zero)).key;
+        bool fixCron = view.rules().enabled(fixCronStacking);
+        std::optional<AccountID> accountID = std::nullopt;
+        if (!fixCron)
+            accountID = AccountID(beast::zero);
+
+        uint256 klStart = keylet::cron(0, accountID).key;
+        uint256 const klEnd = keylet::cron(currentTime + 1, accountID).key;

         std::set<AccountID> cronAccs;
@@ -93,6 +93,16 @@ Cron::doApply()
     auto& view = ctx_.view();
     auto const& tx = ctx_.tx;

+    if (view.rules().enabled(fixCronStacking))
+    {
+        if (auto const seq = tx.getFieldU32(sfLedgerSequence);
+            seq != view.info().seq)
+        {
+            JLOG(j_.warn()) << "Cron: wrong ledger seq=" << seq;
+            return tefFAILURE;
+        }
+    }
+
     AccountID const& id = tx.getAccountID(sfOwner);

     auto sle = view.peek(keylet::account(id));
@@ -74,7 +74,7 @@ namespace detail {
 // Feature.cpp. Because it's only used to reserve storage, and determine how
 // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
 // the actual number of amendments. A LogicError on startup will verify this.
-static constexpr std::size_t numFeatures = 88;
+static constexpr std::size_t numFeatures = 89;

 /** Amendments that this server supports and the default voting behavior.
     Whether they are enabled depends on the Rules defined in the validated
@@ -376,6 +376,7 @@ extern uint256 const featureIOUIssuerWeakTSH;
 extern uint256 const featureCron;
 extern uint256 const fixInvalidTxFlags;
 extern uint256 const featureExtendedHookState;
+extern uint256 const fixCronStacking;

 }  // namespace ripple
@@ -298,7 +298,7 @@ Keylet
 uritoken(AccountID const& issuer, Blob const& uri);

 Keylet
-cron(uint32_t timestamp, AccountID const& id);
+cron(uint32_t timestamp, std::optional<AccountID> const& id = std::nullopt);

 }  // namespace keylet
@@ -482,6 +482,7 @@ REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::De
 REGISTER_FEATURE(Cron,              Supported::yes, VoteBehavior::DefaultNo);
 REGISTER_FIX    (fixInvalidTxFlags, Supported::yes, VoteBehavior::DefaultYes);
 REGISTER_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);
+REGISTER_FIX    (fixCronStacking,   Supported::yes, VoteBehavior::DefaultYes);

 // The following amendments are obsolete, but must remain supported
 // because they could potentially get enabled.
@@ -466,7 +466,7 @@ uritoken(AccountID const& issuer, Blob const& uri)
 // Examples: 100M → ~5.4e-12, 1B → ~5.4e-11, 10B → ~5.4e-10, 100B → ~5.4e-9
 // (negligible).
 Keylet
-cron(uint32_t timestamp, AccountID const& id)
+cron(uint32_t timestamp, std::optional<AccountID> const& id)
 {
     static const uint256 ns = indexHash(LedgerNameSpace::CRON);
@@ -481,7 +481,14 @@ cron(uint32_t timestamp, AccountID const& id)
     h[10] = static_cast<uint8_t>((timestamp >> 8) & 0xFFU);
     h[11] = static_cast<uint8_t>((timestamp >> 0) & 0xFFU);

-    const uint256 accHash = indexHash(LedgerNameSpace::CRON, timestamp, id);
+    if (!id.has_value())
+    {
+        // final 20 bytes are zero
+        std::memset(h + 12, 0, 20);
+        return {ltCRON, uint256::fromVoid(h)};
+    }
+
+    const uint256 accHash = indexHash(LedgerNameSpace::CRON, timestamp, *id);

     // final 20 bytes are account ID
     std::memcpy(h + 12, accHash.cdata(), 20);
@@ -1106,30 +1106,32 @@ chooseLedgerEntryType(Json::Value const& params)
     std::pair<RPC::Status, LedgerEntryType> result{RPC::Status::OK, ltANY};
     if (params.isMember(jss::type))
     {
-        static constexpr std::array<std::pair<char const*, LedgerEntryType>, 22>
-            types{
-                {{jss::account, ltACCOUNT_ROOT},
-                 {jss::amendments, ltAMENDMENTS},
-                 {jss::check, ltCHECK},
-                 {jss::deposit_preauth, ltDEPOSIT_PREAUTH},
-                 {jss::directory, ltDIR_NODE},
-                 {jss::escrow, ltESCROW},
-                 {jss::emitted_txn, ltEMITTED_TXN},
-                 {jss::hook, ltHOOK},
-                 {jss::hook_definition, ltHOOK_DEFINITION},
-                 {jss::hook_state, ltHOOK_STATE},
-                 {jss::fee, ltFEE_SETTINGS},
-                 {jss::hashes, ltLEDGER_HASHES},
-                 {jss::import_vlseq, ltIMPORT_VLSEQ},
-                 {jss::offer, ltOFFER},
-                 {jss::payment_channel, ltPAYCHAN},
-                 {jss::uri_token, ltURI_TOKEN},
-                 {jss::signer_list, ltSIGNER_LIST},
-                 {jss::state, ltRIPPLE_STATE},
-                 {jss::ticket, ltTICKET},
-                 {jss::nft_offer, ltNFTOKEN_OFFER},
-                 {jss::nft_page, ltNFTOKEN_PAGE},
-                 {jss::unl_report, ltUNL_REPORT}}};
+        static constexpr std::array<std::pair<char const*, LedgerEntryType>, 23>
+            types{{
+                {jss::account, ltACCOUNT_ROOT},
+                {jss::amendments, ltAMENDMENTS},
+                {jss::check, ltCHECK},
+                {jss::deposit_preauth, ltDEPOSIT_PREAUTH},
+                {jss::directory, ltDIR_NODE},
+                {jss::escrow, ltESCROW},
+                {jss::emitted_txn, ltEMITTED_TXN},
+                {jss::hook, ltHOOK},
+                {jss::hook_definition, ltHOOK_DEFINITION},
+                {jss::hook_state, ltHOOK_STATE},
+                {jss::fee, ltFEE_SETTINGS},
+                {jss::hashes, ltLEDGER_HASHES},
+                {jss::import_vlseq, ltIMPORT_VLSEQ},
+                {jss::offer, ltOFFER},
+                {jss::payment_channel, ltPAYCHAN},
+                {jss::uri_token, ltURI_TOKEN},
+                {jss::signer_list, ltSIGNER_LIST},
+                {jss::state, ltRIPPLE_STATE},
+                {jss::ticket, ltTICKET},
+                {jss::nft_offer, ltNFTOKEN_OFFER},
+                {jss::nft_page, ltNFTOKEN_PAGE},
+                {jss::unl_report, ltUNL_REPORT},
+                {jss::cron, ltCRON},
+            }};

         auto const& p = params[jss::type];
         if (!p.isString())
@@ -781,6 +781,22 @@ public:
         auto const& hook = resp[jss::result][jss::account_objects][0u];
         BEAST_EXPECT(hook[sfAccount.jsonName] == gw.human());
     }
+    {
+        // Create a Cron
+        env(cron::set(gw),
+            cron::startTime(env.now().time_since_epoch().count() + 100),
+            cron::delay(100),
+            cron::repeat(200),
+            fee(XRP(1)));
+        env.close();
+    }
+    {
+        // Find the cron.
+        Json::Value const resp = acct_objs(gw, jss::cron);
+        BEAST_EXPECT(acct_objs_is_size(resp, 1));
+        auto const& cron = resp[jss::result][jss::account_objects][0u];
+        BEAST_EXPECT(cron[sfOwner.jsonName] == gw.human());
+    }
     {
         // See how "deletion_blockers_only" handles gw's directory.
         Json::Value params;