Compare commits


4 Commits

Author | SHA1 | Message | Date
tequ | 1d0b31f85a | add LedgerSequence check | 2025-11-13 17:42:08 +09:00
tequ | e3863181c5 | . | 2025-11-13 16:28:16 +09:00
tequ | 8bcebdea42 | Support 'cron' type for account_objects (#624) | 2025-11-06 15:19:15 +10:00
Alloy Networks | 4cc63c028a | Change validators.txt to validators-xahau.txt (#619) | 2025-11-01 15:26:56 +10:00
18 changed files with 183 additions and 1302 deletions

View File

@@ -1,328 +0,0 @@
name: 'Xahau Cache Restore (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/restore using S3 and OverlayFS for delta caching'
inputs:
path:
description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
required: true
key:
description: 'An explicit key for restoring the cache'
required: true
restore-keys:
description: 'An ordered list of prefix-matched keys to use for restoring stale cache if no cache hit occurred for key'
required: false
default: ''
s3-bucket:
description: 'S3 bucket name for cache storage'
required: false
default: 'xahaud-github-actions-cache-niq'
s3-region:
description: 'S3 region'
required: false
default: 'us-east-1'
fail-on-cache-miss:
description: 'Fail the workflow if cache entry is not found'
required: false
default: 'false'
lookup-only:
description: 'Check if a cache entry exists for the given input(s) without downloading it'
required: false
default: 'false'
use-deltas:
description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
required: false
default: 'true'
# Note: Composite actions can't access secrets.* directly - must be passed from workflow
aws-access-key-id:
description: 'AWS Access Key ID for S3 access'
required: true
aws-secret-access-key:
description: 'AWS Secret Access Key for S3 access'
required: true
outputs:
cache-hit:
description: 'A boolean value to indicate an exact match was found for the primary key'
value: ${{ steps.restore-cache.outputs.cache-hit }}
cache-primary-key:
description: 'The key that was used to restore the cache (may be from restore-keys)'
value: ${{ steps.restore-cache.outputs.cache-primary-key }}
cache-matched-key:
description: 'The key that matched (same as cache-primary-key for compatibility)'
value: ${{ steps.restore-cache.outputs.cache-primary-key }}
runs:
using: 'composite'
steps:
- name: Restore cache from S3 with OverlayFS
id: restore-cache
shell: bash
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
S3_BUCKET: ${{ inputs.s3-bucket }}
S3_REGION: ${{ inputs.s3-region }}
CACHE_KEY: ${{ inputs.key }}
RESTORE_KEYS: ${{ inputs.restore-keys }}
TARGET_PATH: ${{ inputs.path }}
FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
LOOKUP_ONLY: ${{ inputs.lookup-only }}
USE_DELTAS: ${{ inputs.use-deltas }}
COMMIT_MSG: ${{ github.event.head_commit.message }}
run: |
set -euo pipefail
echo "=========================================="
echo "Xahau Cache Restore (S3 + OverlayFS)"
echo "=========================================="
echo "Target path: ${TARGET_PATH}"
echo "Primary key: ${CACHE_KEY}"
echo "S3 bucket: s3://${S3_BUCKET}"
echo "Use deltas: ${USE_DELTAS}"
echo ""
# Normalize target path (expand tilde and resolve to absolute path)
# This ensures consistent path comparison in the mount registry
if [[ "${TARGET_PATH}" == ~* ]]; then
# Expand tilde manually (works even if directory doesn't exist yet)
TARGET_PATH="${HOME}${TARGET_PATH:1}"
fi
echo "Normalized target path: ${TARGET_PATH}"
# Generate unique cache workspace
CACHE_HASH=$(echo "${CACHE_KEY}" | md5sum | cut -d' ' -f1)
CACHE_WORKSPACE="/tmp/xahau-cache-${CACHE_HASH}"
echo "Cache workspace: ${CACHE_WORKSPACE}"
# Check for [ci-clear-cache] tag in commit message
if echo "${COMMIT_MSG}" | grep -q '\[ci-clear-cache\]'; then
echo ""
echo "🗑️ [ci-clear-cache] detected in commit message"
echo "Clearing cache for key: ${CACHE_KEY}"
echo ""
# Delete base layer
S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "Deleting base layer: ${S3_BASE_KEY}"
aws s3 rm "${S3_BASE_KEY}" --region "${S3_REGION}" 2>/dev/null || true
echo "✓ Base layer deleted"
else
echo " No base layer found to delete"
fi
# Delete all delta layers for this key
echo "Deleting all delta layers matching: ${CACHE_KEY}-delta-*"
DELTA_COUNT=$(aws s3 ls "s3://${S3_BUCKET}/" --region "${S3_REGION}" | grep "${CACHE_KEY}-delta-" | wc -l || echo "0")
DELTA_COUNT=$(echo "${DELTA_COUNT}" | tr -d ' \n') # Trim whitespace
if [ "${DELTA_COUNT}" -gt 0 ]; then
aws s3 rm "s3://${S3_BUCKET}/" --recursive \
--exclude "*" \
--include "${CACHE_KEY}-delta-*" \
--region "${S3_REGION}" 2>/dev/null || true
echo "✓ Deleted ${DELTA_COUNT} delta layer(s)"
else
echo " No delta layers found to delete"
fi
echo ""
echo "✅ Cache cleared successfully"
echo "Build will proceed from scratch (bootstrap mode)"
echo ""
fi
# Create OverlayFS directory structure
mkdir -p "${CACHE_WORKSPACE}"/{base,upper,work,merged}
# Function to try downloading from S3
try_restore_key() {
local try_key="$1"
local s3_base="s3://${S3_BUCKET}/${try_key}-base.tar.zst"
echo "Trying cache key: ${try_key}"
# Check if base exists (one base per key, immutable)
echo "Checking for base layer..."
if aws s3 ls "${s3_base}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "✓ Found base layer: ${s3_base}"
if [ "${LOOKUP_ONLY}" = "true" ]; then
echo "Lookup-only mode: cache exists, skipping download"
return 0
fi
# Download base layer
echo "Downloading base layer..."
aws s3 cp "${s3_base}" /tmp/cache-base.tar.zst --region "${S3_REGION}" --quiet
# Extract base layer
echo "Extracting base layer..."
tar -xf /tmp/cache-base.tar.zst -C "${CACHE_WORKSPACE}/base"
rm /tmp/cache-base.tar.zst
# Query for latest timestamped delta (only if use-deltas enabled)
if [ "${USE_DELTAS}" = "true" ]; then
echo "Querying for latest delta..."
LATEST_DELTA=$(aws s3api list-objects-v2 \
--bucket "${S3_BUCKET}" \
--prefix "${try_key}-delta-" \
--region "${S3_REGION}" \
--query 'sort_by(Contents, &LastModified)[-1].Key' \
--output text 2>/dev/null || echo "")
if [ -n "${LATEST_DELTA}" ] && [ "${LATEST_DELTA}" != "None" ]; then
echo "✓ Found latest delta: ${LATEST_DELTA}"
echo "Downloading delta layer..."
aws s3 cp "s3://${S3_BUCKET}/${LATEST_DELTA}" /tmp/cache-delta.tar.zst --region "${S3_REGION}" --quiet
echo "Extracting delta layer..."
tar -xf /tmp/cache-delta.tar.zst -C "${CACHE_WORKSPACE}/upper" 2>/dev/null || true
rm /tmp/cache-delta.tar.zst
else
echo " No delta layer found (this is fine for first build)"
fi
else
echo " Delta caching disabled (use-deltas: false)"
fi
return 0
else
echo "✗ No base layer found for key: ${try_key}"
return 1
fi
}
# Try primary key first
MATCHED_KEY=""
EXACT_MATCH="false"
if try_restore_key "${CACHE_KEY}"; then
MATCHED_KEY="${CACHE_KEY}"
EXACT_MATCH="true"
echo ""
echo "🎯 Exact cache hit for key: ${CACHE_KEY}"
else
# Try restore-keys (prefix matching)
if [ -n "${RESTORE_KEYS}" ]; then
echo ""
echo "Primary key not found, trying restore-keys..."
# Split restore-keys by newline
while IFS= read -r restore_key; do
# Skip empty lines
[ -z "${restore_key}" ] && continue
# Trim whitespace
restore_key=$(echo "${restore_key}" | xargs)
if try_restore_key "${restore_key}"; then
MATCHED_KEY="${restore_key}"
EXACT_MATCH="false"
echo ""
echo "✓ Cache restored from fallback key: ${restore_key}"
break
fi
done <<< "${RESTORE_KEYS}"
fi
fi
# Check if we found anything
if [ -z "${MATCHED_KEY}" ]; then
echo ""
echo "❌ No cache found for key: ${CACHE_KEY}"
echo "This is BOOTSTRAP mode - first build for this cache key"
if [ "${FAIL_ON_MISS}" = "true" ]; then
echo "fail-on-cache-miss is enabled, failing workflow"
exit 1
fi
# Set outputs for cache miss
echo "cache-hit=false" >> $GITHUB_OUTPUT
echo "cache-primary-key=" >> $GITHUB_OUTPUT
# Create empty cache directory for bootstrap
mkdir -p "${TARGET_PATH}"
# Record bootstrap mode for save action
# Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
# For bootstrap: workspace="bootstrap", matched_key=primary_key, exact_match=false
MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
echo "${TARGET_PATH}:bootstrap:${CACHE_KEY}:${CACHE_KEY}:false:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"
echo ""
echo "=========================================="
echo "Cache restore completed (bootstrap mode)"
echo "Created empty cache directory: ${TARGET_PATH}"
echo "=========================================="
exit 0
fi
# If lookup-only, we're done
if [ "${LOOKUP_ONLY}" = "true" ]; then
echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT
# Clean up workspace
rm -rf "${CACHE_WORKSPACE}"
echo ""
echo "=========================================="
echo "Cache lookup completed (lookup-only mode)"
echo "=========================================="
exit 0
fi
# Mount OverlayFS
echo ""
echo "Mounting OverlayFS..."
sudo mount -t overlay overlay \
-o lowerdir="${CACHE_WORKSPACE}/base",upperdir="${CACHE_WORKSPACE}/upper",workdir="${CACHE_WORKSPACE}/work" \
"${CACHE_WORKSPACE}/merged"
# Verify mount
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
echo "✓ OverlayFS mounted successfully"
else
echo "❌ Failed to mount OverlayFS"
exit 1
fi
# Create target directory parent if needed
TARGET_PARENT=$(dirname "${TARGET_PATH}")
mkdir -p "${TARGET_PARENT}"
# Remove existing target if it exists
if [ -e "${TARGET_PATH}" ]; then
echo "Removing existing target: ${TARGET_PATH}"
rm -rf "${TARGET_PATH}"
fi
# Symlink target path to merged view
echo "Creating symlink: ${TARGET_PATH} -> ${CACHE_WORKSPACE}/merged"
ln -s "${CACHE_WORKSPACE}/merged" "${TARGET_PATH}"
# Save mount info for cleanup/save later
# Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
# This tells save action whether to create new base (partial match) or just delta (exact match)
MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
echo "${TARGET_PATH}:${CACHE_WORKSPACE}:${MATCHED_KEY}:${CACHE_KEY}:${EXACT_MATCH}:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"
# Set outputs
echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT
# Show statistics
echo ""
echo "Cache statistics:"
echo " Base layer size: $(du -sh ${CACHE_WORKSPACE}/base 2>/dev/null | cut -f1 || echo '0')"
echo " Delta layer size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1 || echo '0')"
echo " Merged view size: $(du -sh ${CACHE_WORKSPACE}/merged 2>/dev/null | cut -f1 || echo '0')"
echo ""
echo "=========================================="
echo "Cache restore completed successfully"
echo "Exact match: ${EXACT_MATCH}"
echo "Matched key: ${MATCHED_KEY}"
echo "=========================================="

View File

@@ -1,398 +0,0 @@
name: 'Xahau Cache Save (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/save using S3 and OverlayFS for delta caching'
inputs:
path:
description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
required: true
key:
description: 'An explicit key for saving the cache'
required: true
s3-bucket:
description: 'S3 bucket name for cache storage'
required: false
default: 'xahaud-github-actions-cache-niq'
s3-region:
description: 'S3 region'
required: false
default: 'us-east-1'
use-deltas:
description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
required: false
default: 'true'
# Note: Composite actions can't access secrets.* directly - must be passed from workflow
aws-access-key-id:
description: 'AWS Access Key ID for S3 access'
required: true
aws-secret-access-key:
description: 'AWS Secret Access Key for S3 access'
required: true
runs:
using: 'composite'
steps:
- name: Save cache to S3 with OverlayFS delta
shell: bash
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
S3_BUCKET: ${{ inputs.s3-bucket }}
S3_REGION: ${{ inputs.s3-region }}
CACHE_KEY: ${{ inputs.key }}
TARGET_PATH: ${{ inputs.path }}
USE_DELTAS: ${{ inputs.use-deltas }}
run: |
set -euo pipefail
echo "=========================================="
echo "Xahau Cache Save (S3 + OverlayFS)"
echo "=========================================="
echo "Target path: ${TARGET_PATH}"
echo "Cache key: ${CACHE_KEY}"
echo "S3 bucket: s3://${S3_BUCKET}"
echo ""
# Normalize target path (expand tilde and resolve to absolute path)
# This ensures consistent path comparison with the mount registry
if [[ "${TARGET_PATH}" == ~* ]]; then
# Expand tilde manually (works even if directory doesn't exist yet)
TARGET_PATH="${HOME}${TARGET_PATH:1}"
fi
echo "Normalized target path: ${TARGET_PATH}"
echo ""
# Find the cache workspace from mount registry
MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
if [ ! -f "${MOUNT_REGISTRY}" ]; then
echo "⚠️ No cache mounts found (mount registry doesn't exist)"
echo "This usually means cache restore was not called, or there was no cache to restore."
echo "Skipping cache save."
exit 0
fi
# Find entry for this path
# Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
# Bootstrap mode: path:bootstrap:key:key:false:true/false (workspace="bootstrap")
CACHE_WORKSPACE=""
MATCHED_KEY=""
PRIMARY_KEY=""
EXACT_MATCH=""
REGISTRY_USE_DELTAS=""
while IFS=: read -r mount_path mount_workspace mount_matched_key mount_primary_key mount_exact_match mount_use_deltas; do
if [ "${mount_path}" = "${TARGET_PATH}" ]; then
CACHE_WORKSPACE="${mount_workspace}"
MATCHED_KEY="${mount_matched_key}"
PRIMARY_KEY="${mount_primary_key}"
EXACT_MATCH="${mount_exact_match}"
REGISTRY_USE_DELTAS="${mount_use_deltas}"
break
fi
done < "${MOUNT_REGISTRY}"
if [ -z "${CACHE_WORKSPACE}" ] && [ -z "${MATCHED_KEY}" ]; then
echo "⚠️ No cache entry found for path: ${TARGET_PATH}"
echo "This usually means cache restore was not called for this path."
echo "Skipping cache save."
exit 0
fi
# Determine cache mode
if [ "${CACHE_WORKSPACE}" = "bootstrap" ]; then
CACHE_MODE="bootstrap"
PRIMARY_KEY="${MATCHED_KEY}" # In bootstrap, matched_key field contains primary key
echo "Cache mode: BOOTSTRAP (first build for this key)"
echo "Primary key: ${PRIMARY_KEY}"
elif [ "${EXACT_MATCH}" = "false" ]; then
CACHE_MODE="partial-match"
echo "Cache mode: PARTIAL MATCH (restore-key used)"
echo "Cache workspace: ${CACHE_WORKSPACE}"
echo "Matched key from restore: ${MATCHED_KEY}"
echo "Primary key (will save new base): ${PRIMARY_KEY}"
else
CACHE_MODE="exact-match"
echo "Cache mode: EXACT MATCH (cache hit)"
echo "Cache workspace: ${CACHE_WORKSPACE}"
echo "Matched key: ${MATCHED_KEY}"
fi
echo "Use deltas: ${REGISTRY_USE_DELTAS}"
echo ""
# Handle different cache modes
if [ "${CACHE_MODE}" = "bootstrap" ]; then
# Bootstrap: Save entire cache as base layer (no OverlayFS was used)
echo "Bootstrap mode: Creating initial base layer from ${TARGET_PATH}"
BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
echo "Creating base tarball..."
tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"
BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
echo "✓ Base tarball created: ${BASE_SIZE}"
echo ""
# Use static base name (one base per key, immutable)
S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"
# Check if base already exists (immutability - first write wins)
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "⚠️ Base layer already exists: ${S3_BASE_KEY}"
echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
else
echo "Uploading base layer to S3..."
echo " Key: ${PRIMARY_KEY}-base.tar.zst"
aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${PRIMARY_KEY}-base.tar.zst" \
--body "${BASE_TARBALL}" \
--tagging 'type=base' \
--region "${S3_REGION}" \
>/dev/null
echo "✓ Uploaded: ${S3_BASE_KEY}"
fi
# Cleanup
rm -f "${BASE_TARBALL}"
echo ""
echo "=========================================="
echo "Bootstrap cache save completed"
echo "Base size: ${BASE_SIZE}"
echo "Cache key: ${PRIMARY_KEY}"
echo "=========================================="
exit 0
elif [ "${CACHE_MODE}" = "partial-match" ]; then
# Partial match: Save merged view as new base ONLY (no delta)
# The delta is relative to the OLD base, not the NEW base we're creating
echo "Partial match mode: Saving new base layer for primary key"
echo "Note: Delta will NOT be saved (it's relative to old base)"
BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
echo "Creating base tarball from merged view..."
tar -cf - -C "${CACHE_WORKSPACE}/merged" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"
BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
echo "✓ Base tarball created: ${BASE_SIZE}"
echo ""
# Use static base name (one base per key, immutable)
S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"
# Check if base already exists (immutability - first write wins)
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "⚠️ Base layer already exists: ${S3_BASE_KEY}"
echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
else
echo "Uploading new base layer to S3..."
echo " Key: ${PRIMARY_KEY}-base.tar.zst"
aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${PRIMARY_KEY}-base.tar.zst" \
--body "${BASE_TARBALL}" \
--tagging 'type=base' \
--region "${S3_REGION}" \
>/dev/null
echo "✓ Uploaded: ${S3_BASE_KEY}"
fi
# Cleanup
rm -f "${BASE_TARBALL}"
# Unmount and cleanup
echo ""
echo "Cleaning up..."
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
sudo umount "${CACHE_WORKSPACE}/merged" || {
echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
echo "Attempting lazy unmount..."
sudo umount -l "${CACHE_WORKSPACE}/merged" || true
}
fi
rm -rf "${CACHE_WORKSPACE}"
# Remove from registry
if [ -f "${MOUNT_REGISTRY}" ]; then
grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
fi
echo "✓ Cleanup completed"
echo ""
echo "=========================================="
echo "Partial match cache save completed"
echo "New base created for: ${PRIMARY_KEY}"
echo "Base size: ${BASE_SIZE}"
if [ "${REGISTRY_USE_DELTAS}" = "true" ]; then
echo "Next exact-match build will create deltas from this base"
else
echo "Next exact-match build will reuse this base (base-only mode)"
fi
echo "=========================================="
exit 0
fi
# For exact-match ONLY: Save delta (if use-deltas enabled)
if [ "${CACHE_MODE}" = "exact-match" ]; then
# If deltas are disabled, just cleanup and exit
if [ "${REGISTRY_USE_DELTAS}" != "true" ]; then
echo " Delta caching disabled (use-deltas: false)"
echo "Base already exists for this key, nothing to save."
# Unmount and cleanup
echo ""
echo "Cleaning up..."
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
fi
rm -rf "${CACHE_WORKSPACE}"
# Remove from registry
if [ -f "${MOUNT_REGISTRY}" ]; then
grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
fi
echo ""
echo "=========================================="
echo "Cache save completed (base-only mode)"
echo "=========================================="
exit 0
fi
# Check if upper layer has any changes
if [ -z "$(ls -A ${CACHE_WORKSPACE}/upper 2>/dev/null)" ]; then
echo " No changes detected in upper layer (cache is unchanged)"
echo "Skipping delta upload to save bandwidth."
# Still unmount and cleanup
echo ""
echo "Cleaning up..."
sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
rm -rf "${CACHE_WORKSPACE}"
echo ""
echo "=========================================="
echo "Cache save completed (no changes)"
echo "=========================================="
exit 0
fi
# Show delta statistics
echo "Delta layer statistics:"
echo " Files changed: $(find ${CACHE_WORKSPACE}/upper -type f 2>/dev/null | wc -l)"
echo " Delta size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1)"
echo ""
# Create delta tarball from upper layer
echo "Creating delta tarball..."
DELTA_TARBALL="/tmp/xahau-cache-delta-$$.tar.zst"
tar -cf - -C "${CACHE_WORKSPACE}/upper" . | zstd -3 -T0 -q -o "${DELTA_TARBALL}"
DELTA_SIZE=$(du -h "${DELTA_TARBALL}" | cut -f1)
echo "✓ Delta tarball created: ${DELTA_SIZE}"
echo ""
# Upload timestamped delta (no overwrites = zero concurrency issues)
TIMESTAMP=$(date +%Y%m%d%H%M%S)
COMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
# Use PRIMARY_KEY for delta (ensures deltas match their base)
S3_DELTA_TIMESTAMPED="s3://${S3_BUCKET}/${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"
echo "Uploading timestamped delta to S3..."
echo " Key: ${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"
# Upload with tag (deltas cleaned up inline - keep last 1)
aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst" \
--body "${DELTA_TARBALL}" \
--tagging 'type=delta-archive' \
--region "${S3_REGION}" \
>/dev/null
echo "✓ Uploaded: ${S3_DELTA_TIMESTAMPED}"
# Inline cleanup: Keep only latest delta (the one we just uploaded)
echo ""
echo "Cleaning up old deltas (keeping only latest)..."
# List all deltas for this key, sorted by LastModified (oldest first)
ALL_DELTAS=$(aws s3api list-objects-v2 \
--bucket "${S3_BUCKET}" \
--prefix "${PRIMARY_KEY}-delta-" \
--region "${S3_REGION}" \
--query 'sort_by(Contents, &LastModified)[*].Key' \
--output json 2>/dev/null || echo "[]")
DELTA_COUNT=$(echo "${ALL_DELTAS}" | jq 'length' 2>/dev/null || echo "0")
if [ "${DELTA_COUNT}" -gt 1 ]; then
# Keep last 1 (newest), delete all older ones (all except last 1 = [0:-1])
OLD_DELTAS=$(echo "${ALL_DELTAS}" | jq -r '.[0:-1][]' 2>/dev/null)
if [ -n "${OLD_DELTAS}" ]; then
DELETE_COUNT=$((DELTA_COUNT - 1))
echo " Found ${DELETE_COUNT} old delta(s) to delete"
# Create delete batch request JSON
DELETE_OBJECTS=$(echo "${OLD_DELTAS}" | jq -R -s -c 'split("\n") | map(select(length > 0)) | map({Key: .}) | {Objects: ., Quiet: true}' 2>/dev/null)
if [ -n "${DELETE_OBJECTS}" ]; then
aws s3api delete-objects \
--bucket "${S3_BUCKET}" \
--delete "${DELETE_OBJECTS}" \
--region "${S3_REGION}" \
>/dev/null 2>&1
echo "✓ Deleted ${DELETE_COUNT} old delta(s)"
fi
fi
else
echo " Only ${DELTA_COUNT} delta(s) exist, no cleanup needed"
fi
# Cleanup delta tarball
rm -f "${DELTA_TARBALL}"
# Cleanup: Unmount OverlayFS and remove workspace
echo ""
echo "Cleaning up..."
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
sudo umount "${CACHE_WORKSPACE}/merged" || {
echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
echo "Attempting lazy unmount..."
sudo umount -l "${CACHE_WORKSPACE}/merged" || true
}
fi
# Remove workspace
rm -rf "${CACHE_WORKSPACE}"
fi
# Remove from registry
if [ -f "${MOUNT_REGISTRY}" ]; then
grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
fi
echo "✓ Cleanup completed"
echo ""
echo "=========================================="
echo "Cache save completed successfully"
echo "Mode: ${CACHE_MODE}"
echo "Cache key: ${PRIMARY_KEY}"
if [ -n "${DELTA_SIZE:-}" ]; then
echo "Delta size: ${DELTA_SIZE}"
fi
echo "=========================================="

View File

@@ -2,10 +2,6 @@ name: 'Configure ccache'
description: 'Sets up ccache with consistent configuration'
inputs:
cache_dir:
description: 'Path to ccache directory'
required: false
default: '~/.ccache'
max_size:
description: 'Maximum cache size'
required: false
@@ -18,6 +14,18 @@ inputs:
description: 'How to check compiler for changes'
required: false
default: 'content'
is_main_branch:
description: 'Whether the current branch is the main branch'
required: false
default: 'false'
main_cache_dir:
description: 'Path to the main branch cache directory'
required: false
default: '~/.ccache-main'
current_cache_dir:
description: 'Path to the current branch cache directory'
required: false
default: '~/.ccache-current'
runs:
using: 'composite'
@@ -25,20 +33,31 @@ runs:
- name: Configure ccache
shell: bash
run: |
# Create cache directory
mkdir -p ${{ inputs.cache_dir }}
# Configure ccache settings
ccache --set-config=cache_dir="${{ inputs.cache_dir }}"
ccache --set-config=max_size=${{ inputs.max_size }}
ccache --set-config=hash_dir=${{ inputs.hash_dir }}
ccache --set-config=compiler_check=${{ inputs.compiler_check }}
# Export for use by build tools
echo "CCACHE_DIR=${{ inputs.cache_dir }}" >> $GITHUB_ENV
# Print config for verification
ccache -p
# Zero statistics before the build
ccache -z
# Create cache directories
mkdir -p ${{ inputs.main_cache_dir }} ${{ inputs.current_cache_dir }}
# Set compiler check globally
ccache -o compiler_check=${{ inputs.compiler_check }}
# Use a single config file location
mkdir -p ~/.ccache
export CONF_PATH="$HOME/.ccache/ccache.conf"
# Apply common settings
echo "max_size = ${{ inputs.max_size }}" > "$CONF_PATH"
echo "hash_dir = ${{ inputs.hash_dir }}" >> "$CONF_PATH"
echo "compiler_check = ${{ inputs.compiler_check }}" >> "$CONF_PATH"
if [ "${{ inputs.is_main_branch }}" == "true" ]; then
# Main branch: use main branch cache
ccache --set-config=cache_dir="${{ inputs.main_cache_dir }}"
echo "CCACHE_DIR=${{ inputs.main_cache_dir }}" >> $GITHUB_ENV
else
# Feature branch: use current branch cache with main as secondary
ccache --set-config=cache_dir="${{ inputs.current_cache_dir }}"
ccache --set-config=secondary_storage="file:${{ inputs.main_cache_dir }}"
echo "CCACHE_DIR=${{ inputs.current_cache_dir }}" >> $GITHUB_ENV
fi
ccache -p # Print config for verification
ccache -z # Zero statistics before the build
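The net effect of the feature-branch path above is a per-branch primary ccache backed by the main-branch cache as secondary storage. A minimal sketch of the roughly equivalent manual configuration, assuming the default directories and the 2G/content settings used by the workflow:

# Roughly what the composite action configures on a non-main branch (defaults assumed)
mkdir -p "$HOME/.ccache-main" "$HOME/.ccache-current"
ccache --set-config=cache_dir="$HOME/.ccache-current"
ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
ccache --set-config=max_size=2G
ccache --set-config=hash_dir=true
ccache --set-config=compiler_check=content
ccache -p   # print the effective config
ccache -z   # zero statistics before the build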

View File

@@ -47,12 +47,6 @@ inputs:
description: 'GCC version to use for Clang toolchain (e.g. 11, 13)'
required: false
default: ''
aws-access-key-id:
description: 'AWS Access Key ID for S3 cache storage'
required: true
aws-secret-access-key:
description: 'AWS Secret Access Key for S3 cache storage'
required: true
runs:
using: 'composite'
@@ -65,19 +59,28 @@ runs:
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT
- name: Restore ccache directory
- name: Restore ccache directory for default branch
if: inputs.ccache_enabled == 'true'
id: ccache-restore
uses: ./.github/actions/xahau-actions-cache-restore
uses: actions/cache/restore@v4
with:
path: ~/.ccache
path: ~/.ccache-main
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
- name: Restore ccache directory for current branch
if: inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
id: ccache-restore-current-branch
uses: actions/cache/restore@v4
with:
path: ~/.ccache-current
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
- name: Configure project
shell: bash
@@ -156,11 +159,16 @@ runs:
shell: bash
run: ccache -s
- name: Save ccache directory
if: always() && inputs.ccache_enabled == 'true'
uses: ./.github/actions/xahau-actions-cache-save
- name: Save ccache directory for default branch
if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
path: ~/.ccache-main
key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
- name: Save ccache directory for current branch
if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache-current
key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}
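The branch-specific keys above depend on the sanitized branch name produced by the safe-branch step. A small sketch of that sanitization in isolation, using a made-up branch name:

# Same tr invocation as the safe-branch step (branch name is illustrative)
branch="feature/cron-tests"
SAFE_BRANCH=$(echo "${branch}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "${SAFE_BRANCH}"   # prints "feature-cron-tests-"; the slash and echo's trailing newline both map to '-'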

View File

@@ -32,12 +32,6 @@ inputs:
options:
- libstdcxx
- libcxx
aws-access-key-id:
description: 'AWS Access Key ID for S3 cache storage'
required: true
aws-secret-access-key:
description: 'AWS Secret Access Key for S3 cache storage'
required: true
outputs:
cache-hit:
@@ -47,21 +41,47 @@ outputs:
runs:
using: 'composite'
steps:
- name: Generate safe branch name
if: inputs.cache_enabled == 'true'
id: safe-branch
shell: bash
run: |
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT
- name: Check conanfile changes
if: inputs.cache_enabled == 'true'
id: check-conanfile-changes
shell: bash
run: |
# Check if we're on the main branch
if [ "${{ github.ref_name }}" == "${{ inputs.main_branch }}" ]; then
echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT
else
# Fetch main branch for comparison
git fetch origin ${{ inputs.main_branch }}
# Check if conanfile.txt or conanfile.py has changed compared to main branch
if git diff --quiet origin/${{ inputs.main_branch }}..HEAD -- '**/conanfile.txt' '**/conanfile.py'; then
echo "should-save-conan-cache=false" >> $GITHUB_OUTPUT
else
echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT
fi
fi
- name: Restore Conan cache
if: inputs.cache_enabled == 'true'
id: cache-restore-conan
uses: ./.github/actions/xahau-actions-cache-restore
uses: actions/cache/restore@v4
with:
path: ~/.conan2
path: |
~/.conan
~/.conan2
# Note: compiler-id format is compiler-version-stdlib[-gccversion]
key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
restore-keys: |
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
use-deltas: 'false'
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
- name: Export custom recipes
shell: bash
@@ -87,11 +107,10 @@ runs:
..
- name: Save Conan cache
if: always() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
uses: ./.github/actions/xahau-actions-cache-save
if: always() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true' && steps.check-conanfile-changes.outputs.should-save-conan-cache == 'true'
uses: actions/cache/save@v4
with:
path: ~/.conan2
key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
use-deltas: 'false'
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
path: |
~/.conan
~/.conan2
key: ${{ steps.cache-restore-conan.outputs.cache-primary-key }}

View File

@@ -1,290 +0,0 @@
name: Test Cache Actions (State Machine)
on:
push:
branches: ["nd-experiment-overlayfs-*"]
workflow_dispatch:
inputs:
state_assertion:
description: 'Expected state (optional, e.g. "2" to assert state 2)'
required: false
type: string
default: '1'
start_state:
description: 'Force specific starting state (optional, e.g. "3" to start at state 3)'
required: false
type: string
clear_cache:
description: 'Clear cache before running'
required: false
type: boolean
default: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
test-cache-state-machine:
runs-on: ubuntu-latest
env:
CACHE_KEY: test-state-machine-${{ github.ref_name }}
CACHE_DIR: /tmp/test-cache
S3_BUCKET: xahaud-github-actions-cache-niq
S3_REGION: us-east-1
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Parse Inputs (workflow_dispatch or commit message)
id: parse-inputs
run: |
# Priority 1: workflow_dispatch inputs (manual trigger)
STATE_ASSERTION="${{ inputs.state_assertion }}"
START_STATE="${{ inputs.start_state }}"
SHOULD_CLEAR="${{ inputs.clear_cache }}"
# Priority 2: commit message tags (push event)
if [ "${{ github.event_name }}" = "push" ]; then
COMMIT_MSG="${{ github.event.head_commit.message }}"
# Parse [state:N] assertion tag (optional, if not provided as input)
if [ -z "${STATE_ASSERTION}" ] && echo "${COMMIT_MSG}" | grep -qE '\[state:[0-9]+\]'; then
STATE_ASSERTION=$(echo "${COMMIT_MSG}" | grep -oE '\[state:[0-9]+\]' | grep -oE '[0-9]+')
echo "State assertion found in commit: ${STATE_ASSERTION}"
fi
# Parse [start-state:N] force tag (optional, if not provided as input)
if [ -z "${START_STATE}" ] && echo "${COMMIT_MSG}" | grep -qE '\[start-state:[0-9]+\]'; then
START_STATE=$(echo "${COMMIT_MSG}" | grep -oE '\[start-state:[0-9]+\]' | grep -oE '[0-9]+')
echo "Start state found in commit: ${START_STATE}"
fi
# Parse [ci-clear-cache] tag (if not provided as input)
if [ "${SHOULD_CLEAR}" != "true" ] && echo "${COMMIT_MSG}" | grep -q '\[ci-clear-cache\]'; then
SHOULD_CLEAR=true
echo "Cache clear requested in commit"
fi
fi
# Output final values
echo "state_assertion=${STATE_ASSERTION}" >> "$GITHUB_OUTPUT"
echo "start_state=${START_STATE}" >> "$GITHUB_OUTPUT"
echo "should_clear=${SHOULD_CLEAR}" >> "$GITHUB_OUTPUT"
# Log what we're using
echo ""
echo "Configuration:"
[ -n "${STATE_ASSERTION}" ] && echo " State assertion: ${STATE_ASSERTION}"
[ -n "${START_STATE}" ] && echo " Start state: ${START_STATE}"
echo " Clear cache: ${SHOULD_CLEAR}"
- name: Check S3 State (Before Restore)
env:
AWS_ACCESS_KEY_ID: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
run: |
echo "=========================================="
echo "S3 State Check (Before Restore)"
echo "=========================================="
echo "Cache key: ${CACHE_KEY}"
echo ""
# Check if base exists
BASE_EXISTS=false
if aws s3 ls "s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst" --region "${S3_REGION}" >/dev/null 2>&1; then
BASE_EXISTS=true
fi
echo "Base exists: ${BASE_EXISTS}"
# Count deltas
DELTA_COUNT=$(aws s3 ls "s3://${S3_BUCKET}/" --region "${S3_REGION}" | grep "${CACHE_KEY}-delta-" | wc -l || echo "0")
echo "Delta count: ${DELTA_COUNT}"
- name: Restore Cache
uses: ./.github/actions/xahau-actions-cache-restore
with:
path: ${{ env.CACHE_DIR }}
key: ${{ env.CACHE_KEY }}
s3-bucket: ${{ env.S3_BUCKET }}
s3-region: ${{ env.S3_REGION }}
use-deltas: 'true'
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
- name: Auto-Detect State and Validate
id: state
env:
STATE_ASSERTION: ${{ steps.parse-inputs.outputs.state_assertion }}
START_STATE: ${{ steps.parse-inputs.outputs.start_state }}
run: |
echo "=========================================="
echo "State Detection and Validation"
echo "=========================================="
# Create cache directory if it doesn't exist
mkdir -p "${CACHE_DIR}"
# Handle [start-state:N] - force specific state
if [ -n "${START_STATE}" ]; then
echo "🎯 [start-state:${START_STATE}] detected - forcing state setup"
# Clear cache and create state files 0 through START_STATE
rm -f ${CACHE_DIR}/state*.txt 2>/dev/null || true
for i in $(seq 0 ${START_STATE}); do
echo "State ${i} - Forced at $(date)" > "${CACHE_DIR}/state${i}.txt"
echo "Commit: ${{ github.sha }}" >> "${CACHE_DIR}/state${i}.txt"
done
DETECTED_STATE=${START_STATE}
echo "✓ Forced to state ${DETECTED_STATE}"
else
# Auto-detect state by counting state files
STATE_FILES=$(ls ${CACHE_DIR}/state*.txt 2>/dev/null | wc -l)
DETECTED_STATE=${STATE_FILES}
echo "Auto-detected state: ${DETECTED_STATE} (${STATE_FILES} state files)"
fi
# Show cache contents
echo ""
echo "Cache contents:"
if [ -d "${CACHE_DIR}" ] && [ "$(ls -A ${CACHE_DIR})" ]; then
ls -la "${CACHE_DIR}"
else
echo "(empty)"
fi
# Validate [state:N] assertion if provided
if [ -n "${STATE_ASSERTION}" ]; then
echo ""
echo "Validating assertion: [state:${STATE_ASSERTION}]"
if [ "${DETECTED_STATE}" -ne "${STATE_ASSERTION}" ]; then
echo "❌ ERROR: State mismatch!"
echo " Expected (from [state:N]): ${STATE_ASSERTION}"
echo " Detected (from cache): ${DETECTED_STATE}"
exit 1
fi
echo "✓ Assertion passed: detected == expected (${DETECTED_STATE})"
fi
# Output detected state for next steps
echo "detected_state=${DETECTED_STATE}" >> "$GITHUB_OUTPUT"
echo ""
echo "=========================================="
- name: Simulate Build (State Transition)
env:
DETECTED_STATE: ${{ steps.state.outputs.detected_state }}
run: |
echo "=========================================="
echo "Simulating Build (State Transition)"
echo "=========================================="
# Calculate next state
NEXT_STATE=$((DETECTED_STATE + 1))
echo "Transitioning: State ${DETECTED_STATE} → State ${NEXT_STATE}"
echo ""
# Create state file for next state
STATE_FILE="${CACHE_DIR}/state${NEXT_STATE}.txt"
echo "State ${NEXT_STATE} - Created at $(date)" > "${STATE_FILE}"
echo "Commit: ${{ github.sha }}" >> "${STATE_FILE}"
echo "Message: ${{ github.event.head_commit.message }}" >> "${STATE_FILE}"
echo "✓ Created ${STATE_FILE}"
# Show final cache state
echo ""
echo "Final cache contents:"
ls -la "${CACHE_DIR}"
echo ""
echo "State files:"
cat ${CACHE_DIR}/state*.txt
- name: Save Cache
uses: ./.github/actions/xahau-actions-cache-save
with:
path: ${{ env.CACHE_DIR }}
key: ${{ env.CACHE_KEY }}
s3-bucket: ${{ env.S3_BUCKET }}
s3-region: ${{ env.S3_REGION }}
use-deltas: 'true'
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
- name: Validate S3 State (After Save)
env:
AWS_ACCESS_KEY_ID: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
DETECTED_STATE: ${{ steps.state.outputs.detected_state }}
run: |
echo "=========================================="
echo "S3 State Validation (After Save)"
echo "=========================================="
# Calculate next state (what we just saved)
NEXT_STATE=$((DETECTED_STATE + 1))
echo "Saved state: ${NEXT_STATE}"
echo ""
# Check if base exists
if aws s3 ls "s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst" --region "${S3_REGION}" >/dev/null 2>&1; then
BASE_SIZE=$(aws s3 ls "s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst" --region "${S3_REGION}" | awk '{print $3}')
echo "✓ Base exists: ${CACHE_KEY}-base.tar.zst (${BASE_SIZE} bytes)"
else
echo "❌ ERROR: Base should exist after save"
exit 1
fi
# List deltas
echo ""
echo "Delta layers:"
DELTAS=$(aws s3 ls "s3://${S3_BUCKET}/" --region "${S3_REGION}" | grep "${CACHE_KEY}-delta-" || echo "")
if [ -n "${DELTAS}" ]; then
echo "${DELTAS}"
DELTA_COUNT=$(echo "${DELTAS}" | wc -l)
else
echo "(none)"
DELTA_COUNT=0
fi
# Validate S3 state
echo ""
if [ "${DETECTED_STATE}" -eq 0 ]; then
# Saved state 1 from bootstrap (state 0 → 1)
if [ "${DELTA_COUNT}" -ne 0 ]; then
echo "⚠️ WARNING: Bootstrap (state 1) should have 0 deltas, found ${DELTA_COUNT}"
else
echo "✓ State 1 saved: base exists, 0 deltas"
fi
else
# Saved delta (state N+1)
if [ "${DELTA_COUNT}" -ne 1 ]; then
echo "⚠️ WARNING: State ${NEXT_STATE} expects 1 delta (inline cleanup), found ${DELTA_COUNT}"
echo "This might be OK if multiple builds ran concurrently"
else
echo "✓ State ${NEXT_STATE} saved: base + 1 delta (old deltas cleaned)"
fi
fi
echo ""
echo "=========================================="
echo "✅ State ${DETECTED_STATE} → ${NEXT_STATE} Complete!"
echo "=========================================="
echo ""
echo "Next commit will auto-detect state ${NEXT_STATE}"
echo ""
echo "Options:"
echo " # Normal (auto-advance)"
echo " git commit -m 'continue testing'"
echo ""
echo " # With assertion (validate state)"
echo " git commit -m 'test delta [state:${NEXT_STATE}]'"
echo ""
echo " # Clear cache and restart"
echo " git commit -m 'fresh start [ci-clear-cache]'"
echo ""
echo " # Jump to specific state"
echo " git commit -m 'jump to state 3 [start-state:3]'"

View File

@@ -1,182 +0,0 @@
name: Test OverlayFS Delta Extraction
on:
push:
branches: ["*"]
workflow_dispatch:
jobs:
test-overlayfs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
# - name: Test encrypted secrets (decrypt test message)
# run: |
# echo "========================================"
# echo "TESTING ENCRYPTED SECRETS"
# echo "========================================"
# echo ""
# echo "Decrypting test message from .github/secrets/test-message.gpg"
# echo "Using encryption key from GitHub Secrets..."
# echo ""
#
# # Decrypt using key from GitHub Secrets
# echo "${{ secrets.TEST_ENCRYPTION_KEY }}" | \
# gpg --batch --yes --passphrase-fd 0 \
# --decrypt .github/secrets/test-message.gpg
#
# echo ""
# echo "========================================"
# echo "If you see the success message above,"
# echo "then encrypted secrets work! 🎉"
# echo "========================================"
# echo ""
- name: Setup OverlayFS layers
run: |
echo "=== Creating directory structure ==="
mkdir -p /tmp/test/{base,delta,upper,work,merged}
echo "=== Creating base layer files ==="
echo "base file 1" > /tmp/test/base/file1.txt
echo "base file 2" > /tmp/test/base/file2.txt
echo "base file 3" > /tmp/test/base/file3.txt
mkdir -p /tmp/test/base/subdir
echo "base subdir file" > /tmp/test/base/subdir/file.txt
echo "=== Base layer contents ==="
find /tmp/test/base -type f -exec sh -c 'echo "{}:"; cat "{}"' \;
echo "=== Mounting OverlayFS ==="
sudo mount -t overlay overlay \
-o lowerdir=/tmp/test/base,upperdir=/tmp/test/upper,workdir=/tmp/test/work \
/tmp/test/merged
echo "=== Mounted successfully ==="
mount | grep overlay
- name: Verify merged view shows base files
run: |
echo "=== Contents of /merged (should show base files) ==="
ls -R /tmp/test/merged
find /tmp/test/merged -type f -exec sh -c 'echo "{}:"; cat "{}"' \;
- name: Make changes via merged layer
run: |
echo "=== Making changes via /merged ==="
# Overwrite existing file
echo "MODIFIED file 2" > /tmp/test/merged/file2.txt
echo "Modified file2.txt"
# Create new file
echo "NEW file 4" > /tmp/test/merged/file4.txt
echo "Created new file4.txt"
# Create new directory with file
mkdir -p /tmp/test/merged/newdir
echo "NEW file in new dir" > /tmp/test/merged/newdir/newfile.txt
echo "Created newdir/newfile.txt"
# Add file to existing directory
echo "NEW file in existing subdir" > /tmp/test/merged/subdir/newfile.txt
echo "Created subdir/newfile.txt"
echo "=== Changes complete ==="
- name: Show the delta (upperdir)
run: |
echo "========================================"
echo "THE DELTA (only changes in /upper):"
echo "========================================"
if [ -z "$(ls -A /tmp/test/upper)" ]; then
echo "Upper directory is empty - no changes detected"
else
echo "Upper directory structure:"
ls -R /tmp/test/upper
echo ""
echo "Upper directory files with content:"
find /tmp/test/upper -type f -exec sh -c 'echo "---"; echo "FILE: {}"; cat "{}"; echo ""' \;
echo "========================================"
echo "SIZE OF DELTA:"
du -sh /tmp/test/upper
echo "========================================"
fi
- name: Compare base vs upper vs merged
run: |
echo "========================================"
echo "COMPARISON:"
echo "========================================"
echo "BASE layer (original, untouched):"
ls -la /tmp/test/base/
echo ""
echo "UPPER layer (DELTA - only changes):"
ls -la /tmp/test/upper/
echo ""
echo "MERGED layer (unified view = base + upper):"
ls -la /tmp/test/merged/
echo ""
echo "========================================"
echo "PROOF: Upper dir contains ONLY the delta!"
echo "========================================"
- name: Simulate tarball creation (what we'd upload)
run: |
echo "=== Creating tarball of delta ==="
tar -czf /tmp/delta.tar.gz -C /tmp/test/upper .
echo "Delta tarball size:"
ls -lh /tmp/delta.tar.gz
echo ""
echo "Delta tarball contents:"
tar -tzf /tmp/delta.tar.gz
echo ""
echo "========================================"
echo "This is what we'd upload to S3/rsync!"
echo "Only ~few KB instead of entire cache!"
echo "========================================"
- name: Upload delta to S3 (actual test!)
env:
AWS_ACCESS_KEY_ID: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
run: |
echo "========================================"
echo "UPLOADING TO S3"
echo "========================================"
# Upload the delta tarball
aws s3 cp /tmp/delta.tar.gz \
s3://xahaud-github-actions-cache-niq/hello-world-first-test.tar.gz \
--region us-east-1
echo ""
echo "✅ Successfully uploaded to S3!"
echo "File: s3://xahaud-github-actions-cache-niq/hello-world-first-test.tar.gz"
echo ""
# Verify it exists
echo "Verifying upload..."
aws s3 ls s3://xahaud-github-actions-cache-niq/hello-world-first-test.tar.gz --region us-east-1
echo ""
echo "========================================"
echo "S3 upload test complete! 🚀"
echo "========================================"
- name: Cleanup
if: always()
run: |
echo "=== Unmounting OverlayFS ==="
sudo umount /tmp/test/merged || true

View File

@@ -2,7 +2,7 @@ name: Nix - GA Runner
on:
push:
branches: ["dev", "candidate", "release", "nd-experiment-overlayfs-2025-10-29"]
branches: ["dev", "candidate", "release"]
pull_request:
branches: ["dev", "candidate", "release"]
schedule:
@@ -156,7 +156,7 @@ jobs:
env:
build_dir: .build
# Bump this number to invalidate all caches globally.
CACHE_VERSION: 3
CACHE_VERSION: 2
MAIN_BRANCH_NAME: dev
steps:
- name: Checkout
@@ -237,6 +237,7 @@ jobs:
max_size: 2G
hash_dir: true
compiler_check: content
is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}
- name: Configure Conan
run: |
@@ -293,8 +294,6 @@ jobs:
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: ${{ matrix.stdlib }}
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
- name: Build
uses: ./.github/actions/xahau-ga-build
@@ -309,8 +308,6 @@ jobs:
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: ${{ matrix.stdlib }}
clang_gcc_toolchain: ${{ matrix.clang_gcc_toolchain || '' }}
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
- name: Set artifact name
id: set-artifact-name

View File

@@ -72,15 +72,15 @@ It generates many files of [results](results):
desired as described above. In a perfect repo, this file will be
empty.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../.github/workflows/levelization.yml.disabled) to validate
Github workflow](../../.github/workflows/levelization.yml) to validate
that nothing changed.
* [`ordering.txt`](results/ordering.txt): A list showing relationships
between modules where there are no loops as they actually exist, as
opposed to how they are desired as described above.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../.github/workflows/levelization.yml.disabled) to validate
Github workflow](../../.github/workflows/levelization.yml) to validate
that nothing changed.
* [`levelization.yml`](../../.github/workflows/levelization.yml.disabled)
* [`levelization.yml`](../../.github/workflows/levelization.yml)
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or

View File

@@ -1769,7 +1769,7 @@ pool.ntp.org
# Unless an absolute path is specified, it will be considered relative to the
# folder in which the xahaud.cfg file is located.
[validators_file]
validators.txt
validators-xahau.txt
# Turn down default logging to save disk space in the long run.
# Valid values here are trace, debug, info, warning, error, and fatal

View File

@@ -44,12 +44,15 @@ Cron::preflight(PreflightContext const& ctx)
auto const ret = preflight0(ctx);
if (!isTesSuccess(ret))
{
JLOG(ctx.j.fatal()) << "Cron: preflight failed";
return ret;
}
auto account = ctx.tx.getAccountID(sfAccount);
if (account != beast::zero)
{
JLOG(ctx.j.warn()) << "Cron: Bad source id";
JLOG(ctx.j.fatal()) << "Cron: Bad source id";
return temBAD_SRC_ACCOUNT;
}
@@ -57,21 +60,21 @@ Cron::preflight(PreflightContext const& ctx)
auto const fee = ctx.tx.getFieldAmount(sfFee);
if (!fee.native() || fee != beast::zero)
{
JLOG(ctx.j.warn()) << "Cron: invalid fee";
JLOG(ctx.j.fatal()) << "Cron: invalid fee";
return temBAD_FEE;
}
if (!ctx.tx.getSigningPubKey().empty() || !ctx.tx.getSignature().empty() ||
ctx.tx.isFieldPresent(sfSigners))
{
JLOG(ctx.j.warn()) << "Cron: Bad signature";
JLOG(ctx.j.fatal()) << "Cron: Bad signature";
return temBAD_SIGNATURE;
}
if (ctx.tx.getFieldU32(sfSequence) != 0 ||
ctx.tx.isFieldPresent(sfPreviousTxnID))
{
JLOG(ctx.j.warn()) << "Cron: Bad sequence";
JLOG(ctx.j.fatal()) << "Cron: Bad sequence";
return temBAD_SEQUENCE;
}
@@ -84,6 +87,12 @@ Cron::preclaim(PreclaimContext const& ctx)
if (!ctx.view.rules().enabled(featureCron))
return temDISABLED;
if (ctx.tx.getFieldU32(sfLedgerSequence) != ctx.view.info().seq)
{
JLOG(ctx.j.fatal()) << "Cron: wrong ledger sequence";
return tefFAILURE;
}
return tesSUCCESS;
}
@@ -105,7 +114,7 @@ Cron::doApply()
if (!sle->isFieldPresent(sfCron))
{
JLOG(j_.warn()) << "Cron: sfCron missing from account " << id;
JLOG(j_.fatal()) << "Cron: sfCron missing from account " << id;
return tefINTERNAL;
}
@@ -114,7 +123,7 @@ Cron::doApply()
auto sleCron = view.peek(klOld);
if (!sleCron)
{
JLOG(j_.warn()) << "Cron: Cron object missing for account " << id;
JLOG(j_.fatal()) << "Cron: Cron object missing for account " << id;
return tesSUCCESS;
}
@@ -126,7 +135,10 @@ Cron::doApply()
// do all this sanity checking before we modify the ledger...
uint32_t afterTime = lastStartTime + delay;
if (afterTime < lastStartTime)
{
JLOG(j_.fatal()) << "Cron: afterTime < lastStartTime";
return tefINTERNAL;
}
// in all circumstances the Cron object is deleted...
// if there are further crons to do then a new one is created at the next
@@ -134,7 +146,10 @@ Cron::doApply()
if (!view.dirRemove(
keylet::ownerDir(id), (*sleCron)[sfOwnerNode], klOld, false))
{
JLOG(j_.fatal()) << "Cron: Ownerdir bad. " << id;
return tefBAD_LEDGER;
}
view.erase(sleCron);
@@ -156,19 +171,22 @@ Cron::doApply()
auto const page =
view.dirInsert(keylet::ownerDir(id), klCron, describeOwnerDir(id));
if (!page)
{
JLOG(j_.fatal()) << "Cron: Ownerdir full. " << id;
return tecDIR_FULL;
}
sleCron = std::make_shared<SLE>(klCron);
auto newSleCron = std::make_shared<SLE>(klCron);
sleCron->setFieldU64(sfOwnerNode, *page);
sleCron->setFieldU32(sfDelaySeconds, delay);
sleCron->setFieldU32(sfRepeatCount, recur - 1);
sleCron->setFieldU32(sfStartTime, afterTime);
sleCron->setAccountID(sfOwner, id);
newSleCron->setFieldU64(sfOwnerNode, *page);
newSleCron->setFieldU32(sfDelaySeconds, delay);
newSleCron->setFieldU32(sfRepeatCount, recur - 1);
newSleCron->setFieldU32(sfStartTime, afterTime);
newSleCron->setAccountID(sfOwner, id);
sle->setFieldH256(sfCron, klCron.key);
view.insert(sleCron);
view.insert(newSleCron);
view.update(sle);
return tesSUCCESS;

View File

@@ -1106,30 +1106,32 @@ chooseLedgerEntryType(Json::Value const& params)
std::pair<RPC::Status, LedgerEntryType> result{RPC::Status::OK, ltANY};
if (params.isMember(jss::type))
{
static constexpr std::array<std::pair<char const*, LedgerEntryType>, 22>
types{
{{jss::account, ltACCOUNT_ROOT},
{jss::amendments, ltAMENDMENTS},
{jss::check, ltCHECK},
{jss::deposit_preauth, ltDEPOSIT_PREAUTH},
{jss::directory, ltDIR_NODE},
{jss::escrow, ltESCROW},
{jss::emitted_txn, ltEMITTED_TXN},
{jss::hook, ltHOOK},
{jss::hook_definition, ltHOOK_DEFINITION},
{jss::hook_state, ltHOOK_STATE},
{jss::fee, ltFEE_SETTINGS},
{jss::hashes, ltLEDGER_HASHES},
{jss::import_vlseq, ltIMPORT_VLSEQ},
{jss::offer, ltOFFER},
{jss::payment_channel, ltPAYCHAN},
{jss::uri_token, ltURI_TOKEN},
{jss::signer_list, ltSIGNER_LIST},
{jss::state, ltRIPPLE_STATE},
{jss::ticket, ltTICKET},
{jss::nft_offer, ltNFTOKEN_OFFER},
{jss::nft_page, ltNFTOKEN_PAGE},
{jss::unl_report, ltUNL_REPORT}}};
static constexpr std::array<std::pair<char const*, LedgerEntryType>, 23>
types{{
{jss::account, ltACCOUNT_ROOT},
{jss::amendments, ltAMENDMENTS},
{jss::check, ltCHECK},
{jss::deposit_preauth, ltDEPOSIT_PREAUTH},
{jss::directory, ltDIR_NODE},
{jss::escrow, ltESCROW},
{jss::emitted_txn, ltEMITTED_TXN},
{jss::hook, ltHOOK},
{jss::hook_definition, ltHOOK_DEFINITION},
{jss::hook_state, ltHOOK_STATE},
{jss::fee, ltFEE_SETTINGS},
{jss::hashes, ltLEDGER_HASHES},
{jss::import_vlseq, ltIMPORT_VLSEQ},
{jss::offer, ltOFFER},
{jss::payment_channel, ltPAYCHAN},
{jss::uri_token, ltURI_TOKEN},
{jss::signer_list, ltSIGNER_LIST},
{jss::state, ltRIPPLE_STATE},
{jss::ticket, ltTICKET},
{jss::nft_offer, ltNFTOKEN_OFFER},
{jss::nft_page, ltNFTOKEN_PAGE},
{jss::unl_report, ltUNL_REPORT},
{jss::cron, ltCRON},
}};
auto const& p = params[jss::type];
if (!p.isString())
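With ltCRON added to this table, the account_objects type filter accepts "cron", which is what the new test below exercises. A minimal sketch of querying it over JSON-RPC, assuming a locally running node on the conventional admin RPC port and a placeholder account address:

# Hypothetical JSON-RPC query filtering an account's objects by the new "cron" type
curl -s -X POST http://127.0.0.1:5005/ \
  -H 'Content-Type: application/json' \
  -d '{"method":"account_objects","params":[{"account":"rEXAMPLEaccountADDRESSxxxxxxxxxx","type":"cron"}]}'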

View File

@@ -781,6 +781,22 @@ public:
auto const& hook = resp[jss::result][jss::account_objects][0u];
BEAST_EXPECT(hook[sfAccount.jsonName] == gw.human());
}
{
// Create a Cron
env(cron::set(gw),
cron::startTime(env.now().time_since_epoch().count() + 100),
cron::delay(100),
cron::repeat(200),
fee(XRP(1)));
env.close();
}
{
// Find the cron.
Json::Value const resp = acct_objs(gw, jss::cron);
BEAST_EXPECT(acct_objs_is_size(resp, 1));
auto const& cron = resp[jss::result][jss::account_objects][0u];
BEAST_EXPECT(cron[sfOwner.jsonName] == gw.human());
}
{
// See how "deletion_blockers_only" handles gw's directory.
Json::Value params;