Mirror of https://github.com/Xahau/xahaud.git, synced 2025-12-06 01:07:54 +00:00

Compare commits: fix-manife ... ce7b1c4f1d (7 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | ce7b1c4f1d |  |
|  | e062dcae58 |  |
|  | a9d284fec1 |  |
|  | 065d0c3e07 |  |
|  | 4fda40b709 |  |
|  | 6014356d91 |  |
|  | d790f97430 |  |
.github/actions/xahau-actions-cache-restore/action.yml (vendored, new file, 282 lines added)
@@ -0,0 +1,282 @@
name: 'Xahau Cache Restore (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/restore using S3 and OverlayFS for delta caching'

inputs:
  path:
    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
    required: true
  key:
    description: 'An explicit key for restoring the cache'
    required: true
  restore-keys:
    description: 'An ordered list of prefix-matched keys to use for restoring stale cache if no cache hit occurred for key'
    required: false
    default: ''
  s3-bucket:
    description: 'S3 bucket name for cache storage'
    required: false
    default: 'xahaud-github-actions-cache-niq'
  s3-region:
    description: 'S3 region'
    required: false
    default: 'us-east-1'
  fail-on-cache-miss:
    description: 'Fail the workflow if cache entry is not found'
    required: false
    default: 'false'
  lookup-only:
    description: 'Check if a cache entry exists for the given input(s) without downloading it'
    required: false
    default: 'false'
  use-deltas:
    description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
    required: false
    default: 'true'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 access'
    required: true

outputs:
  cache-hit:
    description: 'A boolean value to indicate an exact match was found for the primary key'
    value: ${{ steps.restore-cache.outputs.cache-hit }}
  cache-primary-key:
    description: 'The key that was used to restore the cache (may be from restore-keys)'
    value: ${{ steps.restore-cache.outputs.cache-primary-key }}
  cache-matched-key:
    description: 'The key that matched (same as cache-primary-key for compatibility)'
    value: ${{ steps.restore-cache.outputs.cache-primary-key }}

runs:
  using: 'composite'
  steps:
    - name: Restore cache from S3 with OverlayFS
      id: restore-cache
      shell: bash
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        S3_BUCKET: ${{ inputs.s3-bucket }}
        S3_REGION: ${{ inputs.s3-region }}
        CACHE_KEY: ${{ inputs.key }}
        RESTORE_KEYS: ${{ inputs.restore-keys }}
        TARGET_PATH: ${{ inputs.path }}
        FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
        LOOKUP_ONLY: ${{ inputs.lookup-only }}
        USE_DELTAS: ${{ inputs.use-deltas }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Restore (S3 + OverlayFS)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Primary key: ${CACHE_KEY}"
        echo "S3 bucket: s3://${S3_BUCKET}"
        echo "Use deltas: ${USE_DELTAS}"
        echo ""

        # Generate unique cache workspace
        CACHE_HASH=$(echo "${CACHE_KEY}" | md5sum | cut -d' ' -f1)
        CACHE_WORKSPACE="/tmp/xahau-cache-${CACHE_HASH}"

        echo "Cache workspace: ${CACHE_WORKSPACE}"

        # Create OverlayFS directory structure
        mkdir -p "${CACHE_WORKSPACE}"/{base,upper,work,merged}

        # Function to try downloading from S3
        try_restore_key() {
          local try_key="$1"
          local s3_base="s3://${S3_BUCKET}/${try_key}-base.tar.zst"

          echo "Trying cache key: ${try_key}"

          # Check if base exists (one base per key, immutable)
          echo "Checking for base layer..."
          if aws s3 ls "${s3_base}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "✓ Found base layer: ${s3_base}"

            if [ "${LOOKUP_ONLY}" = "true" ]; then
              echo "Lookup-only mode: cache exists, skipping download"
              return 0
            fi

            # Download base layer
            echo "Downloading base layer..."
            aws s3 cp "${s3_base}" /tmp/cache-base.tar.zst --region "${S3_REGION}" --quiet

            # Extract base layer
            echo "Extracting base layer..."
            tar -xf /tmp/cache-base.tar.zst -C "${CACHE_WORKSPACE}/base"
            rm /tmp/cache-base.tar.zst

            # Query for latest timestamped delta (only if use-deltas enabled)
            if [ "${USE_DELTAS}" = "true" ]; then
              echo "Querying for latest delta..."
              LATEST_DELTA=$(aws s3api list-objects-v2 \
                --bucket "${S3_BUCKET}" \
                --prefix "${try_key}-delta-" \
                --region "${S3_REGION}" \
                --query 'sort_by(Contents, &LastModified)[-1].Key' \
                --output text 2>/dev/null || echo "")

              if [ -n "${LATEST_DELTA}" ] && [ "${LATEST_DELTA}" != "None" ]; then
                echo "✓ Found latest delta: ${LATEST_DELTA}"
                echo "Downloading delta layer..."
                aws s3 cp "s3://${S3_BUCKET}/${LATEST_DELTA}" /tmp/cache-delta.tar.zst --region "${S3_REGION}" --quiet

                echo "Extracting delta layer..."
                tar -xf /tmp/cache-delta.tar.zst -C "${CACHE_WORKSPACE}/upper" 2>/dev/null || true
                rm /tmp/cache-delta.tar.zst
              else
                echo "ℹ No delta layer found (this is fine for first build)"
              fi
            else
              echo "ℹ Delta caching disabled (use-deltas: false)"
            fi

            return 0
          else
            echo "✗ No base layer found for key: ${try_key}"
            return 1
          fi
        }

        # Try primary key first
        MATCHED_KEY=""
        EXACT_MATCH="false"

        if try_restore_key "${CACHE_KEY}"; then
          MATCHED_KEY="${CACHE_KEY}"
          EXACT_MATCH="true"
          echo ""
          echo "🎯 Exact cache hit for key: ${CACHE_KEY}"
        else
          # Try restore-keys (prefix matching)
          if [ -n "${RESTORE_KEYS}" ]; then
            echo ""
            echo "Primary key not found, trying restore-keys..."

            # Split restore-keys by newline
            while IFS= read -r restore_key; do
              # Skip empty lines
              [ -z "${restore_key}" ] && continue

              # Trim whitespace
              restore_key=$(echo "${restore_key}" | xargs)

              if try_restore_key "${restore_key}"; then
                MATCHED_KEY="${restore_key}"
                EXACT_MATCH="false"
                echo ""
                echo "✓ Cache restored from fallback key: ${restore_key}"
                break
              fi
            done <<< "${RESTORE_KEYS}"
          fi
        fi

        # Check if we found anything
        if [ -z "${MATCHED_KEY}" ]; then
          echo ""
          echo "❌ No cache found for key: ${CACHE_KEY}"
          echo "This is BOOTSTRAP mode - first build for this cache key"

          if [ "${FAIL_ON_MISS}" = "true" ]; then
            echo "fail-on-cache-miss is enabled, failing workflow"
            exit 1
          fi

          # Set outputs for cache miss
          echo "cache-hit=false" >> $GITHUB_OUTPUT
          echo "cache-primary-key=" >> $GITHUB_OUTPUT

          # Create empty cache directory for bootstrap
          mkdir -p "${TARGET_PATH}"

          # Record bootstrap mode for save action
          # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
          # For bootstrap: workspace="bootstrap", matched_key=primary_key, exact_match=false
          MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
          echo "${TARGET_PATH}:bootstrap:${CACHE_KEY}:${CACHE_KEY}:false:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"

          echo ""
          echo "=========================================="
          echo "Cache restore completed (bootstrap mode)"
          echo "Created empty cache directory: ${TARGET_PATH}"
          echo "=========================================="
          exit 0
        fi

        # If lookup-only, we're done
        if [ "${LOOKUP_ONLY}" = "true" ]; then
          echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
          echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

          # Clean up workspace
          rm -rf "${CACHE_WORKSPACE}"

          echo ""
          echo "=========================================="
          echo "Cache lookup completed (lookup-only mode)"
          echo "=========================================="
          exit 0
        fi

        # Mount OverlayFS
        echo ""
        echo "Mounting OverlayFS..."
        sudo mount -t overlay overlay \
          -o lowerdir="${CACHE_WORKSPACE}/base",upperdir="${CACHE_WORKSPACE}/upper",workdir="${CACHE_WORKSPACE}/work" \
          "${CACHE_WORKSPACE}/merged"

        # Verify mount
        if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
          echo "✓ OverlayFS mounted successfully"
        else
          echo "❌ Failed to mount OverlayFS"
          exit 1
        fi

        # Create target directory parent if needed
        TARGET_PARENT=$(dirname "${TARGET_PATH}")
        mkdir -p "${TARGET_PARENT}"

        # Remove existing target if it exists
        if [ -e "${TARGET_PATH}" ]; then
          echo "Removing existing target: ${TARGET_PATH}"
          rm -rf "${TARGET_PATH}"
        fi

        # Symlink target path to merged view
        echo "Creating symlink: ${TARGET_PATH} -> ${CACHE_WORKSPACE}/merged"
        ln -s "${CACHE_WORKSPACE}/merged" "${TARGET_PATH}"

        # Save mount info for cleanup/save later
        # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
        # This tells save action whether to create new base (partial match) or just delta (exact match)
        MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
        echo "${TARGET_PATH}:${CACHE_WORKSPACE}:${MATCHED_KEY}:${CACHE_KEY}:${EXACT_MATCH}:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"

        # Set outputs
        echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
        echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

        # Show statistics
        echo ""
        echo "Cache statistics:"
        echo "  Base layer size: $(du -sh ${CACHE_WORKSPACE}/base 2>/dev/null | cut -f1 || echo '0')"
        echo "  Delta layer size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1 || echo '0')"
        echo "  Merged view size: $(du -sh ${CACHE_WORKSPACE}/merged 2>/dev/null | cut -f1 || echo '0')"

        echo ""
        echo "=========================================="
        echo "Cache restore completed successfully"
        echo "Exact match: ${EXACT_MATCH}"
        echo "Matched key: ${MATCHED_KEY}"
        echo "=========================================="
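For orientation, a caller job might wire this restore action in roughly as follows. This is a minimal sketch, not taken from this diff: the step names, cache path, and key scheme are illustrative, and the two secret names are reused from the test workflow further down. The AWS credentials are passed explicitly because, as the note in the inputs says, composite actions cannot read secrets.* themselves.

- name: Restore build cache
  id: cache
  uses: ./.github/actions/xahau-actions-cache-restore
  with:
    path: build/cache                                                   # illustrative path
    key: ${{ runner.os }}-build-${{ hashFiles('**/CMakeLists.txt') }}   # illustrative key scheme
    restore-keys: |
      ${{ runner.os }}-build-
    aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
    aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

- name: Report cache result
  run: echo "Exact hit: ${{ steps.cache.outputs.cache-hit }}"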
.github/actions/xahau-actions-cache-save/action.yml (vendored, new file, 342 lines added)
@@ -0,0 +1,342 @@
name: 'Xahau Cache Save (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/save using S3 and OverlayFS for delta caching'

inputs:
  path:
    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
    required: true
  key:
    description: 'An explicit key for saving the cache'
    required: true
  s3-bucket:
    description: 'S3 bucket name for cache storage'
    required: false
    default: 'xahaud-github-actions-cache-niq'
  s3-region:
    description: 'S3 region'
    required: false
    default: 'us-east-1'
  use-deltas:
    description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
    required: false
    default: 'true'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 access'
    required: true

runs:
  using: 'composite'
  steps:
    - name: Save cache to S3 with OverlayFS delta
      shell: bash
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        S3_BUCKET: ${{ inputs.s3-bucket }}
        S3_REGION: ${{ inputs.s3-region }}
        CACHE_KEY: ${{ inputs.key }}
        TARGET_PATH: ${{ inputs.path }}
        USE_DELTAS: ${{ inputs.use-deltas }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Save (S3 + OverlayFS)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Cache key: ${CACHE_KEY}"
        echo "S3 bucket: s3://${S3_BUCKET}"
        echo ""

        # Find the cache workspace from mount registry
        MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"

        if [ ! -f "${MOUNT_REGISTRY}" ]; then
          echo "⚠️ No cache mounts found (mount registry doesn't exist)"
          echo "This usually means cache restore was not called, or there was no cache to restore."
          echo "Skipping cache save."
          exit 0
        fi

        # Find entry for this path
        # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
        # Bootstrap mode: path:bootstrap:key:key:false:true/false (workspace="bootstrap")
        CACHE_WORKSPACE=""
        MATCHED_KEY=""
        PRIMARY_KEY=""
        EXACT_MATCH=""
        REGISTRY_USE_DELTAS=""

        while IFS=: read -r mount_path mount_workspace mount_matched_key mount_primary_key mount_exact_match mount_use_deltas; do
          if [ "${mount_path}" = "${TARGET_PATH}" ]; then
            CACHE_WORKSPACE="${mount_workspace}"
            MATCHED_KEY="${mount_matched_key}"
            PRIMARY_KEY="${mount_primary_key}"
            EXACT_MATCH="${mount_exact_match}"
            REGISTRY_USE_DELTAS="${mount_use_deltas}"
            break
          fi
        done < "${MOUNT_REGISTRY}"

        if [ -z "${CACHE_WORKSPACE}" ] && [ -z "${MATCHED_KEY}" ]; then
          echo "⚠️ No cache entry found for path: ${TARGET_PATH}"
          echo "This usually means cache restore was not called for this path."
          echo "Skipping cache save."
          exit 0
        fi

        # Determine cache mode
        if [ "${CACHE_WORKSPACE}" = "bootstrap" ]; then
          CACHE_MODE="bootstrap"
          PRIMARY_KEY="${MATCHED_KEY}"  # In bootstrap, matched_key field contains primary key
          echo "Cache mode: BOOTSTRAP (first build for this key)"
          echo "Primary key: ${PRIMARY_KEY}"
        elif [ "${EXACT_MATCH}" = "false" ]; then
          CACHE_MODE="partial-match"
          echo "Cache mode: PARTIAL MATCH (restore-key used)"
          echo "Cache workspace: ${CACHE_WORKSPACE}"
          echo "Matched key from restore: ${MATCHED_KEY}"
          echo "Primary key (will save new base): ${PRIMARY_KEY}"
        else
          CACHE_MODE="exact-match"
          echo "Cache mode: EXACT MATCH (cache hit)"
          echo "Cache workspace: ${CACHE_WORKSPACE}"
          echo "Matched key: ${MATCHED_KEY}"
        fi
        echo "Use deltas: ${REGISTRY_USE_DELTAS}"
        echo ""

        # Handle different cache modes
        if [ "${CACHE_MODE}" = "bootstrap" ]; then
          # Bootstrap: Save entire cache as base layer (no OverlayFS was used)
          echo "Bootstrap mode: Creating initial base layer from ${TARGET_PATH}"

          BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
          echo "Creating base tarball..."
          tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

          BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
          echo "✓ Base tarball created: ${BASE_SIZE}"
          echo ""

          # Use static base name (one base per key, immutable)
          S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"

          # Check if base already exists (immutability - first write wins)
          if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "⚠️ Base layer already exists: ${S3_BASE_KEY}"
            echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
          else
            echo "Uploading base layer to S3..."
            echo "  Key: ${PRIMARY_KEY}-base.tar.zst"

            aws s3 cp "${BASE_TARBALL}" "${S3_BASE_KEY}" \
              --region "${S3_REGION}" \
              --tagging "type=base" \
              --quiet

            echo "✓ Uploaded: ${S3_BASE_KEY}"
          fi

          # Cleanup
          rm -f "${BASE_TARBALL}"

          echo ""
          echo "=========================================="
          echo "Bootstrap cache save completed"
          echo "Base size: ${BASE_SIZE}"
          echo "Cache key: ${PRIMARY_KEY}"
          echo "=========================================="
          exit 0

        elif [ "${CACHE_MODE}" = "partial-match" ]; then
          # Partial match: Save merged view as new base ONLY (no delta)
          # The delta is relative to the OLD base, not the NEW base we're creating
          echo "Partial match mode: Saving new base layer for primary key"
          echo "Note: Delta will NOT be saved (it's relative to old base)"

          BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
          echo "Creating base tarball from merged view..."
          tar -cf - -C "${CACHE_WORKSPACE}/merged" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

          BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
          echo "✓ Base tarball created: ${BASE_SIZE}"
          echo ""

          # Use static base name (one base per key, immutable)
          S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"

          # Check if base already exists (immutability - first write wins)
          if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "⚠️ Base layer already exists: ${S3_BASE_KEY}"
            echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
          else
            echo "Uploading new base layer to S3..."
            echo "  Key: ${PRIMARY_KEY}-base.tar.zst"

            aws s3 cp "${BASE_TARBALL}" "${S3_BASE_KEY}" \
              --region "${S3_REGION}" \
              --tagging "type=base" \
              --quiet

            echo "✓ Uploaded: ${S3_BASE_KEY}"
          fi

          # Cleanup
          rm -f "${BASE_TARBALL}"

          # Unmount and cleanup
          echo ""
          echo "Cleaning up..."
          if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
            sudo umount "${CACHE_WORKSPACE}/merged" || {
              echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
              echo "Attempting lazy unmount..."
              sudo umount -l "${CACHE_WORKSPACE}/merged" || true
            }
          fi
          rm -rf "${CACHE_WORKSPACE}"

          # Remove from registry
          if [ -f "${MOUNT_REGISTRY}" ]; then
            grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
            mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
          fi

          echo "✓ Cleanup completed"

          echo ""
          echo "=========================================="
          echo "Partial match cache save completed"
          echo "New base created for: ${PRIMARY_KEY}"
          echo "Base size: ${BASE_SIZE}"
          if [ "${REGISTRY_USE_DELTAS}" = "true" ]; then
            echo "Next exact-match build will create deltas from this base"
          else
            echo "Next exact-match build will reuse this base (base-only mode)"
          fi
          echo "=========================================="
          exit 0
        fi

        # For exact-match ONLY: Save delta (if use-deltas enabled)
        if [ "${CACHE_MODE}" = "exact-match" ]; then
          # If deltas are disabled, just cleanup and exit
          if [ "${REGISTRY_USE_DELTAS}" != "true" ]; then
            echo "ℹ️ Delta caching disabled (use-deltas: false)"
            echo "Base already exists for this key, nothing to save."

            # Unmount and cleanup
            echo ""
            echo "Cleaning up..."
            if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
              sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
            fi
            rm -rf "${CACHE_WORKSPACE}"

            # Remove from registry
            if [ -f "${MOUNT_REGISTRY}" ]; then
              grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
              mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
            fi

            echo ""
            echo "=========================================="
            echo "Cache save completed (base-only mode)"
            echo "=========================================="
            exit 0
          fi

          # Check if upper layer has any changes
          if [ -z "$(ls -A ${CACHE_WORKSPACE}/upper 2>/dev/null)" ]; then
            echo "ℹ️ No changes detected in upper layer (cache is unchanged)"
            echo "Skipping delta upload to save bandwidth."

            # Still unmount and cleanup
            echo ""
            echo "Cleaning up..."
            sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
            rm -rf "${CACHE_WORKSPACE}"

            echo ""
            echo "=========================================="
            echo "Cache save completed (no changes)"
            echo "=========================================="
            exit 0
          fi

          # Show delta statistics
          echo "Delta layer statistics:"
          echo "  Files changed: $(find ${CACHE_WORKSPACE}/upper -type f 2>/dev/null | wc -l)"
          echo "  Delta size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1)"
          echo ""

          # Create delta tarball from upper layer
          echo "Creating delta tarball..."
          DELTA_TARBALL="/tmp/xahau-cache-delta-$$.tar.zst"

          tar -cf - -C "${CACHE_WORKSPACE}/upper" . | zstd -3 -T0 -q -o "${DELTA_TARBALL}"

          DELTA_SIZE=$(du -h "${DELTA_TARBALL}" | cut -f1)
          echo "✓ Delta tarball created: ${DELTA_SIZE}"
          echo ""

          # Upload timestamped delta (no overwrites = zero concurrency issues)
          TIMESTAMP=$(date +%Y%m%d%H%M%S)
          COMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")

          # Use PRIMARY_KEY for delta (ensures deltas match their base)
          S3_DELTA_TIMESTAMPED="s3://${S3_BUCKET}/${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"

          echo "Uploading timestamped delta to S3..."
          echo "  Key: ${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"

          # Upload with tag for auto-deletion after 7 days
          aws s3 cp "${DELTA_TARBALL}" "${S3_DELTA_TIMESTAMPED}" \
            --region "${S3_REGION}" \
            --tagging "type=delta-archive" \
            --quiet

          echo "✓ Uploaded: ${S3_DELTA_TIMESTAMPED}"
          echo "  (tagged for auto-deletion after 7 days)"

          # Cleanup delta tarball
          rm -f "${DELTA_TARBALL}"

          # Cleanup: Unmount OverlayFS and remove workspace
          echo ""
          echo "Cleaning up..."

          if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
            sudo umount "${CACHE_WORKSPACE}/merged" || {
              echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
              echo "Attempting lazy unmount..."
              sudo umount -l "${CACHE_WORKSPACE}/merged" || true
            }
          fi

          # Remove workspace
          rm -rf "${CACHE_WORKSPACE}"
        fi

        # Remove from registry
        if [ -f "${MOUNT_REGISTRY}" ]; then
          grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
          mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
        fi

        echo "✓ Cleanup completed"

        echo ""
        echo "=========================================="
        echo "Cache save completed successfully"
        echo "Mode: ${CACHE_MODE}"
        echo "Cache key: ${PRIMARY_KEY}"
        if [ -n "${DELTA_SIZE:-}" ]; then
          echo "Delta size: ${DELTA_SIZE}"
        fi
        echo "=========================================="
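A matching save call is sketched below, again with illustrative path and key values. Because the save script locates its workspace through the /tmp/xahau-cache-mounts.txt registry written by the restore step, the save step has to run later in the same job on the same runner, with the same path and key as the restore.

- name: Save build cache
  if: always()                                                          # e.g. save even if the build step failed
  uses: ./.github/actions/xahau-actions-cache-save
  with:
    path: build/cache                                                   # must match the restored path
    key: ${{ runner.os }}-build-${{ hashFiles('**/CMakeLists.txt') }}   # must match the restore key
    aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
    aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}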
.github/workflows/test-overlayfs-delta.yml (vendored, new file, 182 lines added)
@@ -0,0 +1,182 @@
name: Test OverlayFS Delta Extraction

on:
  push:
    branches: ["*"]
  workflow_dispatch:

jobs:
  test-overlayfs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # - name: Test encrypted secrets (decrypt test message)
      #   run: |
      #     echo "========================================"
      #     echo "TESTING ENCRYPTED SECRETS"
      #     echo "========================================"
      #     echo ""
      #     echo "Decrypting test message from .github/secrets/test-message.gpg"
      #     echo "Using encryption key from GitHub Secrets..."
      #     echo ""
      #
      #     # Decrypt using key from GitHub Secrets
      #     echo "${{ secrets.TEST_ENCRYPTION_KEY }}" | \
      #       gpg --batch --yes --passphrase-fd 0 \
      #       --decrypt .github/secrets/test-message.gpg
      #
      #     echo ""
      #     echo "========================================"
      #     echo "If you see the success message above,"
      #     echo "then encrypted secrets work! 🎉"
      #     echo "========================================"
      #     echo ""

      - name: Setup OverlayFS layers
        run: |
          echo "=== Creating directory structure ==="
          mkdir -p /tmp/test/{base,delta,upper,work,merged}

          echo "=== Creating base layer files ==="
          echo "base file 1" > /tmp/test/base/file1.txt
          echo "base file 2" > /tmp/test/base/file2.txt
          echo "base file 3" > /tmp/test/base/file3.txt
          mkdir -p /tmp/test/base/subdir
          echo "base subdir file" > /tmp/test/base/subdir/file.txt

          echo "=== Base layer contents ==="
          find /tmp/test/base -type f -exec sh -c 'echo "{}:"; cat "{}"' \;

          echo "=== Mounting OverlayFS ==="
          sudo mount -t overlay overlay \
            -o lowerdir=/tmp/test/base,upperdir=/tmp/test/upper,workdir=/tmp/test/work \
            /tmp/test/merged

          echo "=== Mounted successfully ==="
          mount | grep overlay

      - name: Verify merged view shows base files
        run: |
          echo "=== Contents of /merged (should show base files) ==="
          ls -R /tmp/test/merged
          find /tmp/test/merged -type f -exec sh -c 'echo "{}:"; cat "{}"' \;

      - name: Make changes via merged layer
        run: |
          echo "=== Making changes via /merged ==="

          # Overwrite existing file
          echo "MODIFIED file 2" > /tmp/test/merged/file2.txt
          echo "Modified file2.txt"

          # Create new file
          echo "NEW file 4" > /tmp/test/merged/file4.txt
          echo "Created new file4.txt"

          # Create new directory with file
          mkdir -p /tmp/test/merged/newdir
          echo "NEW file in new dir" > /tmp/test/merged/newdir/newfile.txt
          echo "Created newdir/newfile.txt"

          # Add file to existing directory
          echo "NEW file in existing subdir" > /tmp/test/merged/subdir/newfile.txt
          echo "Created subdir/newfile.txt"

          echo "=== Changes complete ==="

      - name: Show the delta (upperdir)
        run: |
          echo "========================================"
          echo "THE DELTA (only changes in /upper):"
          echo "========================================"

          if [ -z "$(ls -A /tmp/test/upper)" ]; then
            echo "Upper directory is empty - no changes detected"
          else
            echo "Upper directory structure:"
            ls -R /tmp/test/upper
            echo ""
            echo "Upper directory files with content:"
            find /tmp/test/upper -type f -exec sh -c 'echo "---"; echo "FILE: {}"; cat "{}"; echo ""' \;

            echo "========================================"
            echo "SIZE OF DELTA:"
            du -sh /tmp/test/upper
            echo "========================================"
          fi

      - name: Compare base vs upper vs merged
        run: |
          echo "========================================"
          echo "COMPARISON:"
          echo "========================================"

          echo "BASE layer (original, untouched):"
          ls -la /tmp/test/base/
          echo ""

          echo "UPPER layer (DELTA - only changes):"
          ls -la /tmp/test/upper/
          echo ""

          echo "MERGED layer (unified view = base + upper):"
          ls -la /tmp/test/merged/
          echo ""

          echo "========================================"
          echo "PROOF: Upper dir contains ONLY the delta!"
          echo "========================================"

      - name: Simulate tarball creation (what we'd upload)
        run: |
          echo "=== Creating tarball of delta ==="
          tar -czf /tmp/delta.tar.gz -C /tmp/test/upper .

          echo "Delta tarball size:"
          ls -lh /tmp/delta.tar.gz

          echo ""
          echo "Delta tarball contents:"
          tar -tzf /tmp/delta.tar.gz

          echo ""
          echo "========================================"
          echo "This is what we'd upload to S3/rsync!"
          echo "Only ~few KB instead of entire cache!"
          echo "========================================"

      - name: Upload delta to S3 (actual test!)
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
        run: |
          echo "========================================"
          echo "UPLOADING TO S3"
          echo "========================================"

          # Upload the delta tarball
          aws s3 cp /tmp/delta.tar.gz \
            s3://xahaud-github-actions-cache-niq/hello-world-first-test.tar.gz \
            --region us-east-1

          echo ""
          echo "✅ Successfully uploaded to S3!"
          echo "File: s3://xahaud-github-actions-cache-niq/hello-world-first-test.tar.gz"
          echo ""

          # Verify it exists
          echo "Verifying upload..."
          aws s3 ls s3://xahaud-github-actions-cache-niq/hello-world-first-test.tar.gz --region us-east-1

          echo ""
          echo "========================================"
          echo "S3 upload test complete! 🚀"
          echo "========================================"

      - name: Cleanup
        if: always()
        run: |
          echo "=== Unmounting OverlayFS ==="
          sudo umount /tmp/test/merged || true
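As a recap, the core trick this workflow exercises fits in one hypothetical step: mount an overlay, write through the merged view, then archive only the upper directory, which holds nothing but the delta. The paths below are illustrative, not taken from the repository.

- name: OverlayFS delta in brief (sketch)
  run: |
    mkdir -p /tmp/demo/{base,upper,work,merged}
    sudo mount -t overlay overlay \
      -o lowerdir=/tmp/demo/base,upperdir=/tmp/demo/upper,workdir=/tmp/demo/work \
      /tmp/demo/merged
    echo "change" > /tmp/demo/merged/new-file.txt        # write via the merged view
    tar -czf /tmp/demo-delta.tar.gz -C /tmp/demo/upper .  # upper dir contains only the delta
    sudo umount /tmp/demo/merged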
@@ -72,15 +72,15 @@ It generates many files of [results](results):
   desired as described above. In a perfect repo, this file will be
   empty.
   This file is committed to the repo, and is used by the [levelization
-  Github workflow](../../.github/workflows/levelization.yml) to validate
+  Github workflow](../../.github/workflows/levelization.yml.disabled) to validate
   that nothing changed.
 * [`ordering.txt`](results/ordering.txt): A list showing relationships
   between modules where there are no loops as they actually exist, as
   opposed to how they are desired as described above.
   This file is committed to the repo, and is used by the [levelization
-  Github workflow](../../.github/workflows/levelization.yml) to validate
+  Github workflow](../../.github/workflows/levelization.yml.disabled) to validate
   that nothing changed.
-* [`levelization.yml`](../../.github/workflows/levelization.yml)
+* [`levelization.yml`](../../.github/workflows/levelization.yml.disabled)
   Github Actions workflow to test that levelization loops haven't
   changed. Unfortunately, if changes are detected, it can't tell if
   they are improvements or not, so if you have resolved any issues or
@@ -1769,7 +1769,7 @@ pool.ntp.org
 # Unless an absolute path is specified, it will be considered relative to the
 # folder in which the xahaud.cfg file is located.
 [validators_file]
-validators-xahau.txt
+validators.txt
 
 # Turn down default logging to save disk space in the long run.
 # Valid values here are trace, debug, info, warning, error, and fatal
@@ -471,10 +471,6 @@ ManifestCache::applyManifest(Manifest m)
 
     auto masterKey = m.masterKey;
     map_.emplace(std::move(masterKey), std::move(m));
-
-    // Increment sequence to invalidate cached manifest messages
-    seq_++;
-
     return ManifestDisposition::accepted;
 }
 
@@ -1482,13 +1482,9 @@ TxQ::accept(Application& app, OpenView& view)
     {
         uint32_t currentTime =
             view.parentCloseTime().time_since_epoch().count();
-        bool fixCron = view.rules().enabled(fixCronStacking);
-        std::optional<AccountID> accountID = std::nullopt;
-        if (!fixCron)
-            accountID = AccountID(beast::zero);
-
-        uint256 klStart = keylet::cron(0, accountID).key;
-        uint256 const klEnd = keylet::cron(currentTime + 1, accountID).key;
+        uint256 klStart = keylet::cron(0, AccountID(beast::zero)).key;
+        uint256 const klEnd =
+            keylet::cron(currentTime + 1, AccountID(beast::zero)).key;
 
         std::set<AccountID> cronAccs;
 
@@ -93,16 +93,6 @@ Cron::doApply()
     auto& view = ctx_.view();
     auto const& tx = ctx_.tx;
 
-    if (view.rules().enabled(fixCronStacking))
-    {
-        if (auto const seq = tx.getFieldU32(sfLedgerSequence);
-            seq != view.info().seq)
-        {
-            JLOG(j_.warn()) << "Cron: wrong ledger seq=" << seq;
-            return tefFAILURE;
-        }
-    }
-
     AccountID const& id = tx.getAccountID(sfOwner);
 
     auto sle = view.peek(keylet::account(id));
@@ -74,7 +74,7 @@ namespace detail {
 // Feature.cpp. Because it's only used to reserve storage, and determine how
 // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
 // the actual number of amendments. A LogicError on startup will verify this.
-static constexpr std::size_t numFeatures = 89;
+static constexpr std::size_t numFeatures = 88;
 
 /** Amendments that this server supports and the default voting behavior.
     Whether they are enabled depends on the Rules defined in the validated
@@ -376,7 +376,6 @@ extern uint256 const featureIOUIssuerWeakTSH;
 extern uint256 const featureCron;
 extern uint256 const fixInvalidTxFlags;
 extern uint256 const featureExtendedHookState;
-extern uint256 const fixCronStacking;
 
 } // namespace ripple
 
@@ -298,7 +298,7 @@ Keylet
 uritoken(AccountID const& issuer, Blob const& uri);
 
 Keylet
-cron(uint32_t timestamp, std::optional<AccountID> const& id = std::nullopt);
+cron(uint32_t timestamp, AccountID const& id);
 
 } // namespace keylet
 
@@ -482,7 +482,6 @@ REGISTER_FEATURE(IOUIssuerWeakTSH, Supported::yes, VoteBehavior::De
 REGISTER_FEATURE(Cron, Supported::yes, VoteBehavior::DefaultNo);
 REGISTER_FIX (fixInvalidTxFlags, Supported::yes, VoteBehavior::DefaultYes);
 REGISTER_FEATURE(ExtendedHookState, Supported::yes, VoteBehavior::DefaultNo);
-REGISTER_FIX (fixCronStacking, Supported::yes, VoteBehavior::DefaultYes);
 
 // The following amendments are obsolete, but must remain supported
 // because they could potentially get enabled.
@@ -466,7 +466,7 @@ uritoken(AccountID const& issuer, Blob const& uri)
 // Examples: 100M → ~5.4e-12, 1B → ~5.4e-11, 10B → ~5.4e-10, 100B → ~5.4e-9
 // (negligible).
 Keylet
-cron(uint32_t timestamp, std::optional<AccountID> const& id)
+cron(uint32_t timestamp, AccountID const& id)
 {
     static const uint256 ns = indexHash(LedgerNameSpace::CRON);
 
@@ -481,14 +481,7 @@ cron(uint32_t timestamp, std::optional<AccountID> const& id)
     h[10] = static_cast<uint8_t>((timestamp >> 8) & 0xFFU);
     h[11] = static_cast<uint8_t>((timestamp >> 0) & 0xFFU);
 
-    if (!id.has_value())
-    {
-        // final 20 bytes are zero
-        std::memset(h + 12, 0, 20);
-        return {ltCRON, uint256::fromVoid(h)};
-    }
-
-    const uint256 accHash = indexHash(LedgerNameSpace::CRON, timestamp, *id);
+    const uint256 accHash = indexHash(LedgerNameSpace::CRON, timestamp, id);
 
     // final 20 bytes are account ID
     std::memcpy(h + 12, accHash.cdata(), 20);
@@ -1106,32 +1106,30 @@ chooseLedgerEntryType(Json::Value const& params)
     std::pair<RPC::Status, LedgerEntryType> result{RPC::Status::OK, ltANY};
     if (params.isMember(jss::type))
     {
-        static constexpr std::array<std::pair<char const*, LedgerEntryType>, 23>
-            types{{
-                {jss::account, ltACCOUNT_ROOT},
+        static constexpr std::array<std::pair<char const*, LedgerEntryType>, 22>
+            types{
+                {{jss::account, ltACCOUNT_ROOT},
                 {jss::amendments, ltAMENDMENTS},
                 {jss::check, ltCHECK},
                 {jss::deposit_preauth, ltDEPOSIT_PREAUTH},
                 {jss::directory, ltDIR_NODE},
                 {jss::escrow, ltESCROW},
                 {jss::emitted_txn, ltEMITTED_TXN},
                 {jss::hook, ltHOOK},
                 {jss::hook_definition, ltHOOK_DEFINITION},
                 {jss::hook_state, ltHOOK_STATE},
                 {jss::fee, ltFEE_SETTINGS},
                 {jss::hashes, ltLEDGER_HASHES},
                 {jss::import_vlseq, ltIMPORT_VLSEQ},
                 {jss::offer, ltOFFER},
                 {jss::payment_channel, ltPAYCHAN},
                 {jss::uri_token, ltURI_TOKEN},
                 {jss::signer_list, ltSIGNER_LIST},
                 {jss::state, ltRIPPLE_STATE},
                 {jss::ticket, ltTICKET},
                 {jss::nft_offer, ltNFTOKEN_OFFER},
                 {jss::nft_page, ltNFTOKEN_PAGE},
-                {jss::unl_report, ltUNL_REPORT},
-                {jss::cron, ltCRON},
-            }};
+                {jss::unl_report, ltUNL_REPORT}}};
 
         auto const& p = params[jss::type];
         if (!p.isString())
@@ -781,22 +781,6 @@ public:
             auto const& hook = resp[jss::result][jss::account_objects][0u];
             BEAST_EXPECT(hook[sfAccount.jsonName] == gw.human());
         }
-        {
-            // Create a Cron
-            env(cron::set(gw),
-                cron::startTime(env.now().time_since_epoch().count() + 100),
-                cron::delay(100),
-                cron::repeat(200),
-                fee(XRP(1)));
-            env.close();
-        }
-        {
-            // Find the cron.
-            Json::Value const resp = acct_objs(gw, jss::cron);
-            BEAST_EXPECT(acct_objs_is_size(resp, 1));
-            auto const& cron = resp[jss::result][jss::account_objects][0u];
-            BEAST_EXPECT(cron[sfOwner.jsonName] == gw.human());
-        }
         {
             // See how "deletion_blockers_only" handles gw's directory.
             Json::Value params;