Reverts the unnecessary mktemp change from 638cb0afe that broke cache saving.

What happened:
- Original delta code used $$ (PID) for temp files: DELTA_TARBALL="/tmp/...-$$.tar.zst"
- This creates a STRING, not a file - zstd creates the file when writing
- When removing deltas (638cb0afe), I unnecessarily changed to mktemp for "better practice"
- mktemp CREATES an empty file - zstd refuses to overwrite it
- Result: "already exists; not overwritten" error

Why it seemed to work:
- Immutability check skipped save for existing caches
- Upload code path never executed during testing
- Bug only appeared when actually trying to create a new cache

The fix:
- Revert to PID-based naming ($$) that was working
- Don't fix what isn't broken

Applies to both save and restore actions for consistency.
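For reference, a minimal sketch of the failure mode (the directory, file names, and zstd flags here are illustrative, not the exact commands from the save action):

    DIR="build/cache"   # illustrative directory to archive

    # Broken variant: mktemp CREATES an empty file immediately (GNU mktemp treats
    # the ".tar.zst" after the X's as an implied --suffix). zstd then refuses to
    # overwrite an existing output file by default and exits non-zero, which
    # fails the step under `set -euo pipefail`.
    DELTA_TARBALL=$(mktemp "/tmp/cache-demo-XXXXXX.tar.zst")
    tar -C "${DIR}" -cf - . | zstd -q -o "${DELTA_TARBALL}"
    # -> "already exists; not overwritten" (unless -f is passed to zstd)

    # Reverted (working) variant: $$ only interpolates the shell's PID into a
    # string; no file exists yet, so zstd creates it on first write.
    DELTA_TARBALL="/tmp/cache-demo-$$.tar.zst"
    tar -C "${DIR}" -cf - . | zstd -q -o "${DELTA_TARBALL}"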
212 lines
7.2 KiB
YAML
name: 'Xahau Cache Restore (S3)'
description: 'Drop-in replacement for actions/cache/restore using S3 storage'

inputs:
  path:
    description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)'
    required: true
  key:
    description: 'An explicit key for restoring the cache'
    required: true
  restore-keys:
    description: 'An ordered list of prefix-matched keys to use for restoring stale cache if no cache hit occurred for key'
    required: false
    default: ''
  s3-bucket:
    description: 'S3 bucket name for cache storage'
    required: false
    default: 'xahaud-github-actions-cache-niq'
  s3-region:
    description: 'S3 region'
    required: false
    default: 'us-east-1'
  fail-on-cache-miss:
    description: 'Fail the workflow if cache entry is not found'
    required: false
    default: 'false'
  lookup-only:
    description: 'Check if a cache entry exists for the given input(s) without downloading it'
    required: false
    default: 'false'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
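  # Example of passing them from a calling workflow (illustrative sketch - the
  # action path, step id, cache key, and secret names below are assumptions,
  # not pinned by this action):
  #
  #   - uses: ./.github/actions/xahau-cache-restore
  #     id: ccache-restore
  #     with:
  #       path: ~/.ccache
  #       key: ccache-${{ runner.os }}-${{ github.sha }}
  #       restore-keys: |
  #         ccache-${{ runner.os }}-
  #       aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
  #       aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}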
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
    required: true
  aws-secret-access-key:
    description: 'AWS Secret Access Key for S3 access'
    required: true

outputs:
  cache-hit:
    description: 'A boolean value to indicate an exact match was found for the primary key'
    value: ${{ steps.restore-cache.outputs.cache-hit }}
  cache-primary-key:
    description: 'The primary key from the "key" input (empty on cache miss; see cache-matched-key for the key that actually matched)'
    value: ${{ steps.restore-cache.outputs.cache-primary-key }}
  cache-matched-key:
    description: 'The key that was used to restore the cache (exact or prefix match)'
    value: ${{ steps.restore-cache.outputs.cache-matched-key }}

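# Example of consuming the outputs above from a calling workflow (illustrative -
# assumes the step that uses this action was given `id: ccache-restore`):
#
#   - name: Report cold cache
#     if: steps.ccache-restore.outputs.cache-hit != 'true'
#     run: echo "No exact hit for ${{ steps.ccache-restore.outputs.cache-primary-key }}"
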
runs:
  using: 'composite'
  steps:
    - name: Restore cache from S3
      id: restore-cache
      shell: bash
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
        S3_BUCKET: ${{ inputs.s3-bucket }}
        S3_REGION: ${{ inputs.s3-region }}
        CACHE_KEY: ${{ inputs.key }}
        RESTORE_KEYS: ${{ inputs.restore-keys }}
        TARGET_PATH: ${{ inputs.path }}
        FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
        LOOKUP_ONLY: ${{ inputs.lookup-only }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Restore (S3)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Cache key: ${CACHE_KEY}"
        echo "S3 bucket: s3://${S3_BUCKET}"
        echo ""

        # Normalize target path (expand tilde and resolve to absolute path)
        if [[ "${TARGET_PATH}" == ~* ]]; then
          TARGET_PATH="${HOME}${TARGET_PATH:1}"
        fi
        TARGET_PATH=$(realpath -m "${TARGET_PATH}")
        echo "Normalized target path: ${TARGET_PATH}"
        echo ""

        # Function to try restoring a cache key
        try_restore_key() {
          local key=$1
          local s3_key="s3://${S3_BUCKET}/${key}-base.tar.zst"

          echo "Checking for key: ${key}"

          if aws s3 ls "${s3_key}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "✓ Found cache: ${s3_key}"
            return 0
          else
            echo "✗ Not found: ${key}"
            return 1
          fi
        }

        # Try exact match first
        MATCHED_KEY=""
        EXACT_MATCH="false"

        if try_restore_key "${CACHE_KEY}"; then
          MATCHED_KEY="${CACHE_KEY}"
          EXACT_MATCH="true"
          echo ""
          echo "🎯 Exact cache hit for key: ${CACHE_KEY}"
        else
          # Try restore-keys (prefix matching)
          if [ -n "${RESTORE_KEYS}" ]; then
            echo ""
            echo "Primary key not found, trying restore-keys..."

            while IFS= read -r restore_key; do
              [ -z "${restore_key}" ] && continue
              restore_key=$(echo "${restore_key}" | xargs)

              if try_restore_key "${restore_key}"; then
                MATCHED_KEY="${restore_key}"
                EXACT_MATCH="false"
                echo ""
                echo "✓ Cache restored from fallback key: ${restore_key}"
                break
              fi
            done <<< "${RESTORE_KEYS}"
          fi
        fi

        # Check if we found anything
        if [ -z "${MATCHED_KEY}" ]; then
          echo ""
          echo "❌ No cache found for key: ${CACHE_KEY}"

          if [ "${FAIL_ON_MISS}" = "true" ]; then
            echo "fail-on-cache-miss is enabled, failing workflow"
            exit 1
          fi

          # Set outputs for cache miss
          echo "cache-hit=false" >> $GITHUB_OUTPUT
          echo "cache-primary-key=" >> $GITHUB_OUTPUT
          echo "cache-matched-key=" >> $GITHUB_OUTPUT

          # Create empty cache directory
          mkdir -p "${TARGET_PATH}"

          echo ""
          echo "=========================================="
          echo "Cache restore completed (bootstrap mode)"
          echo "Created empty cache directory: ${TARGET_PATH}"
          echo "=========================================="
          exit 0
        fi

        # If lookup-only, we're done
        if [ "${LOOKUP_ONLY}" = "true" ]; then
          echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
          echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
          echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

          echo ""
          echo "=========================================="
          echo "Cache lookup completed (lookup-only mode)"
          echo "Cache exists: ${MATCHED_KEY}"
          echo "=========================================="
          exit 0
        fi

        # Download and extract cache
        S3_KEY="s3://${S3_BUCKET}/${MATCHED_KEY}-base.tar.zst"
        TEMP_TARBALL="/tmp/xahau-cache-restore-$$.tar.zst"

        echo ""
        echo "Downloading cache..."
        aws s3 cp "${S3_KEY}" "${TEMP_TARBALL}" --region "${S3_REGION}"

        TARBALL_SIZE=$(du -h "${TEMP_TARBALL}" | cut -f1)
        echo "✓ Downloaded: ${TARBALL_SIZE}"

        # Create parent directory if needed
        mkdir -p "$(dirname "${TARGET_PATH}")"

        # Remove existing target if it exists
        if [ -e "${TARGET_PATH}" ]; then
          echo "Removing existing target: ${TARGET_PATH}"
          rm -rf "${TARGET_PATH}"
        fi

        # Create target directory and extract
        mkdir -p "${TARGET_PATH}"
        echo ""
        echo "Extracting cache..."
        zstd -d -c "${TEMP_TARBALL}" | tar -xf - -C "${TARGET_PATH}"
        echo "✓ Cache extracted to: ${TARGET_PATH}"

        # Cleanup
        rm -f "${TEMP_TARBALL}"

        # Set outputs
        echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
        echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
        echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

        echo ""
        echo "=========================================="
        echo "Cache restore completed successfully"
        echo "Cache hit: ${EXACT_MATCH}"
        echo "Matched key: ${MATCHED_KEY}"
        echo "=========================================="