Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-04 10:45:50 +00:00)

Compare commits: f8d1a6f2b4 ... nd-experim (13 commits)

| SHA1 |
|---|
| 83f6bc64e1 |
| be6fad9692 |
| b24e4647ba |
| 638cb0afe5 |
| bd384e6bc1 |
| 4c546e5d91 |
| 28727b3f86 |
| a4f96a435a |
| d0f63cc2d1 |
| 2433bfe277 |
| ef40a7f351 |
| a4a4126bdc |
| 0559b6c418 |
@@ -1,5 +1,5 @@
name: 'Xahau Cache Restore (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/restore using S3 and OverlayFS for delta caching'
name: 'Xahau Cache Restore (S3)'
description: 'Drop-in replacement for actions/cache/restore using S3 storage'

inputs:
path:
@@ -28,10 +28,6 @@ inputs:
description: 'Check if a cache entry exists for the given input(s) without downloading it'
required: false
default: 'false'
use-deltas:
description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
required: false
default: 'true'
# Note: Composite actions can't access secrets.* directly - must be passed from workflow
aws-access-key-id:
description: 'AWS Access Key ID for S3 access'
@@ -48,13 +44,13 @@ outputs:
description: 'The key that was used to restore the cache (may be from restore-keys)'
value: ${{ steps.restore-cache.outputs.cache-primary-key }}
cache-matched-key:
description: 'The key that matched (same as cache-primary-key for compatibility)'
value: ${{ steps.restore-cache.outputs.cache-primary-key }}
description: 'The key that was used to restore the cache (exact or prefix match)'
value: ${{ steps.restore-cache.outputs.cache-matched-key }}

runs:
using: 'composite'
steps:
- name: Restore cache from S3 with OverlayFS
- name: Restore cache from S3
id: restore-cache
shell: bash
env:
@@ -67,133 +63,42 @@ runs:
TARGET_PATH: ${{ inputs.path }}
FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
LOOKUP_ONLY: ${{ inputs.lookup-only }}
USE_DELTAS: ${{ inputs.use-deltas }}
COMMIT_MSG: ${{ github.event.head_commit.message }}
run: |
set -euo pipefail

echo "=========================================="
echo "Xahau Cache Restore (S3 + OverlayFS)"
echo "Xahau Cache Restore (S3)"
echo "=========================================="
echo "Target path: ${TARGET_PATH}"
echo "Primary key: ${CACHE_KEY}"
echo "Cache key: ${CACHE_KEY}"
echo "S3 bucket: s3://${S3_BUCKET}"
echo "Use deltas: ${USE_DELTAS}"
echo ""

# Normalize target path (expand tilde and resolve to absolute path)
# This ensures consistent path comparison in the mount registry
if [[ "${TARGET_PATH}" == ~* ]]; then
# Expand tilde manually (works even if directory doesn't exist yet)
TARGET_PATH="${HOME}${TARGET_PATH:1}"
fi
TARGET_PATH=$(realpath -m "${TARGET_PATH}")
echo "Normalized target path: ${TARGET_PATH}"
echo ""

# Generate unique cache workspace
CACHE_HASH=$(echo "${CACHE_KEY}" | md5sum | cut -d' ' -f1)
CACHE_WORKSPACE="/tmp/xahau-cache-${CACHE_HASH}"

echo "Cache workspace: ${CACHE_WORKSPACE}"

# Check for [ci-clear-cache] tag in commit message
if echo "${COMMIT_MSG}" | grep -q '\[ci-clear-cache\]'; then
echo ""
echo "🗑️ [ci-clear-cache] detected in commit message"
echo "Clearing cache for key: ${CACHE_KEY}"
echo ""

# Delete base layer
S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "Deleting base layer: ${S3_BASE_KEY}"
aws s3 rm "${S3_BASE_KEY}" --region "${S3_REGION}" 2>/dev/null || true
echo "✓ Base layer deleted"
else
echo "ℹ️ No base layer found to delete"
fi

# Delete all delta layers for this key
echo "Deleting all delta layers matching: ${CACHE_KEY}-delta-*"
DELTA_COUNT=$(aws s3 ls "s3://${S3_BUCKET}/" --region "${S3_REGION}" | grep "${CACHE_KEY}-delta-" | wc -l || echo "0")
DELTA_COUNT=$(echo "${DELTA_COUNT}" | tr -d ' \n') # Trim whitespace
if [ "${DELTA_COUNT}" -gt 0 ]; then
aws s3 rm "s3://${S3_BUCKET}/" --recursive \
--exclude "*" \
--include "${CACHE_KEY}-delta-*" \
--region "${S3_REGION}" 2>/dev/null || true
echo "✓ Deleted ${DELTA_COUNT} delta layer(s)"
else
echo "ℹ️ No delta layers found to delete"
fi

echo ""
echo "✅ Cache cleared successfully"
echo "Build will proceed from scratch (bootstrap mode)"
echo ""
fi

# Create OverlayFS directory structure
mkdir -p "${CACHE_WORKSPACE}"/{base,upper,work,merged}

# Function to try downloading from S3
# Function to try restoring a cache key
try_restore_key() {
local try_key="$1"
local s3_base="s3://${S3_BUCKET}/${try_key}-base.tar.zst"
local key=$1
local s3_key="s3://${S3_BUCKET}/${key}-base.tar.zst"

echo "Trying cache key: ${try_key}"

# Check if base exists (one base per key, immutable)
echo "Checking for base layer..."
if aws s3 ls "${s3_base}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "✓ Found base layer: ${s3_base}"

if [ "${LOOKUP_ONLY}" = "true" ]; then
echo "Lookup-only mode: cache exists, skipping download"
return 0
fi

# Download base layer
echo "Downloading base layer..."
aws s3 cp "${s3_base}" /tmp/cache-base.tar.zst --region "${S3_REGION}" --quiet

# Extract base layer
echo "Extracting base layer..."
tar -xf /tmp/cache-base.tar.zst -C "${CACHE_WORKSPACE}/base"
rm /tmp/cache-base.tar.zst

# Query for latest timestamped delta (only if use-deltas enabled)
if [ "${USE_DELTAS}" = "true" ]; then
echo "Querying for latest delta..."
LATEST_DELTA=$(aws s3api list-objects-v2 \
--bucket "${S3_BUCKET}" \
--prefix "${try_key}-delta-" \
--region "${S3_REGION}" \
--query 'sort_by(Contents, &LastModified)[-1].Key' \
--output text 2>/dev/null || echo "")

if [ -n "${LATEST_DELTA}" ] && [ "${LATEST_DELTA}" != "None" ]; then
echo "✓ Found latest delta: ${LATEST_DELTA}"
echo "Downloading delta layer..."
aws s3 cp "s3://${S3_BUCKET}/${LATEST_DELTA}" /tmp/cache-delta.tar.zst --region "${S3_REGION}" --quiet

echo "Extracting delta layer..."
tar -xf /tmp/cache-delta.tar.zst -C "${CACHE_WORKSPACE}/upper" 2>/dev/null || true
rm /tmp/cache-delta.tar.zst
else
echo "ℹ No delta layer found (this is fine for first build)"
fi
else
echo "ℹ Delta caching disabled (use-deltas: false)"
fi
echo "Checking for key: ${key}"

if aws s3 ls "${s3_key}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "✓ Found cache: ${s3_key}"
return 0
else
echo "✗ No base layer found for key: ${try_key}"
echo "✗ Not found: ${key}"
return 1
fi
}

# Try primary key first
# Try exact match first
MATCHED_KEY=""
EXACT_MATCH="false"

@@ -208,12 +113,8 @@ runs:
echo ""
echo "Primary key not found, trying restore-keys..."

# Split restore-keys by newline
while IFS= read -r restore_key; do
# Skip empty lines
[ -z "${restore_key}" ] && continue

# Trim whitespace
restore_key=$(echo "${restore_key}" | xargs)

if try_restore_key "${restore_key}"; then
@@ -231,7 +132,6 @@ runs:
if [ -z "${MATCHED_KEY}" ]; then
echo ""
echo "❌ No cache found for key: ${CACHE_KEY}"
echo "This is BOOTSTRAP mode - first build for this cache key"

if [ "${FAIL_ON_MISS}" = "true" ]; then
echo "fail-on-cache-miss is enabled, failing workflow"
@@ -241,16 +141,11 @@ runs:
# Set outputs for cache miss
echo "cache-hit=false" >> $GITHUB_OUTPUT
echo "cache-primary-key=" >> $GITHUB_OUTPUT
echo "cache-matched-key=" >> $GITHUB_OUTPUT

# Create empty cache directory for bootstrap
# Create empty cache directory
mkdir -p "${TARGET_PATH}"

# Record bootstrap mode for save action
# Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
# For bootstrap: workspace="bootstrap", matched_key=primary_key, exact_match=false
MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
echo "${TARGET_PATH}:bootstrap:${CACHE_KEY}:${CACHE_KEY}:false:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"

echo ""
echo "=========================================="
echo "Cache restore completed (bootstrap mode)"
@@ -262,36 +157,30 @@ runs:
# If lookup-only, we're done
if [ "${LOOKUP_ONLY}" = "true" ]; then
echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

# Clean up workspace
rm -rf "${CACHE_WORKSPACE}"
echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

echo ""
echo "=========================================="
echo "Cache lookup completed (lookup-only mode)"
echo "Cache exists: ${MATCHED_KEY}"
echo "=========================================="
exit 0
fi

# Mount OverlayFS
# Download and extract cache
S3_KEY="s3://${S3_BUCKET}/${MATCHED_KEY}-base.tar.zst"
TEMP_TARBALL="/tmp/xahau-cache-restore-$$.tar.zst"

echo ""
echo "Mounting OverlayFS..."
sudo mount -t overlay overlay \
-o lowerdir="${CACHE_WORKSPACE}/base",upperdir="${CACHE_WORKSPACE}/upper",workdir="${CACHE_WORKSPACE}/work" \
"${CACHE_WORKSPACE}/merged"
echo "Downloading cache..."
aws s3 cp "${S3_KEY}" "${TEMP_TARBALL}" --region "${S3_REGION}"

# Verify mount
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
echo "✓ OverlayFS mounted successfully"
else
echo "❌ Failed to mount OverlayFS"
exit 1
fi
TARBALL_SIZE=$(du -h "${TEMP_TARBALL}" | cut -f1)
echo "✓ Downloaded: ${TARBALL_SIZE}"

# Create target directory parent if needed
TARGET_PARENT=$(dirname "${TARGET_PATH}")
mkdir -p "${TARGET_PARENT}"
# Create parent directory if needed
mkdir -p "$(dirname "${TARGET_PATH}")"

# Remove existing target if it exists
if [ -e "${TARGET_PATH}" ]; then
@@ -299,30 +188,24 @@ runs:
rm -rf "${TARGET_PATH}"
fi

# Symlink target path to merged view
echo "Creating symlink: ${TARGET_PATH} -> ${CACHE_WORKSPACE}/merged"
ln -s "${CACHE_WORKSPACE}/merged" "${TARGET_PATH}"
# Create target directory and extract
mkdir -p "${TARGET_PATH}"
echo ""
echo "Extracting cache..."
zstd -d -c "${TEMP_TARBALL}" | tar -xf - -C "${TARGET_PATH}"
echo "✓ Cache extracted to: ${TARGET_PATH}"

# Save mount info for cleanup/save later
# Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
# This tells save action whether to create new base (partial match) or just delta (exact match)
MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
echo "${TARGET_PATH}:${CACHE_WORKSPACE}:${MATCHED_KEY}:${CACHE_KEY}:${EXACT_MATCH}:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"
# Cleanup
rm -f "${TEMP_TARBALL}"

# Set outputs
echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

# Show statistics
echo ""
echo "Cache statistics:"
echo " Base layer size: $(du -sh ${CACHE_WORKSPACE}/base 2>/dev/null | cut -f1 || echo '0')"
echo " Delta layer size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1 || echo '0')"
echo " Merged view size: $(du -sh ${CACHE_WORKSPACE}/merged 2>/dev/null | cut -f1 || echo '0')"
echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

echo ""
echo "=========================================="
echo "Cache restore completed successfully"
echo "Exact match: ${EXACT_MATCH}"
echo "Cache hit: ${EXACT_MATCH}"
echo "Matched key: ${MATCHED_KEY}"
echo "=========================================="
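Because a composite action cannot read secrets.* itself, the calling workflow has to pass the AWS credentials in as inputs, as the note in the inputs section above says. A minimal calling step might look like the sketch below; the action path and the key value are assumed for illustration, while the input names, output names, and secret names are the ones appearing in this diff:

    - name: Restore Conan cache
      id: conan-cache
      uses: ./.github/actions/xahau-actions-cache-restore   # assumed path, by analogy with the save action
      with:
        path: ~/.conan2
        key: ${{ runner.os }}-conan-v1-gcc-13-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}
        restore-keys: |
          ${{ runner.os }}-conan-v1-gcc-13-
        aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
        aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

    - name: Report cache result
      run: |
        echo "hit=${{ steps.conan-cache.outputs.cache-hit }}"
        echo "matched=${{ steps.conan-cache.outputs.cache-matched-key }}"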
.github/actions/xahau-actions-cache-save/action.yml (vendored, 368 changed lines)
@@ -1,5 +1,5 @@
name: 'Xahau Cache Save (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/save using S3 and OverlayFS for delta caching'
name: 'Xahau Cache Save (S3)'
description: 'Drop-in replacement for actions/cache/save using S3 storage'

inputs:
path:
@@ -16,10 +16,6 @@ inputs:
description: 'S3 region'
required: false
default: 'us-east-1'
use-deltas:
description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
required: false
default: 'true'
# Note: Composite actions can't access secrets.* directly - must be passed from workflow
aws-access-key-id:
description: 'AWS Access Key ID for S3 access'
@@ -31,7 +27,7 @@ inputs:
runs:
using: 'composite'
steps:
- name: Save cache to S3 with OverlayFS delta
- name: Save cache to S3
shell: bash
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
@@ -40,12 +36,11 @@ runs:
S3_REGION: ${{ inputs.s3-region }}
CACHE_KEY: ${{ inputs.key }}
TARGET_PATH: ${{ inputs.path }}
USE_DELTAS: ${{ inputs.use-deltas }}
run: |
set -euo pipefail

echo "=========================================="
echo "Xahau Cache Save (S3 + OverlayFS)"
echo "Xahau Cache Save (S3)"
echo "=========================================="
echo "Target path: ${TARGET_PATH}"
echo "Cache key: ${CACHE_KEY}"
@@ -53,346 +48,63 @@ runs:
echo ""

# Normalize target path (expand tilde and resolve to absolute path)
# This ensures consistent path comparison with the mount registry
if [[ "${TARGET_PATH}" == ~* ]]; then
# Expand tilde manually (works even if directory doesn't exist yet)
TARGET_PATH="${HOME}${TARGET_PATH:1}"
fi
echo "Normalized target path: ${TARGET_PATH}"
echo ""

# Find the cache workspace from mount registry
MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"

if [ ! -f "${MOUNT_REGISTRY}" ]; then
echo "⚠️ No cache mounts found (mount registry doesn't exist)"
echo "This usually means cache restore was not called, or there was no cache to restore."
# Check if target directory exists
if [ ! -d "${TARGET_PATH}" ]; then
echo "⚠️ Target directory does not exist: ${TARGET_PATH}"
echo "Skipping cache save."
exit 0
fi

# Find entry for this path
# Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
# Bootstrap mode: path:bootstrap:key:key:false:true/false (workspace="bootstrap")
CACHE_WORKSPACE=""
MATCHED_KEY=""
PRIMARY_KEY=""
EXACT_MATCH=""
REGISTRY_USE_DELTAS=""
# Use static base name (one base per key, immutable)
S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"

while IFS=: read -r mount_path mount_workspace mount_matched_key mount_primary_key mount_exact_match mount_use_deltas; do
if [ "${mount_path}" = "${TARGET_PATH}" ]; then
CACHE_WORKSPACE="${mount_workspace}"
MATCHED_KEY="${mount_matched_key}"
PRIMARY_KEY="${mount_primary_key}"
EXACT_MATCH="${mount_exact_match}"
REGISTRY_USE_DELTAS="${mount_use_deltas}"
break
fi
done < "${MOUNT_REGISTRY}"

if [ -z "${CACHE_WORKSPACE}" ] && [ -z "${MATCHED_KEY}" ]; then
echo "⚠️ No cache entry found for path: ${TARGET_PATH}"
echo "This usually means cache restore was not called for this path."
echo "Skipping cache save."
# Check if base already exists (immutability - first write wins)
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "⚠️ Cache already exists: ${S3_BASE_KEY}"
echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
echo ""
echo "=========================================="
echo "Cache save completed (already exists)"
echo "=========================================="
exit 0
fi

# Determine cache mode
if [ "${CACHE_WORKSPACE}" = "bootstrap" ]; then
CACHE_MODE="bootstrap"
PRIMARY_KEY="${MATCHED_KEY}" # In bootstrap, matched_key field contains primary key
echo "Cache mode: BOOTSTRAP (first build for this key)"
echo "Primary key: ${PRIMARY_KEY}"
elif [ "${EXACT_MATCH}" = "false" ]; then
CACHE_MODE="partial-match"
echo "Cache mode: PARTIAL MATCH (restore-key used)"
echo "Cache workspace: ${CACHE_WORKSPACE}"
echo "Matched key from restore: ${MATCHED_KEY}"
echo "Primary key (will save new base): ${PRIMARY_KEY}"
else
CACHE_MODE="exact-match"
echo "Cache mode: EXACT MATCH (cache hit)"
echo "Cache workspace: ${CACHE_WORKSPACE}"
echo "Matched key: ${MATCHED_KEY}"
fi
echo "Use deltas: ${REGISTRY_USE_DELTAS}"
# Create tarball
BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"

echo "Creating cache tarball..."
tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
echo "✓ Cache tarball created: ${BASE_SIZE}"
echo ""

# Handle different cache modes
if [ "${CACHE_MODE}" = "bootstrap" ]; then
# Bootstrap: Save entire cache as base layer (no OverlayFS was used)
echo "Bootstrap mode: Creating initial base layer from ${TARGET_PATH}"
# Upload to S3
echo "Uploading cache to S3..."
echo " Key: ${CACHE_KEY}-base.tar.zst"

BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
echo "Creating base tarball..."
tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"
aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${CACHE_KEY}-base.tar.zst" \
--body "${BASE_TARBALL}" \
--tagging 'type=base' \
--region "${S3_REGION}" \
>/dev/null

BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
echo "✓ Base tarball created: ${BASE_SIZE}"
echo ""
echo "✓ Uploaded: ${S3_BASE_KEY}"

# Use static base name (one base per key, immutable)
S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"

# Check if base already exists (immutability - first write wins)
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "⚠️ Base layer already exists: ${S3_BASE_KEY}"
echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
else
echo "Uploading base layer to S3..."
echo " Key: ${PRIMARY_KEY}-base.tar.zst"

aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${PRIMARY_KEY}-base.tar.zst" \
--body "${BASE_TARBALL}" \
--tagging 'type=base' \
--region "${S3_REGION}" \
>/dev/null

echo "✓ Uploaded: ${S3_BASE_KEY}"
fi

# Cleanup
rm -f "${BASE_TARBALL}"

echo ""
echo "=========================================="
echo "Bootstrap cache save completed"
echo "Base size: ${BASE_SIZE}"
echo "Cache key: ${PRIMARY_KEY}"
echo "=========================================="
exit 0

elif [ "${CACHE_MODE}" = "partial-match" ]; then
# Partial match: Save merged view as new base ONLY (no delta)
# The delta is relative to the OLD base, not the NEW base we're creating
echo "Partial match mode: Saving new base layer for primary key"
echo "Note: Delta will NOT be saved (it's relative to old base)"

BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
echo "Creating base tarball from merged view..."
tar -cf - -C "${CACHE_WORKSPACE}/merged" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
echo "✓ Base tarball created: ${BASE_SIZE}"
echo ""

# Use static base name (one base per key, immutable)
S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"

# Check if base already exists (immutability - first write wins)
if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
echo "⚠️ Base layer already exists: ${S3_BASE_KEY}"
echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
else
echo "Uploading new base layer to S3..."
echo " Key: ${PRIMARY_KEY}-base.tar.zst"

aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${PRIMARY_KEY}-base.tar.zst" \
--body "${BASE_TARBALL}" \
--tagging 'type=base' \
--region "${S3_REGION}" \
>/dev/null

echo "✓ Uploaded: ${S3_BASE_KEY}"
fi

# Cleanup
rm -f "${BASE_TARBALL}"

# Unmount and cleanup
echo ""
echo "Cleaning up..."
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
sudo umount "${CACHE_WORKSPACE}/merged" || {
echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
echo "Attempting lazy unmount..."
sudo umount -l "${CACHE_WORKSPACE}/merged" || true
}
fi
rm -rf "${CACHE_WORKSPACE}"

# Remove from registry
if [ -f "${MOUNT_REGISTRY}" ]; then
grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
fi

echo "✓ Cleanup completed"

echo ""
echo "=========================================="
echo "Partial match cache save completed"
echo "New base created for: ${PRIMARY_KEY}"
echo "Base size: ${BASE_SIZE}"
if [ "${REGISTRY_USE_DELTAS}" = "true" ]; then
echo "Next exact-match build will create deltas from this base"
else
echo "Next exact-match build will reuse this base (base-only mode)"
fi
echo "=========================================="
exit 0
fi

# For exact-match ONLY: Save delta (if use-deltas enabled)
if [ "${CACHE_MODE}" = "exact-match" ]; then
# If deltas are disabled, just cleanup and exit
if [ "${REGISTRY_USE_DELTAS}" != "true" ]; then
echo "ℹ️ Delta caching disabled (use-deltas: false)"
echo "Base already exists for this key, nothing to save."

# Unmount and cleanup
echo ""
echo "Cleaning up..."
if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
fi
rm -rf "${CACHE_WORKSPACE}"

# Remove from registry
if [ -f "${MOUNT_REGISTRY}" ]; then
grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
fi

echo ""
echo "=========================================="
echo "Cache save completed (base-only mode)"
echo "=========================================="
exit 0
fi

# Check if upper layer has any changes
if [ -z "$(ls -A ${CACHE_WORKSPACE}/upper 2>/dev/null)" ]; then
echo "ℹ️ No changes detected in upper layer (cache is unchanged)"
echo "Skipping delta upload to save bandwidth."

# Still unmount and cleanup
echo ""
echo "Cleaning up..."
sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
rm -rf "${CACHE_WORKSPACE}"

echo ""
echo "=========================================="
echo "Cache save completed (no changes)"
echo "=========================================="
exit 0
fi

# Show delta statistics
echo "Delta layer statistics:"
echo " Files changed: $(find ${CACHE_WORKSPACE}/upper -type f 2>/dev/null | wc -l)"
echo " Delta size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1)"
echo ""

# Create delta tarball from upper layer
echo "Creating delta tarball..."
DELTA_TARBALL="/tmp/xahau-cache-delta-$$.tar.zst"

tar -cf - -C "${CACHE_WORKSPACE}/upper" . | zstd -3 -T0 -q -o "${DELTA_TARBALL}"

DELTA_SIZE=$(du -h "${DELTA_TARBALL}" | cut -f1)
echo "✓ Delta tarball created: ${DELTA_SIZE}"
echo ""

# Upload timestamped delta (no overwrites = zero concurrency issues)
TIMESTAMP=$(date +%Y%m%d%H%M%S)
COMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")

# Use PRIMARY_KEY for delta (ensures deltas match their base)
S3_DELTA_TIMESTAMPED="s3://${S3_BUCKET}/${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"

echo "Uploading timestamped delta to S3..."
echo " Key: ${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"

# Upload with tag (deltas cleaned up inline - keep last 1)
aws s3api put-object \
--bucket "${S3_BUCKET}" \
--key "${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst" \
--body "${DELTA_TARBALL}" \
--tagging 'type=delta-archive' \
--region "${S3_REGION}" \
>/dev/null

echo "✓ Uploaded: ${S3_DELTA_TIMESTAMPED}"

# Inline cleanup: Keep only latest delta (the one we just uploaded)
echo ""
echo "Cleaning up old deltas (keeping only latest)..."

# List all deltas for this key, sorted by LastModified (oldest first)
ALL_DELTAS=$(aws s3api list-objects-v2 \
--bucket "${S3_BUCKET}" \
--prefix "${PRIMARY_KEY}-delta-" \
--region "${S3_REGION}" \
--query 'sort_by(Contents, &LastModified)[*].Key' \
--output json 2>/dev/null || echo "[]")

DELTA_COUNT=$(echo "${ALL_DELTAS}" | jq 'length' 2>/dev/null || echo "0")

if [ "${DELTA_COUNT}" -gt 1 ]; then
# Keep last 1 (newest), delete all older ones (all except last 1 = [0:-1])
OLD_DELTAS=$(echo "${ALL_DELTAS}" | jq -r '.[0:-1][]' 2>/dev/null)

if [ -n "${OLD_DELTAS}" ]; then
DELETE_COUNT=$((DELTA_COUNT - 1))
echo " Found ${DELETE_COUNT} old delta(s) to delete"

# Create delete batch request JSON
DELETE_OBJECTS=$(echo "${OLD_DELTAS}" | jq -R -s -c 'split("\n") | map(select(length > 0)) | map({Key: .}) | {Objects: ., Quiet: true}' 2>/dev/null)

if [ -n "${DELETE_OBJECTS}" ]; then
aws s3api delete-objects \
--bucket "${S3_BUCKET}" \
--delete "${DELETE_OBJECTS}" \
--region "${S3_REGION}" \
>/dev/null 2>&1

echo "✓ Deleted ${DELETE_COUNT} old delta(s)"
fi
fi
else
echo "ℹ️ Only ${DELTA_COUNT} delta(s) exist, no cleanup needed"
fi

# Cleanup delta tarball
rm -f "${DELTA_TARBALL}"

# Cleanup: Unmount OverlayFS and remove workspace
echo ""
echo "Cleaning up..."

if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
sudo umount "${CACHE_WORKSPACE}/merged" || {
echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
echo "Attempting lazy unmount..."
sudo umount -l "${CACHE_WORKSPACE}/merged" || true
}
fi

# Remove workspace
rm -rf "${CACHE_WORKSPACE}"
fi

# Remove from registry
if [ -f "${MOUNT_REGISTRY}" ]; then
grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
fi

echo "✓ Cleanup completed"
# Cleanup
rm -f "${BASE_TARBALL}"

echo ""
echo "=========================================="
echo "Cache save completed successfully"
echo "Mode: ${CACHE_MODE}"
echo "Cache key: ${PRIMARY_KEY}"
if [ -n "${DELTA_SIZE:-}" ]; then
echo "Delta size: ${DELTA_SIZE}"
fi
echo "Cache size: ${BASE_SIZE}"
echo "Cache key: ${CACHE_KEY}"
echo "=========================================="
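Stripped of logging and edge cases, the save path on the new side of this diff reduces to: pack the target path with tar and zstd, skip the upload if an object for the key already exists (first write wins, mirroring GitHub Actions cache immutability), otherwise upload a single immutable base object. A condensed sketch of that sequence, with bucket, key, region, and target path treated as placeholders:

    set -euo pipefail
    TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
    S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"

    # Pack the cached directory into a zstd-compressed tarball
    tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${TARBALL}"

    # Immutability: if an object for this key already exists, do not overwrite it
    if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
      echo "Cache already exists, skipping upload"
    else
      aws s3api put-object --bucket "${S3_BUCKET}" \
        --key "${CACHE_KEY}-base.tar.zst" --body "${TARBALL}" \
        --tagging 'type=base' --region "${S3_REGION}" >/dev/null
    fi

    rm -f "${TARBALL}"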
@@ -1,44 +0,0 @@
name: 'Configure ccache'
description: 'Sets up ccache with consistent configuration'

inputs:
cache_dir:
description: 'Path to ccache directory'
required: false
default: '~/.ccache'
max_size:
description: 'Maximum cache size'
required: false
default: '2G'
hash_dir:
description: 'Whether to include directory paths in hash'
required: false
default: 'true'
compiler_check:
description: 'How to check compiler for changes'
required: false
default: 'content'

runs:
using: 'composite'
steps:
- name: Configure ccache
shell: bash
run: |
# Create cache directory
mkdir -p ${{ inputs.cache_dir }}

# Configure ccache settings
ccache --set-config=cache_dir="${{ inputs.cache_dir }}"
ccache --set-config=max_size=${{ inputs.max_size }}
ccache --set-config=hash_dir=${{ inputs.hash_dir }}
ccache --set-config=compiler_check=${{ inputs.compiler_check }}

# Export for use by build tools
echo "CCACHE_DIR=${{ inputs.cache_dir }}" >> $GITHUB_ENV

# Print config for verification
ccache -p

# Zero statistics before the build
ccache -z
.github/actions/xahau-ga-build/action.yml (vendored, 77 changed lines)
@@ -47,6 +47,18 @@ inputs:
description: 'GCC version to use for Clang toolchain (e.g. 11, 13)'
required: false
default: ''
ccache_max_size:
description: 'Maximum ccache size'
required: false
default: '2G'
ccache_hash_dir:
description: 'Whether to include directory paths in hash'
required: false
default: 'true'
ccache_compiler_check:
description: 'How to check compiler for changes'
required: false
default: 'content'
aws-access-key-id:
description: 'AWS Access Key ID for S3 cache storage'
required: true
@@ -79,6 +91,31 @@ runs:
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

- name: Configure ccache
if: inputs.ccache_enabled == 'true'
shell: bash
run: |
# Use ccache's default cache_dir (~/.ccache) - don't override it
# This avoids tilde expansion issues when setting it explicitly

# Create cache directory using ccache's default
mkdir -p ~/.ccache

# Configure ccache settings (but NOT cache_dir - use default)
# This overwrites any cached config to ensure fresh configuration
ccache --set-config=max_size=${{ inputs.ccache_max_size }}
ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}

# Note: Not setting CCACHE_DIR - let ccache use its default (~/.ccache)

# Print config for verification
echo "=== ccache configuration ==="
ccache -p

# Zero statistics before the build
ccache -z

- name: Configure project
shell: bash
run: |
@@ -93,14 +130,27 @@ runs:
if [ -n "${{ inputs.cxx }}" ]; then
export CXX="${{ inputs.cxx }}"
fi

# Configure ccache launcher args
CCACHE_ARGS=""

# Create wrapper toolchain that overlays ccache on top of Conan's toolchain
# This enables ccache for the main app build without affecting Conan dependency builds
if [ "${{ inputs.ccache_enabled }}" = "true" ]; then
CCACHE_ARGS="-DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
cat > wrapper_toolchain.cmake <<'EOF'
# Include Conan's generated toolchain first (sets compiler, flags, etc.)
# Note: CMAKE_CURRENT_LIST_DIR is the directory containing this wrapper (.build/)
include(${CMAKE_CURRENT_LIST_DIR}/build/generators/conan_toolchain.cmake)

# Overlay ccache configuration for main application build
# This does NOT affect Conan dependency builds (already completed)
set(CMAKE_C_COMPILER_LAUNCHER ccache CACHE STRING "C compiler launcher" FORCE)
set(CMAKE_CXX_COMPILER_LAUNCHER ccache CACHE STRING "C++ compiler launcher" FORCE)
EOF
TOOLCHAIN_FILE="wrapper_toolchain.cmake"
echo "✅ Created wrapper toolchain with ccache enabled"
else
TOOLCHAIN_FILE="build/generators/conan_toolchain.cmake"
echo "ℹ️ Using Conan toolchain directly (ccache disabled)"
fi

# Configure C++ standard library if specified
# libstdcxx used for clang-14/16 to work around missing lexicographical_compare_three_way in libc++
# libcxx can be used with clang-17+ which has full C++20 support
@@ -140,16 +190,25 @@ runs:
# So we get: .build/build/generators/ with our non-standard folder name
cmake .. \
-G "${{ inputs.generator }}" \
$CCACHE_ARGS \
${CMAKE_CXX_FLAGS:+-DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS"} \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=${TOOLCHAIN_FILE} \
-DCMAKE_BUILD_TYPE=${{ inputs.configuration }}

- name: Show ccache config before build
if: inputs.ccache_enabled == 'true'
shell: bash
run: |
echo "=========================================="
echo "ccache configuration before build"
echo "=========================================="
ccache -p
echo ""

- name: Build project
shell: bash
run: |
cd ${{ inputs.build_dir }}
cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc)
cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) -- -v

- name: Show ccache statistics
if: inputs.ccache_enabled == 'true'
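The wrapper toolchain works because CMake's CMAKE_C_COMPILER_LAUNCHER and CMAKE_CXX_COMPILER_LAUNCHER variables make the generated build system prefix every compile command with the launcher, here ccache, so unchanged translation units become cache hits on the next build. A standalone sketch of the same mechanism, independent of the Conan toolchain (paths and project layout are illustrative):

    # Configure any CMake project with ccache as the compiler launcher
    cmake -S . -B .build \
      -DCMAKE_C_COMPILER_LAUNCHER=ccache \
      -DCMAKE_CXX_COMPILER_LAUNCHER=ccache

    # Each compile is now invoked as: ccache <compiler> <flags> ...
    cmake --build .build --parallel "$(nproc)"

    # Inspect hit/miss counters after the build
    ccache -s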
.github/actions/xahau-ga-dependencies/action.yml (vendored, 76 changed lines)
@@ -25,6 +25,28 @@ inputs:
description: 'Main branch name for restore keys'
required: false
default: 'dev'
os:
description: 'Operating system (Linux, Macos)'
required: false
default: 'Linux'
arch:
description: 'Architecture (x86_64, armv8)'
required: false
default: 'x86_64'
compiler:
description: 'Compiler type (gcc, clang, apple-clang)'
required: true
compiler_version:
description: 'Compiler version (11, 13, 14, etc.)'
required: true
cc:
description: 'C compiler executable (gcc-13, clang-14, etc.), empty for macOS'
required: false
default: ''
cxx:
description: 'C++ compiler executable (g++-14, clang++-14, etc.), empty for macOS'
required: false
default: ''
stdlib:
description: 'C++ standard library for Conan configuration (note: also in compiler-id)'
required: true
@@ -59,10 +81,61 @@ runs:
restore-keys: |
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
use-deltas: 'false'
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

- name: Configure Conan
shell: bash
run: |
# Create the default profile directory if it doesn't exist
mkdir -p ~/.conan2/profiles

# Determine the correct libcxx based on stdlib parameter
if [ "${{ inputs.stdlib }}" = "libcxx" ]; then
LIBCXX="libc++"
else
LIBCXX="libstdc++11"
fi

# Create profile with our specific settings
# This overwrites any cached profile to ensure fresh configuration
cat > ~/.conan2/profiles/default <<EOF
[settings]
arch=${{ inputs.arch }}
build_type=${{ inputs.configuration }}
compiler=${{ inputs.compiler }}
compiler.cppstd=20
compiler.libcxx=${LIBCXX}
compiler.version=${{ inputs.compiler_version }}
os=${{ inputs.os }}
EOF

# Add buildenv and conf sections for Linux (not needed for macOS)
if [ "${{ inputs.os }}" = "Linux" ] && [ -n "${{ inputs.cc }}" ]; then
cat >> ~/.conan2/profiles/default <<EOF

[buildenv]
CC=/usr/bin/${{ inputs.cc }}
CXX=/usr/bin/${{ inputs.cxx }}

[conf]
tools.build:compiler_executables={"c": "/usr/bin/${{ inputs.cc }}", "cpp": "/usr/bin/${{ inputs.cxx }}"}
EOF
fi

# Add macOS-specific conf if needed
if [ "${{ inputs.os }}" = "Macos" ]; then
cat >> ~/.conan2/profiles/default <<EOF

[conf]
# Workaround for gRPC with newer Apple Clang
tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
EOF
fi

# Display profile for verification
conan profile show

- name: Export custom recipes
shell: bash
run: |
@@ -92,6 +165,5 @@ runs:
with:
path: ~/.conan2
key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
use-deltas: 'false'
aws-access-key-id: ${{ inputs.aws-access-key-id }}
aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
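For a hypothetical Linux matrix entry (gcc 13, libstdc++11, Release build), the heredocs above would render ~/.conan2/profiles/default roughly as follows. Only the structure and keys come from the action; the concrete values are illustrative:

    [settings]
    arch=x86_64
    build_type=Release
    compiler=gcc
    compiler.cppstd=20
    compiler.libcxx=libstdc++11
    compiler.version=13
    os=Linux

    [buildenv]
    CC=/usr/bin/gcc-13
    CXX=/usr/bin/g++-13

    [conf]
    tools.build:compiler_executables={"c": "/usr/bin/gcc-13", "cpp": "/usr/bin/g++-13"}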
.github/workflows/xahau-ga-macos.yml.disabled (vendored, 46 changed lines)
@@ -78,14 +78,6 @@ jobs:
- name: Install ccache
run: brew install ccache

- name: Configure ccache
uses: ./.github/actions/xahau-configure-ccache
with:
max_size: 2G
hash_dir: true
compiler_check: content
is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}

- name: Check environment
run: |
echo "PATH:"
@@ -98,32 +90,12 @@ jobs:
echo "---- Full Environment ----"
env

- name: Configure Conan
- name: Detect compiler version
id: detect-compiler
run: |
# Create the default profile directory if it doesn't exist
mkdir -p ~/.conan2/profiles

# Detect compiler version
COMPILER_VERSION=$(clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+')

# Create profile with our specific settings
cat > ~/.conan2/profiles/default <<EOF
[settings]
arch=armv8
build_type=Release
compiler=apple-clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=${COMPILER_VERSION}
os=Macos

[conf]
# Workaround for gRPC with newer Apple Clang
tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
EOF

# Display profile for verification
conan profile show
echo "compiler_version=${COMPILER_VERSION}" >> $GITHUB_OUTPUT
echo "Detected Apple Clang version: ${COMPILER_VERSION}"

- name: Install dependencies
uses: ./.github/actions/xahau-ga-dependencies
@@ -133,6 +105,13 @@ jobs:
compiler-id: clang
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
os: Macos
arch: armv8
compiler: apple-clang
compiler_version: ${{ steps.detect-compiler.outputs.compiler_version }}
stdlib: libcxx
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

- name: Build
uses: ./.github/actions/xahau-ga-build
@@ -143,6 +122,9 @@ jobs:
compiler-id: clang
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
stdlib: libcxx
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

- name: Test
run: |
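The detect-compiler step only needs the major version out of clang's banner. Against a sample banner such as "Apple clang version 16.0.0" (illustrative output, not captured from CI), the grep pipeline behaves like this:

    # Illustrative banner line printed by `clang --version`:
    #   Apple clang version 16.0.0 (clang-1600.0.26.3)
    # The pipeline keeps "version 16" and then just the digits:
    clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+'   # -> 16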
.github/workflows/xahau-ga-nix.yml (vendored, 45 changed lines)
@@ -231,47 +231,6 @@ jobs:
# Install Conan 2
pip install --upgrade "conan>=2.0,<3"

- name: Configure ccache
uses: ./.github/actions/xahau-configure-ccache
with:
max_size: 2G
hash_dir: true
compiler_check: content

- name: Configure Conan
run: |
# Create the default profile directory if it doesn't exist
mkdir -p ~/.conan2/profiles

# Determine the correct libcxx based on stdlib parameter
if [ "${{ matrix.stdlib }}" = "libcxx" ]; then
LIBCXX="libc++"
else
LIBCXX="libstdc++11"
fi

# Create profile with our specific settings
cat > ~/.conan2/profiles/default <<EOF
[settings]
arch=x86_64
build_type=${{ matrix.configuration }}
compiler=${{ matrix.compiler }}
compiler.cppstd=20
compiler.libcxx=${LIBCXX}
compiler.version=${{ matrix.compiler_version }}
os=Linux

[buildenv]
CC=/usr/bin/${{ matrix.cc }}
CXX=/usr/bin/${{ matrix.cxx }}

[conf]
tools.build:compiler_executables={"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}
EOF

# Display profile for verification
conan profile show

- name: Check environment
run: |
echo "PATH:"
@@ -292,6 +251,10 @@ jobs:
compiler-id: ${{ matrix.compiler_id }}
cache_version: ${{ env.CACHE_VERSION }}
main_branch: ${{ env.MAIN_BRANCH_NAME }}
compiler: ${{ matrix.compiler }}
compiler_version: ${{ matrix.compiler_version }}
cc: ${{ matrix.cc }}
cxx: ${{ matrix.cxx }}
stdlib: ${{ matrix.stdlib }}
aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}
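The extra inputs now forwarded to xahau-ga-dependencies assume the job matrix describes the toolchain. A matrix entry of roughly the following shape would satisfy them; the values are hypothetical, only the field names come from the workflow above:

    strategy:
      matrix:
        include:
          - compiler: gcc
            compiler_version: 13
            compiler_id: gcc-13
            cc: gcc-13
            cxx: g++-13
            stdlib: libstdcxx
            configuration: Release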