Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-04 02:35:48 +00:00)

Compare commits

7 commits: a4f96a435a ... 83f6bc64e1
| Author | SHA1 | Date |
|---|---|---|
|  | 83f6bc64e1 |  |
|  | be6fad9692 |  |
|  | b24e4647ba |  |
|  | 638cb0afe5 |  |
|  | bd384e6bc1 |  |
|  | 4c546e5d91 |  |
|  | 28727b3f86 |  |
@@ -1,5 +1,5 @@
name: 'Xahau Cache Restore (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/restore using S3 and OverlayFS for delta caching'
name: 'Xahau Cache Restore (S3)'
description: 'Drop-in replacement for actions/cache/restore using S3 storage'

inputs:
  path:
@@ -28,10 +28,6 @@ inputs:
    description: 'Check if a cache entry exists for the given input(s) without downloading it'
    required: false
    default: 'false'
  use-deltas:
    description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
    required: false
    default: 'true'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
@@ -48,13 +44,13 @@ outputs:
    description: 'The key that was used to restore the cache (may be from restore-keys)'
    value: ${{ steps.restore-cache.outputs.cache-primary-key }}
  cache-matched-key:
    description: 'The key that matched (same as cache-primary-key for compatibility)'
    value: ${{ steps.restore-cache.outputs.cache-primary-key }}
    description: 'The key that was used to restore the cache (exact or prefix match)'
    value: ${{ steps.restore-cache.outputs.cache-matched-key }}

runs:
  using: 'composite'
  steps:
    - name: Restore cache from S3 with OverlayFS
    - name: Restore cache from S3
      id: restore-cache
      shell: bash
      env:
@@ -67,133 +63,42 @@ runs:
        TARGET_PATH: ${{ inputs.path }}
        FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }}
        LOOKUP_ONLY: ${{ inputs.lookup-only }}
        USE_DELTAS: ${{ inputs.use-deltas }}
        COMMIT_MSG: ${{ github.event.head_commit.message }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Restore (S3 + OverlayFS)"
        echo "Xahau Cache Restore (S3)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Primary key: ${CACHE_KEY}"
        echo "Cache key: ${CACHE_KEY}"
        echo "S3 bucket: s3://${S3_BUCKET}"
        echo "Use deltas: ${USE_DELTAS}"
        echo ""

        # Normalize target path (expand tilde and resolve to absolute path)
        # This ensures consistent path comparison in the mount registry
        if [[ "${TARGET_PATH}" == ~* ]]; then
          # Expand tilde manually (works even if directory doesn't exist yet)
          TARGET_PATH="${HOME}${TARGET_PATH:1}"
        fi
        TARGET_PATH=$(realpath -m "${TARGET_PATH}")
        echo "Normalized target path: ${TARGET_PATH}"
        echo ""

        # Generate unique cache workspace
        CACHE_HASH=$(echo "${CACHE_KEY}" | md5sum | cut -d' ' -f1)
        CACHE_WORKSPACE="/tmp/xahau-cache-${CACHE_HASH}"

        echo "Cache workspace: ${CACHE_WORKSPACE}"

        # Check for [ci-clear-cache] tag in commit message
        if echo "${COMMIT_MSG}" | grep -q '\[ci-clear-cache\]'; then
          echo ""
          echo "🗑️  [ci-clear-cache] detected in commit message"
          echo "Clearing cache for key: ${CACHE_KEY}"
          echo ""

          # Delete base layer
          S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"
          if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "Deleting base layer: ${S3_BASE_KEY}"
            aws s3 rm "${S3_BASE_KEY}" --region "${S3_REGION}" 2>/dev/null || true
            echo "✓ Base layer deleted"
          else
            echo "ℹ️  No base layer found to delete"
          fi

          # Delete all delta layers for this key
          echo "Deleting all delta layers matching: ${CACHE_KEY}-delta-*"
          DELTA_COUNT=$(aws s3 ls "s3://${S3_BUCKET}/" --region "${S3_REGION}" | grep "${CACHE_KEY}-delta-" | wc -l || echo "0")
          DELTA_COUNT=$(echo "${DELTA_COUNT}" | tr -d ' \n')  # Trim whitespace
          if [ "${DELTA_COUNT}" -gt 0 ]; then
            aws s3 rm "s3://${S3_BUCKET}/" --recursive \
              --exclude "*" \
              --include "${CACHE_KEY}-delta-*" \
              --region "${S3_REGION}" 2>/dev/null || true
            echo "✓ Deleted ${DELTA_COUNT} delta layer(s)"
          else
            echo "ℹ️  No delta layers found to delete"
          fi

          echo ""
          echo "✅ Cache cleared successfully"
          echo "Build will proceed from scratch (bootstrap mode)"
          echo ""
        fi

        # Create OverlayFS directory structure
        mkdir -p "${CACHE_WORKSPACE}"/{base,upper,work,merged}

        # Function to try downloading from S3
        # Function to try restoring a cache key
        try_restore_key() {
          local try_key="$1"
          local s3_base="s3://${S3_BUCKET}/${try_key}-base.tar.zst"
          local key=$1
          local s3_key="s3://${S3_BUCKET}/${key}-base.tar.zst"

          echo "Trying cache key: ${try_key}"

          # Check if base exists (one base per key, immutable)
          echo "Checking for base layer..."
          if aws s3 ls "${s3_base}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "✓ Found base layer: ${s3_base}"

            if [ "${LOOKUP_ONLY}" = "true" ]; then
              echo "Lookup-only mode: cache exists, skipping download"
              return 0
            fi

            # Download base layer
            echo "Downloading base layer..."
            aws s3 cp "${s3_base}" /tmp/cache-base.tar.zst --region "${S3_REGION}" --quiet

            # Extract base layer
            echo "Extracting base layer..."
            tar -xf /tmp/cache-base.tar.zst -C "${CACHE_WORKSPACE}/base"
            rm /tmp/cache-base.tar.zst

            # Query for latest timestamped delta (only if use-deltas enabled)
            if [ "${USE_DELTAS}" = "true" ]; then
              echo "Querying for latest delta..."
              LATEST_DELTA=$(aws s3api list-objects-v2 \
                --bucket "${S3_BUCKET}" \
                --prefix "${try_key}-delta-" \
                --region "${S3_REGION}" \
                --query 'sort_by(Contents, &LastModified)[-1].Key' \
                --output text 2>/dev/null || echo "")

              if [ -n "${LATEST_DELTA}" ] && [ "${LATEST_DELTA}" != "None" ]; then
                echo "✓ Found latest delta: ${LATEST_DELTA}"
                echo "Downloading delta layer..."
                aws s3 cp "s3://${S3_BUCKET}/${LATEST_DELTA}" /tmp/cache-delta.tar.zst --region "${S3_REGION}" --quiet

                echo "Extracting delta layer..."
                tar -xf /tmp/cache-delta.tar.zst -C "${CACHE_WORKSPACE}/upper" 2>/dev/null || true
                rm /tmp/cache-delta.tar.zst
              else
                echo "ℹ No delta layer found (this is fine for first build)"
              fi
            else
              echo "ℹ Delta caching disabled (use-deltas: false)"
            fi
          echo "Checking for key: ${key}"

          if aws s3 ls "${s3_key}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "✓ Found cache: ${s3_key}"
            return 0
          else
            echo "✗ No base layer found for key: ${try_key}"
            echo "✗ Not found: ${key}"
            return 1
          fi
        }

        # Try primary key first
        # Try exact match first
        MATCHED_KEY=""
        EXACT_MATCH="false"

@@ -208,12 +113,8 @@ runs:
            echo ""
            echo "Primary key not found, trying restore-keys..."

            # Split restore-keys by newline
            while IFS= read -r restore_key; do
              # Skip empty lines
              [ -z "${restore_key}" ] && continue

              # Trim whitespace
              restore_key=$(echo "${restore_key}" | xargs)

              if try_restore_key "${restore_key}"; then
@@ -231,7 +132,6 @@ runs:
        if [ -z "${MATCHED_KEY}" ]; then
          echo ""
          echo "❌ No cache found for key: ${CACHE_KEY}"
          echo "This is BOOTSTRAP mode - first build for this cache key"

          if [ "${FAIL_ON_MISS}" = "true" ]; then
            echo "fail-on-cache-miss is enabled, failing workflow"
@@ -241,16 +141,11 @@ runs:
          # Set outputs for cache miss
          echo "cache-hit=false" >> $GITHUB_OUTPUT
          echo "cache-primary-key=" >> $GITHUB_OUTPUT
          echo "cache-matched-key=" >> $GITHUB_OUTPUT

          # Create empty cache directory for bootstrap
          # Create empty cache directory
          mkdir -p "${TARGET_PATH}"

          # Record bootstrap mode for save action
          # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
          # For bootstrap: workspace="bootstrap", matched_key=primary_key, exact_match=false
          MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
          echo "${TARGET_PATH}:bootstrap:${CACHE_KEY}:${CACHE_KEY}:false:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"

          echo ""
          echo "=========================================="
          echo "Cache restore completed (bootstrap mode)"
@@ -262,36 +157,30 @@ runs:
        # If lookup-only, we're done
        if [ "${LOOKUP_ONLY}" = "true" ]; then
          echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
          echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

          # Clean up workspace
          rm -rf "${CACHE_WORKSPACE}"
          echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
          echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

          echo ""
          echo "=========================================="
          echo "Cache lookup completed (lookup-only mode)"
          echo "Cache exists: ${MATCHED_KEY}"
          echo "=========================================="
          exit 0
        fi

        # Mount OverlayFS
        # Download and extract cache
        S3_KEY="s3://${S3_BUCKET}/${MATCHED_KEY}-base.tar.zst"
        TEMP_TARBALL="/tmp/xahau-cache-restore-$$.tar.zst"

        echo ""
        echo "Mounting OverlayFS..."
        sudo mount -t overlay overlay \
          -o lowerdir="${CACHE_WORKSPACE}/base",upperdir="${CACHE_WORKSPACE}/upper",workdir="${CACHE_WORKSPACE}/work" \
          "${CACHE_WORKSPACE}/merged"
        echo "Downloading cache..."
        aws s3 cp "${S3_KEY}" "${TEMP_TARBALL}" --region "${S3_REGION}"

        # Verify mount
        if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
          echo "✓ OverlayFS mounted successfully"
        else
          echo "❌ Failed to mount OverlayFS"
          exit 1
        fi
        TARBALL_SIZE=$(du -h "${TEMP_TARBALL}" | cut -f1)
        echo "✓ Downloaded: ${TARBALL_SIZE}"

        # Create target directory parent if needed
        TARGET_PARENT=$(dirname "${TARGET_PATH}")
        mkdir -p "${TARGET_PARENT}"
        # Create parent directory if needed
        mkdir -p "$(dirname "${TARGET_PATH}")"

        # Remove existing target if it exists
        if [ -e "${TARGET_PATH}" ]; then
@@ -299,30 +188,24 @@ runs:
          rm -rf "${TARGET_PATH}"
        fi

        # Symlink target path to merged view
        echo "Creating symlink: ${TARGET_PATH} -> ${CACHE_WORKSPACE}/merged"
        ln -s "${CACHE_WORKSPACE}/merged" "${TARGET_PATH}"
        # Create target directory and extract
        mkdir -p "${TARGET_PATH}"
        echo ""
        echo "Extracting cache..."
        zstd -d -c "${TEMP_TARBALL}" | tar -xf - -C "${TARGET_PATH}"
        echo "✓ Cache extracted to: ${TARGET_PATH}"

        # Save mount info for cleanup/save later
        # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
        # This tells save action whether to create new base (partial match) or just delta (exact match)
        MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"
        echo "${TARGET_PATH}:${CACHE_WORKSPACE}:${MATCHED_KEY}:${CACHE_KEY}:${EXACT_MATCH}:${USE_DELTAS}" >> "${MOUNT_REGISTRY}"
        # Cleanup
        rm -f "${TEMP_TARBALL}"

        # Set outputs
        echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT
        echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

        # Show statistics
        echo ""
        echo "Cache statistics:"
        echo "  Base layer size:  $(du -sh ${CACHE_WORKSPACE}/base 2>/dev/null | cut -f1 || echo '0')"
        echo "  Delta layer size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1 || echo '0')"
        echo "  Merged view size: $(du -sh ${CACHE_WORKSPACE}/merged 2>/dev/null | cut -f1 || echo '0')"
        echo "cache-primary-key=${CACHE_KEY}" >> $GITHUB_OUTPUT
        echo "cache-matched-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT

        echo ""
        echo "=========================================="
        echo "Cache restore completed successfully"
        echo "Exact match: ${EXACT_MATCH}"
        echo "Cache hit: ${EXACT_MATCH}"
        echo "Matched key: ${MATCHED_KEY}"
        echo "=========================================="
368  .github/actions/xahau-actions-cache-save/action.yml  (vendored)

@@ -1,5 +1,5 @@
name: 'Xahau Cache Save (S3 + OverlayFS)'
description: 'Drop-in replacement for actions/cache/save using S3 and OverlayFS for delta caching'
name: 'Xahau Cache Save (S3)'
description: 'Drop-in replacement for actions/cache/save using S3 storage'

inputs:
  path:
@@ -16,10 +16,6 @@ inputs:
    description: 'S3 region'
    required: false
    default: 'us-east-1'
  use-deltas:
    description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.'
    required: false
    default: 'true'
  # Note: Composite actions can't access secrets.* directly - must be passed from workflow
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 access'
@@ -31,7 +27,7 @@ inputs:
runs:
  using: 'composite'
  steps:
    - name: Save cache to S3 with OverlayFS delta
    - name: Save cache to S3
      shell: bash
      env:
        AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
@@ -40,12 +36,11 @@ runs:
        S3_REGION: ${{ inputs.s3-region }}
        CACHE_KEY: ${{ inputs.key }}
        TARGET_PATH: ${{ inputs.path }}
        USE_DELTAS: ${{ inputs.use-deltas }}
      run: |
        set -euo pipefail

        echo "=========================================="
        echo "Xahau Cache Save (S3 + OverlayFS)"
        echo "Xahau Cache Save (S3)"
        echo "=========================================="
        echo "Target path: ${TARGET_PATH}"
        echo "Cache key: ${CACHE_KEY}"
@@ -53,346 +48,63 @@ runs:
        echo ""

        # Normalize target path (expand tilde and resolve to absolute path)
        # This ensures consistent path comparison with the mount registry
        if [[ "${TARGET_PATH}" == ~* ]]; then
          # Expand tilde manually (works even if directory doesn't exist yet)
          TARGET_PATH="${HOME}${TARGET_PATH:1}"
        fi
        echo "Normalized target path: ${TARGET_PATH}"
        echo ""

        # Find the cache workspace from mount registry
        MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt"

        if [ ! -f "${MOUNT_REGISTRY}" ]; then
          echo "⚠️  No cache mounts found (mount registry doesn't exist)"
          echo "This usually means cache restore was not called, or there was no cache to restore."
        # Check if target directory exists
        if [ ! -d "${TARGET_PATH}" ]; then
          echo "⚠️  Target directory does not exist: ${TARGET_PATH}"
          echo "Skipping cache save."
          exit 0
        fi

        # Find entry for this path
        # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas
        # Bootstrap mode: path:bootstrap:key:key:false:true/false (workspace="bootstrap")
        CACHE_WORKSPACE=""
        MATCHED_KEY=""
        PRIMARY_KEY=""
        EXACT_MATCH=""
        REGISTRY_USE_DELTAS=""
        # Use static base name (one base per key, immutable)
        S3_BASE_KEY="s3://${S3_BUCKET}/${CACHE_KEY}-base.tar.zst"

        while IFS=: read -r mount_path mount_workspace mount_matched_key mount_primary_key mount_exact_match mount_use_deltas; do
          if [ "${mount_path}" = "${TARGET_PATH}" ]; then
            CACHE_WORKSPACE="${mount_workspace}"
            MATCHED_KEY="${mount_matched_key}"
            PRIMARY_KEY="${mount_primary_key}"
            EXACT_MATCH="${mount_exact_match}"
            REGISTRY_USE_DELTAS="${mount_use_deltas}"
            break
          fi
        done < "${MOUNT_REGISTRY}"

        if [ -z "${CACHE_WORKSPACE}" ] && [ -z "${MATCHED_KEY}" ]; then
          echo "⚠️  No cache entry found for path: ${TARGET_PATH}"
          echo "This usually means cache restore was not called for this path."
          echo "Skipping cache save."
        # Check if base already exists (immutability - first write wins)
        if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
          echo "⚠️  Cache already exists: ${S3_BASE_KEY}"
          echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
          echo ""
          echo "=========================================="
          echo "Cache save completed (already exists)"
          echo "=========================================="
          exit 0
        fi

        # Determine cache mode
        if [ "${CACHE_WORKSPACE}" = "bootstrap" ]; then
          CACHE_MODE="bootstrap"
          PRIMARY_KEY="${MATCHED_KEY}"  # In bootstrap, matched_key field contains primary key
          echo "Cache mode: BOOTSTRAP (first build for this key)"
          echo "Primary key: ${PRIMARY_KEY}"
        elif [ "${EXACT_MATCH}" = "false" ]; then
          CACHE_MODE="partial-match"
          echo "Cache mode: PARTIAL MATCH (restore-key used)"
          echo "Cache workspace: ${CACHE_WORKSPACE}"
          echo "Matched key from restore: ${MATCHED_KEY}"
          echo "Primary key (will save new base): ${PRIMARY_KEY}"
        else
          CACHE_MODE="exact-match"
          echo "Cache mode: EXACT MATCH (cache hit)"
          echo "Cache workspace: ${CACHE_WORKSPACE}"
          echo "Matched key: ${MATCHED_KEY}"
        fi
        echo "Use deltas: ${REGISTRY_USE_DELTAS}"
        # Create tarball
        BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"

        echo "Creating cache tarball..."
        tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

        BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
        echo "✓ Cache tarball created: ${BASE_SIZE}"
        echo ""

        # Handle different cache modes
        if [ "${CACHE_MODE}" = "bootstrap" ]; then
          # Bootstrap: Save entire cache as base layer (no OverlayFS was used)
          echo "Bootstrap mode: Creating initial base layer from ${TARGET_PATH}"
        # Upload to S3
        echo "Uploading cache to S3..."
        echo "  Key: ${CACHE_KEY}-base.tar.zst"

          BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
          echo "Creating base tarball..."
          tar -cf - -C "${TARGET_PATH}" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"
        aws s3api put-object \
          --bucket "${S3_BUCKET}" \
          --key "${CACHE_KEY}-base.tar.zst" \
          --body "${BASE_TARBALL}" \
          --tagging 'type=base' \
          --region "${S3_REGION}" \
          >/dev/null

          BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
          echo "✓ Base tarball created: ${BASE_SIZE}"
          echo ""
        echo "✓ Uploaded: ${S3_BASE_KEY}"

          # Use static base name (one base per key, immutable)
          S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"

          # Check if base already exists (immutability - first write wins)
          if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "⚠️  Base layer already exists: ${S3_BASE_KEY}"
            echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
          else
            echo "Uploading base layer to S3..."
            echo "  Key: ${PRIMARY_KEY}-base.tar.zst"

            aws s3api put-object \
              --bucket "${S3_BUCKET}" \
              --key "${PRIMARY_KEY}-base.tar.zst" \
              --body "${BASE_TARBALL}" \
              --tagging 'type=base' \
              --region "${S3_REGION}" \
              >/dev/null

            echo "✓ Uploaded: ${S3_BASE_KEY}"
          fi

          # Cleanup
          rm -f "${BASE_TARBALL}"

          echo ""
          echo "=========================================="
          echo "Bootstrap cache save completed"
          echo "Base size: ${BASE_SIZE}"
          echo "Cache key: ${PRIMARY_KEY}"
          echo "=========================================="
          exit 0

        elif [ "${CACHE_MODE}" = "partial-match" ]; then
          # Partial match: Save merged view as new base ONLY (no delta)
          # The delta is relative to the OLD base, not the NEW base we're creating
          echo "Partial match mode: Saving new base layer for primary key"
          echo "Note: Delta will NOT be saved (it's relative to old base)"

          BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst"
          echo "Creating base tarball from merged view..."
          tar -cf - -C "${CACHE_WORKSPACE}/merged" . | zstd -3 -T0 -q -o "${BASE_TARBALL}"

          BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1)
          echo "✓ Base tarball created: ${BASE_SIZE}"
          echo ""

          # Use static base name (one base per key, immutable)
          S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst"

          # Check if base already exists (immutability - first write wins)
          if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then
            echo "⚠️  Base layer already exists: ${S3_BASE_KEY}"
            echo "Skipping upload (immutability - first write wins, like GitHub Actions)"
          else
            echo "Uploading new base layer to S3..."
            echo "  Key: ${PRIMARY_KEY}-base.tar.zst"

            aws s3api put-object \
              --bucket "${S3_BUCKET}" \
              --key "${PRIMARY_KEY}-base.tar.zst" \
              --body "${BASE_TARBALL}" \
              --tagging 'type=base' \
              --region "${S3_REGION}" \
              >/dev/null

            echo "✓ Uploaded: ${S3_BASE_KEY}"
          fi

          # Cleanup
          rm -f "${BASE_TARBALL}"

          # Unmount and cleanup
          echo ""
          echo "Cleaning up..."
          if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
            sudo umount "${CACHE_WORKSPACE}/merged" || {
              echo "⚠️  Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
              echo "Attempting lazy unmount..."
              sudo umount -l "${CACHE_WORKSPACE}/merged" || true
            }
          fi
          rm -rf "${CACHE_WORKSPACE}"

          # Remove from registry
          if [ -f "${MOUNT_REGISTRY}" ]; then
            grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
            mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
          fi

          echo "✓ Cleanup completed"

          echo ""
          echo "=========================================="
          echo "Partial match cache save completed"
          echo "New base created for: ${PRIMARY_KEY}"
          echo "Base size: ${BASE_SIZE}"
          if [ "${REGISTRY_USE_DELTAS}" = "true" ]; then
            echo "Next exact-match build will create deltas from this base"
          else
            echo "Next exact-match build will reuse this base (base-only mode)"
          fi
          echo "=========================================="
          exit 0
        fi

        # For exact-match ONLY: Save delta (if use-deltas enabled)
        if [ "${CACHE_MODE}" = "exact-match" ]; then
          # If deltas are disabled, just cleanup and exit
          if [ "${REGISTRY_USE_DELTAS}" != "true" ]; then
            echo "ℹ️  Delta caching disabled (use-deltas: false)"
            echo "Base already exists for this key, nothing to save."

            # Unmount and cleanup
            echo ""
            echo "Cleaning up..."
            if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
              sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
            fi
            rm -rf "${CACHE_WORKSPACE}"

            # Remove from registry
            if [ -f "${MOUNT_REGISTRY}" ]; then
              grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
              mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
            fi

            echo ""
            echo "=========================================="
            echo "Cache save completed (base-only mode)"
            echo "=========================================="
            exit 0
          fi

          # Check if upper layer has any changes
          if [ -z "$(ls -A ${CACHE_WORKSPACE}/upper 2>/dev/null)" ]; then
            echo "ℹ️  No changes detected in upper layer (cache is unchanged)"
            echo "Skipping delta upload to save bandwidth."

            # Still unmount and cleanup
            echo ""
            echo "Cleaning up..."
            sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true
            rm -rf "${CACHE_WORKSPACE}"

            echo ""
            echo "=========================================="
            echo "Cache save completed (no changes)"
            echo "=========================================="
            exit 0
          fi

          # Show delta statistics
          echo "Delta layer statistics:"
          echo "  Files changed: $(find ${CACHE_WORKSPACE}/upper -type f 2>/dev/null | wc -l)"
          echo "  Delta size:    $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1)"
          echo ""

          # Create delta tarball from upper layer
          echo "Creating delta tarball..."
          DELTA_TARBALL="/tmp/xahau-cache-delta-$$.tar.zst"

          tar -cf - -C "${CACHE_WORKSPACE}/upper" . | zstd -3 -T0 -q -o "${DELTA_TARBALL}"

          DELTA_SIZE=$(du -h "${DELTA_TARBALL}" | cut -f1)
          echo "✓ Delta tarball created: ${DELTA_SIZE}"
          echo ""

          # Upload timestamped delta (no overwrites = zero concurrency issues)
          TIMESTAMP=$(date +%Y%m%d%H%M%S)
          COMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")

          # Use PRIMARY_KEY for delta (ensures deltas match their base)
          S3_DELTA_TIMESTAMPED="s3://${S3_BUCKET}/${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"

          echo "Uploading timestamped delta to S3..."
          echo "  Key: ${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst"

          # Upload with tag (deltas cleaned up inline - keep last 1)
          aws s3api put-object \
            --bucket "${S3_BUCKET}" \
            --key "${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst" \
            --body "${DELTA_TARBALL}" \
            --tagging 'type=delta-archive' \
            --region "${S3_REGION}" \
            >/dev/null

          echo "✓ Uploaded: ${S3_DELTA_TIMESTAMPED}"

          # Inline cleanup: Keep only latest delta (the one we just uploaded)
          echo ""
          echo "Cleaning up old deltas (keeping only latest)..."

          # List all deltas for this key, sorted by LastModified (oldest first)
          ALL_DELTAS=$(aws s3api list-objects-v2 \
            --bucket "${S3_BUCKET}" \
            --prefix "${PRIMARY_KEY}-delta-" \
            --region "${S3_REGION}" \
            --query 'sort_by(Contents, &LastModified)[*].Key' \
            --output json 2>/dev/null || echo "[]")

          DELTA_COUNT=$(echo "${ALL_DELTAS}" | jq 'length' 2>/dev/null || echo "0")

          if [ "${DELTA_COUNT}" -gt 1 ]; then
            # Keep last 1 (newest), delete all older ones (all except last 1 = [0:-1])
            OLD_DELTAS=$(echo "${ALL_DELTAS}" | jq -r '.[0:-1][]' 2>/dev/null)

            if [ -n "${OLD_DELTAS}" ]; then
              DELETE_COUNT=$((DELTA_COUNT - 1))
              echo "  Found ${DELETE_COUNT} old delta(s) to delete"

              # Create delete batch request JSON
              DELETE_OBJECTS=$(echo "${OLD_DELTAS}" | jq -R -s -c 'split("\n") | map(select(length > 0)) | map({Key: .}) | {Objects: ., Quiet: true}' 2>/dev/null)

              if [ -n "${DELETE_OBJECTS}" ]; then
                aws s3api delete-objects \
                  --bucket "${S3_BUCKET}" \
                  --delete "${DELETE_OBJECTS}" \
                  --region "${S3_REGION}" \
                  >/dev/null 2>&1

                echo "✓ Deleted ${DELETE_COUNT} old delta(s)"
              fi
            fi
          else
            echo "ℹ️  Only ${DELTA_COUNT} delta(s) exist, no cleanup needed"
          fi

          # Cleanup delta tarball
          rm -f "${DELTA_TARBALL}"

          # Cleanup: Unmount OverlayFS and remove workspace
          echo ""
          echo "Cleaning up..."

          if mount | grep -q "${CACHE_WORKSPACE}/merged"; then
            sudo umount "${CACHE_WORKSPACE}/merged" || {
              echo "⚠️  Warning: Failed to unmount ${CACHE_WORKSPACE}/merged"
              echo "Attempting lazy unmount..."
              sudo umount -l "${CACHE_WORKSPACE}/merged" || true
            }
          fi

          # Remove workspace
          rm -rf "${CACHE_WORKSPACE}"
        fi

        # Remove from registry
        if [ -f "${MOUNT_REGISTRY}" ]; then
          grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true
          mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true
        fi

        echo "✓ Cleanup completed"
        # Cleanup
        rm -f "${BASE_TARBALL}"

        echo ""
        echo "=========================================="
        echo "Cache save completed successfully"
        echo "Mode: ${CACHE_MODE}"
        echo "Cache key: ${PRIMARY_KEY}"
        if [ -n "${DELTA_SIZE:-}" ]; then
          echo "Delta size: ${DELTA_SIZE}"
        fi
        echo "Cache size: ${BASE_SIZE}"
        echo "Cache key: ${CACHE_KEY}"
        echo "=========================================="
@@ -1,45 +0,0 @@
name: 'Configure ccache'
description: 'Sets up ccache with consistent configuration'

inputs:
  cache_dir:
    description: 'Path to ccache directory'
    required: false
    default: '~/.ccache'
  max_size:
    description: 'Maximum cache size'
    required: false
    default: '2G'
  hash_dir:
    description: 'Whether to include directory paths in hash'
    required: false
    default: 'true'
  compiler_check:
    description: 'How to check compiler for changes'
    required: false
    default: 'content'

runs:
  using: 'composite'
  steps:
    - name: Configure ccache
      shell: bash
      run: |
        # Use ccache's default cache_dir (~/.ccache) - don't override it
        # This avoids tilde expansion issues when setting it explicitly

        # Create cache directory using ccache's default
        mkdir -p ~/.ccache

        # Configure ccache settings (but NOT cache_dir - use default)
        ccache --set-config=max_size=${{ inputs.max_size }}
        ccache --set-config=hash_dir=${{ inputs.hash_dir }}
        ccache --set-config=compiler_check=${{ inputs.compiler_check }}

        # Note: Not setting CCACHE_DIR - let ccache use its default (~/.ccache)

        # Print config for verification
        ccache -p

        # Zero statistics before the build
        ccache -z
66  .github/actions/xahau-ga-build/action.yml  (vendored)

@@ -47,6 +47,18 @@ inputs:
    description: 'GCC version to use for Clang toolchain (e.g. 11, 13)'
    required: false
    default: ''
  ccache_max_size:
    description: 'Maximum ccache size'
    required: false
    default: '2G'
  ccache_hash_dir:
    description: 'Whether to include directory paths in hash'
    required: false
    default: 'true'
  ccache_compiler_check:
    description: 'How to check compiler for changes'
    required: false
    default: 'content'
  aws-access-key-id:
    description: 'AWS Access Key ID for S3 cache storage'
    required: true
@@ -79,6 +91,31 @@ runs:
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

    - name: Configure ccache
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: |
        # Use ccache's default cache_dir (~/.ccache) - don't override it
        # This avoids tilde expansion issues when setting it explicitly

        # Create cache directory using ccache's default
        mkdir -p ~/.ccache

        # Configure ccache settings (but NOT cache_dir - use default)
        # This overwrites any cached config to ensure fresh configuration
        ccache --set-config=max_size=${{ inputs.ccache_max_size }}
        ccache --set-config=hash_dir=${{ inputs.ccache_hash_dir }}
        ccache --set-config=compiler_check=${{ inputs.ccache_compiler_check }}

        # Note: Not setting CCACHE_DIR - let ccache use its default (~/.ccache)

        # Print config for verification
        echo "=== ccache configuration ==="
        ccache -p

        # Zero statistics before the build
        ccache -z

    - name: Configure project
      shell: bash
      run: |
@@ -157,31 +194,22 @@ runs:
          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=${TOOLCHAIN_FILE} \
          -DCMAKE_BUILD_TYPE=${{ inputs.configuration }}

    - name: Show ccache config before build
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: |
        echo "=========================================="
        echo "ccache configuration before build"
        echo "=========================================="
        ccache -p
        echo ""

    - name: Build project
      shell: bash
      run: |
        cd ${{ inputs.build_dir }}
        # TEMPORARY: Add -v to see compile commands for ccache debugging
        cmake --build . --config ${{ inputs.configuration }} --parallel $(nproc) -- -v

    - name: Debug ccache directory
      if: inputs.ccache_enabled == 'true'
      shell: bash
      run: |
        echo "=== ccache directory contents ==="
        ls -laR ~/.ccache || echo "Directory doesn't exist"
        echo ""
        echo "=== Disk space ==="
        df -h ~
        echo ""
        echo "=== ccache config ==="
        ccache --show-config | head -30
        echo ""
        echo "=== Directory sizes ==="
        du -sh ~/.ccache 2>/dev/null || echo "No directory"
        find ~/.ccache -type f -name "*.conf" -o -type f ! -name "*.conf" 2>/dev/null | head -20 || true
        echo ""

    - name: Show ccache statistics
      if: inputs.ccache_enabled == 'true'
      shell: bash
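
With the configure-ccache action deleted and its logic folded into this build action, a caller passes the new ccache_* inputs directly. A sketch of such a job step; the input names come from the diff above, while every value and secret name shown is an illustrative assumption:

    # Hypothetical caller sketch - values and secret names are examples only.
    - name: Build
      uses: ./.github/actions/xahau-ga-build
      with:
        configuration: Release
        ccache_enabled: 'true'
        ccache_max_size: 2G
        ccache_hash_dir: 'true'
        ccache_compiler_check: content
        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
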
@@ -81,7 +81,6 @@ runs:
        restore-keys: |
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
          ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
        use-deltas: 'false'
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}

@@ -166,6 +165,5 @@ runs:
      with:
        path: ~/.conan2
        key: ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-${{ inputs.configuration }}
        use-deltas: 'false'
        aws-access-key-id: ${{ inputs.aws-access-key-id }}
        aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
11  .github/workflows/xahau-ga-macos.yml.disabled  (vendored)

@@ -78,14 +78,6 @@ jobs:
      - name: Install ccache
        run: brew install ccache

      - name: Configure ccache
        uses: ./.github/actions/xahau-configure-ccache
        with:
          max_size: 2G
          hash_dir: true
          compiler_check: content
          is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}

      - name: Check environment
        run: |
          echo "PATH:"
@@ -130,6 +122,9 @@ jobs:
          compiler-id: clang
          cache_version: ${{ env.CACHE_VERSION }}
          main_branch: ${{ env.MAIN_BRANCH_NAME }}
          stdlib: libcxx
          aws-access-key-id: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.XAHAUD_GITHUB_ACTIONS_CACHE_NIQ_AWS_ACCESS_KEY }}

      - name: Test
        run: |
7  .github/workflows/xahau-ga-nix.yml  (vendored)

@@ -231,13 +231,6 @@ jobs:
          # Install Conan 2
          pip install --upgrade "conan>=2.0,<3"

      - name: Configure ccache
        uses: ./.github/actions/xahau-configure-ccache
        with:
          max_size: 2G
          hash_dir: true
          compiler_check: content

      - name: Check environment
        run: |
          echo "PATH:"