From ce7b1c4f1dfca041a9e752d800dba1bd01140320 Mon Sep 17 00:00:00 2001 From: Nicholas Dudfield Date: Wed, 29 Oct 2025 13:07:40 +0700 Subject: [PATCH] feat: add custom S3+OverlayFS cache actions with configurable delta support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements drop-in replacement for actions/cache using S3 backend and OverlayFS for delta caching: - xahau-actions-cache-restore: Downloads immutable base + optional latest delta - xahau-actions-cache-save: Saves immutable bases (bootstrap/partial-match) or timestamped deltas (exact-match) Key features: - Immutable bases: One static base per key (first-write-wins, GitHub Actions semantics) - Timestamped deltas: Always-timestamped to eliminate concurrency issues - Configurable use-deltas parameter (default true): - true: For symbolic keys (branch-based) - massive bandwidth savings via incremental deltas - false: For content-based keys (hash-based) - base-only mode, no delta complexity - Three cache modes: bootstrap, partial-match (restore-keys), exact-match - OverlayFS integration: Automatic delta extraction via upperdir, whiteout file support - S3 lifecycle ready: Bases tagged 'type=base', deltas tagged 'type=delta-archive' Decision rule for use-deltas: - Content-based discriminator (hashFiles, commit SHA) → use-deltas: false - Symbolic discriminator (branch name, tag, PR) → use-deltas: true Also disables existing workflows temporarily during development. --- .../xahau-actions-cache-restore/action.yml | 282 +++++++++++++++ .../xahau-actions-cache-save/action.yml | 342 ++++++++++++++++++ .../workflows/build-in-docker.yml.disabled | 95 +++++ .github/workflows/clang-format.yml.disabled | 72 ++++ .github/workflows/levelization.yml.disabled | 49 +++ .../verify-generated-headers.yml.disabled | 36 ++ .github/workflows/xahau-ga-macos.yml.disabled | 149 ++++++++ .github/workflows/xahau-ga-nix.yml.disabled | 332 +++++++++++++++++ Builds/levelization/README.md | 6 +- 9 files changed, 1360 insertions(+), 3 deletions(-) create mode 100644 .github/actions/xahau-actions-cache-restore/action.yml create mode 100644 .github/actions/xahau-actions-cache-save/action.yml create mode 100644 .github/workflows/build-in-docker.yml.disabled create mode 100644 .github/workflows/clang-format.yml.disabled create mode 100644 .github/workflows/levelization.yml.disabled create mode 100644 .github/workflows/verify-generated-headers.yml.disabled create mode 100644 .github/workflows/xahau-ga-macos.yml.disabled create mode 100644 .github/workflows/xahau-ga-nix.yml.disabled diff --git a/.github/actions/xahau-actions-cache-restore/action.yml b/.github/actions/xahau-actions-cache-restore/action.yml new file mode 100644 index 000000000..120385713 --- /dev/null +++ b/.github/actions/xahau-actions-cache-restore/action.yml @@ -0,0 +1,282 @@ +name: 'Xahau Cache Restore (S3 + OverlayFS)' +description: 'Drop-in replacement for actions/cache/restore using S3 and OverlayFS for delta caching' + +inputs: + path: + description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)' + required: true + key: + description: 'An explicit key for restoring the cache' + required: true + restore-keys: + description: 'An ordered list of prefix-matched keys to use for restoring stale cache if no cache hit occurred for key' + required: false + default: '' + s3-bucket: + description: 'S3 bucket name for cache storage' + required: false + default: 'xahaud-github-actions-cache-niq' + s3-region: + 
description: 'S3 region' + required: false + default: 'us-east-1' + fail-on-cache-miss: + description: 'Fail the workflow if cache entry is not found' + required: false + default: 'false' + lookup-only: + description: 'Check if a cache entry exists for the given input(s) without downloading it' + required: false + default: 'false' + use-deltas: + description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.' + required: false + default: 'true' + # Note: Composite actions can't access secrets.* directly - must be passed from workflow + aws-access-key-id: + description: 'AWS Access Key ID for S3 access' + required: true + aws-secret-access-key: + description: 'AWS Secret Access Key for S3 access' + required: true + +outputs: + cache-hit: + description: 'A boolean value to indicate an exact match was found for the primary key' + value: ${{ steps.restore-cache.outputs.cache-hit }} + cache-primary-key: + description: 'The key that was used to restore the cache (may be from restore-keys)' + value: ${{ steps.restore-cache.outputs.cache-primary-key }} + cache-matched-key: + description: 'The key that matched (same as cache-primary-key for compatibility)' + value: ${{ steps.restore-cache.outputs.cache-primary-key }} + +runs: + using: 'composite' + steps: + - name: Restore cache from S3 with OverlayFS + id: restore-cache + shell: bash + env: + AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }} + AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }} + S3_BUCKET: ${{ inputs.s3-bucket }} + S3_REGION: ${{ inputs.s3-region }} + CACHE_KEY: ${{ inputs.key }} + RESTORE_KEYS: ${{ inputs.restore-keys }} + TARGET_PATH: ${{ inputs.path }} + FAIL_ON_MISS: ${{ inputs.fail-on-cache-miss }} + LOOKUP_ONLY: ${{ inputs.lookup-only }} + USE_DELTAS: ${{ inputs.use-deltas }} + run: | + set -euo pipefail + + echo "==========================================" + echo "Xahau Cache Restore (S3 + OverlayFS)" + echo "==========================================" + echo "Target path: ${TARGET_PATH}" + echo "Primary key: ${CACHE_KEY}" + echo "S3 bucket: s3://${S3_BUCKET}" + echo "Use deltas: ${USE_DELTAS}" + echo "" + + # Generate unique cache workspace + CACHE_HASH=$(echo "${CACHE_KEY}" | md5sum | cut -d' ' -f1) + CACHE_WORKSPACE="/tmp/xahau-cache-${CACHE_HASH}" + + echo "Cache workspace: ${CACHE_WORKSPACE}" + + # Create OverlayFS directory structure + mkdir -p "${CACHE_WORKSPACE}"/{base,upper,work,merged} + + # Function to try downloading from S3 + try_restore_key() { + local try_key="$1" + local s3_base="s3://${S3_BUCKET}/${try_key}-base.tar.zst" + + echo "Trying cache key: ${try_key}" + + # Check if base exists (one base per key, immutable) + echo "Checking for base layer..." + if aws s3 ls "${s3_base}" --region "${S3_REGION}" >/dev/null 2>&1; then + echo "✓ Found base layer: ${s3_base}" + + if [ "${LOOKUP_ONLY}" = "true" ]; then + echo "Lookup-only mode: cache exists, skipping download" + return 0 + fi + + # Download base layer + echo "Downloading base layer..." + aws s3 cp "${s3_base}" /tmp/cache-base.tar.zst --region "${S3_REGION}" --quiet + + # Extract base layer + echo "Extracting base layer..." + tar -xf /tmp/cache-base.tar.zst -C "${CACHE_WORKSPACE}/base" + rm /tmp/cache-base.tar.zst + + # Query for latest timestamped delta (only if use-deltas enabled) + if [ "${USE_DELTAS}" = "true" ]; then + echo "Querying for latest delta..." 
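+          # Query S3 for the newest delta for this key. JMESPath sorts the matching
+          # objects by LastModified and takes the last entry; with --output text the
+          # expression prints the literal string "None" when nothing matches, which
+          # is why the result is tested against both "" and "None" below.
+          # Illustrative object name: <key>-delta-20251029130740-ce7b1c4.tar.zst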
+ LATEST_DELTA=$(aws s3api list-objects-v2 \ + --bucket "${S3_BUCKET}" \ + --prefix "${try_key}-delta-" \ + --region "${S3_REGION}" \ + --query 'sort_by(Contents, &LastModified)[-1].Key' \ + --output text 2>/dev/null || echo "") + + if [ -n "${LATEST_DELTA}" ] && [ "${LATEST_DELTA}" != "None" ]; then + echo "✓ Found latest delta: ${LATEST_DELTA}" + echo "Downloading delta layer..." + aws s3 cp "s3://${S3_BUCKET}/${LATEST_DELTA}" /tmp/cache-delta.tar.zst --region "${S3_REGION}" --quiet + + echo "Extracting delta layer..." + tar -xf /tmp/cache-delta.tar.zst -C "${CACHE_WORKSPACE}/upper" 2>/dev/null || true + rm /tmp/cache-delta.tar.zst + else + echo "ℹ No delta layer found (this is fine for first build)" + fi + else + echo "ℹ Delta caching disabled (use-deltas: false)" + fi + + return 0 + else + echo "✗ No base layer found for key: ${try_key}" + return 1 + fi + } + + # Try primary key first + MATCHED_KEY="" + EXACT_MATCH="false" + + if try_restore_key "${CACHE_KEY}"; then + MATCHED_KEY="${CACHE_KEY}" + EXACT_MATCH="true" + echo "" + echo "🎯 Exact cache hit for key: ${CACHE_KEY}" + else + # Try restore-keys (prefix matching) + if [ -n "${RESTORE_KEYS}" ]; then + echo "" + echo "Primary key not found, trying restore-keys..." + + # Split restore-keys by newline + while IFS= read -r restore_key; do + # Skip empty lines + [ -z "${restore_key}" ] && continue + + # Trim whitespace + restore_key=$(echo "${restore_key}" | xargs) + + if try_restore_key "${restore_key}"; then + MATCHED_KEY="${restore_key}" + EXACT_MATCH="false" + echo "" + echo "✓ Cache restored from fallback key: ${restore_key}" + break + fi + done <<< "${RESTORE_KEYS}" + fi + fi + + # Check if we found anything + if [ -z "${MATCHED_KEY}" ]; then + echo "" + echo "❌ No cache found for key: ${CACHE_KEY}" + echo "This is BOOTSTRAP mode - first build for this cache key" + + if [ "${FAIL_ON_MISS}" = "true" ]; then + echo "fail-on-cache-miss is enabled, failing workflow" + exit 1 + fi + + # Set outputs for cache miss + echo "cache-hit=false" >> $GITHUB_OUTPUT + echo "cache-primary-key=" >> $GITHUB_OUTPUT + + # Create empty cache directory for bootstrap + mkdir -p "${TARGET_PATH}" + + # Record bootstrap mode for save action + # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas + # For bootstrap: workspace="bootstrap", matched_key=primary_key, exact_match=false + MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt" + echo "${TARGET_PATH}:bootstrap:${CACHE_KEY}:${CACHE_KEY}:false:${USE_DELTAS}" >> "${MOUNT_REGISTRY}" + + echo "" + echo "==========================================" + echo "Cache restore completed (bootstrap mode)" + echo "Created empty cache directory: ${TARGET_PATH}" + echo "==========================================" + exit 0 + fi + + # If lookup-only, we're done + if [ "${LOOKUP_ONLY}" = "true" ]; then + echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT + echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT + + # Clean up workspace + rm -rf "${CACHE_WORKSPACE}" + + echo "" + echo "==========================================" + echo "Cache lookup completed (lookup-only mode)" + echo "==========================================" + exit 0 + fi + + # Mount OverlayFS + echo "" + echo "Mounting OverlayFS..." 
+ sudo mount -t overlay overlay \ + -o lowerdir="${CACHE_WORKSPACE}/base",upperdir="${CACHE_WORKSPACE}/upper",workdir="${CACHE_WORKSPACE}/work" \ + "${CACHE_WORKSPACE}/merged" + + # Verify mount + if mount | grep -q "${CACHE_WORKSPACE}/merged"; then + echo "✓ OverlayFS mounted successfully" + else + echo "❌ Failed to mount OverlayFS" + exit 1 + fi + + # Create target directory parent if needed + TARGET_PARENT=$(dirname "${TARGET_PATH}") + mkdir -p "${TARGET_PARENT}" + + # Remove existing target if it exists + if [ -e "${TARGET_PATH}" ]; then + echo "Removing existing target: ${TARGET_PATH}" + rm -rf "${TARGET_PATH}" + fi + + # Symlink target path to merged view + echo "Creating symlink: ${TARGET_PATH} -> ${CACHE_WORKSPACE}/merged" + ln -s "${CACHE_WORKSPACE}/merged" "${TARGET_PATH}" + + # Save mount info for cleanup/save later + # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas + # This tells save action whether to create new base (partial match) or just delta (exact match) + MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt" + echo "${TARGET_PATH}:${CACHE_WORKSPACE}:${MATCHED_KEY}:${CACHE_KEY}:${EXACT_MATCH}:${USE_DELTAS}" >> "${MOUNT_REGISTRY}" + + # Set outputs + echo "cache-hit=${EXACT_MATCH}" >> $GITHUB_OUTPUT + echo "cache-primary-key=${MATCHED_KEY}" >> $GITHUB_OUTPUT + + # Show statistics + echo "" + echo "Cache statistics:" + echo " Base layer size: $(du -sh ${CACHE_WORKSPACE}/base 2>/dev/null | cut -f1 || echo '0')" + echo " Delta layer size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1 || echo '0')" + echo " Merged view size: $(du -sh ${CACHE_WORKSPACE}/merged 2>/dev/null | cut -f1 || echo '0')" + + echo "" + echo "==========================================" + echo "Cache restore completed successfully" + echo "Exact match: ${EXACT_MATCH}" + echo "Matched key: ${MATCHED_KEY}" + echo "==========================================" diff --git a/.github/actions/xahau-actions-cache-save/action.yml b/.github/actions/xahau-actions-cache-save/action.yml new file mode 100644 index 000000000..537980b82 --- /dev/null +++ b/.github/actions/xahau-actions-cache-save/action.yml @@ -0,0 +1,342 @@ +name: 'Xahau Cache Save (S3 + OverlayFS)' +description: 'Drop-in replacement for actions/cache/save using S3 and OverlayFS for delta caching' + +inputs: + path: + description: 'A list of files, directories, and wildcard patterns to cache (currently only single path supported)' + required: true + key: + description: 'An explicit key for saving the cache' + required: true + s3-bucket: + description: 'S3 bucket name for cache storage' + required: false + default: 'xahaud-github-actions-cache-niq' + s3-region: + description: 'S3 region' + required: false + default: 'us-east-1' + use-deltas: + description: 'Enable delta caching (download/upload incremental changes). Set to false for base-only caching.' 
+ required: false + default: 'true' + # Note: Composite actions can't access secrets.* directly - must be passed from workflow + aws-access-key-id: + description: 'AWS Access Key ID for S3 access' + required: true + aws-secret-access-key: + description: 'AWS Secret Access Key for S3 access' + required: true + +runs: + using: 'composite' + steps: + - name: Save cache to S3 with OverlayFS delta + shell: bash + env: + AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }} + AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }} + S3_BUCKET: ${{ inputs.s3-bucket }} + S3_REGION: ${{ inputs.s3-region }} + CACHE_KEY: ${{ inputs.key }} + TARGET_PATH: ${{ inputs.path }} + USE_DELTAS: ${{ inputs.use-deltas }} + run: | + set -euo pipefail + + echo "==========================================" + echo "Xahau Cache Save (S3 + OverlayFS)" + echo "==========================================" + echo "Target path: ${TARGET_PATH}" + echo "Cache key: ${CACHE_KEY}" + echo "S3 bucket: s3://${S3_BUCKET}" + echo "" + + # Find the cache workspace from mount registry + MOUNT_REGISTRY="/tmp/xahau-cache-mounts.txt" + + if [ ! -f "${MOUNT_REGISTRY}" ]; then + echo "⚠️ No cache mounts found (mount registry doesn't exist)" + echo "This usually means cache restore was not called, or there was no cache to restore." + echo "Skipping cache save." + exit 0 + fi + + # Find entry for this path + # Format: path:workspace:matched_key:primary_key:exact_match:use_deltas + # Bootstrap mode: path:bootstrap:key:key:false:true/false (workspace="bootstrap") + CACHE_WORKSPACE="" + MATCHED_KEY="" + PRIMARY_KEY="" + EXACT_MATCH="" + REGISTRY_USE_DELTAS="" + + while IFS=: read -r mount_path mount_workspace mount_matched_key mount_primary_key mount_exact_match mount_use_deltas; do + if [ "${mount_path}" = "${TARGET_PATH}" ]; then + CACHE_WORKSPACE="${mount_workspace}" + MATCHED_KEY="${mount_matched_key}" + PRIMARY_KEY="${mount_primary_key}" + EXACT_MATCH="${mount_exact_match}" + REGISTRY_USE_DELTAS="${mount_use_deltas}" + break + fi + done < "${MOUNT_REGISTRY}" + + if [ -z "${CACHE_WORKSPACE}" ] && [ -z "${MATCHED_KEY}" ]; then + echo "⚠️ No cache entry found for path: ${TARGET_PATH}" + echo "This usually means cache restore was not called for this path." + echo "Skipping cache save." + exit 0 + fi + + # Determine cache mode + if [ "${CACHE_WORKSPACE}" = "bootstrap" ]; then + CACHE_MODE="bootstrap" + PRIMARY_KEY="${MATCHED_KEY}" # In bootstrap, matched_key field contains primary key + echo "Cache mode: BOOTSTRAP (first build for this key)" + echo "Primary key: ${PRIMARY_KEY}" + elif [ "${EXACT_MATCH}" = "false" ]; then + CACHE_MODE="partial-match" + echo "Cache mode: PARTIAL MATCH (restore-key used)" + echo "Cache workspace: ${CACHE_WORKSPACE}" + echo "Matched key from restore: ${MATCHED_KEY}" + echo "Primary key (will save new base): ${PRIMARY_KEY}" + else + CACHE_MODE="exact-match" + echo "Cache mode: EXACT MATCH (cache hit)" + echo "Cache workspace: ${CACHE_WORKSPACE}" + echo "Matched key: ${MATCHED_KEY}" + fi + echo "Use deltas: ${REGISTRY_USE_DELTAS}" + echo "" + + # Handle different cache modes + if [ "${CACHE_MODE}" = "bootstrap" ]; then + # Bootstrap: Save entire cache as base layer (no OverlayFS was used) + echo "Bootstrap mode: Creating initial base layer from ${TARGET_PATH}" + + BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst" + echo "Creating base tarball..." + tar -cf - -C "${TARGET_PATH}" . 
| zstd -3 -T0 -q -o "${BASE_TARBALL}" + + BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1) + echo "✓ Base tarball created: ${BASE_SIZE}" + echo "" + + # Use static base name (one base per key, immutable) + S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst" + + # Check if base already exists (immutability - first write wins) + if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then + echo "⚠️ Base layer already exists: ${S3_BASE_KEY}" + echo "Skipping upload (immutability - first write wins, like GitHub Actions)" + else + echo "Uploading base layer to S3..." + echo " Key: ${PRIMARY_KEY}-base.tar.zst" + + aws s3 cp "${BASE_TARBALL}" "${S3_BASE_KEY}" \ + --region "${S3_REGION}" \ + --tagging "type=base" \ + --quiet + + echo "✓ Uploaded: ${S3_BASE_KEY}" + fi + + # Cleanup + rm -f "${BASE_TARBALL}" + + echo "" + echo "==========================================" + echo "Bootstrap cache save completed" + echo "Base size: ${BASE_SIZE}" + echo "Cache key: ${PRIMARY_KEY}" + echo "==========================================" + exit 0 + + elif [ "${CACHE_MODE}" = "partial-match" ]; then + # Partial match: Save merged view as new base ONLY (no delta) + # The delta is relative to the OLD base, not the NEW base we're creating + echo "Partial match mode: Saving new base layer for primary key" + echo "Note: Delta will NOT be saved (it's relative to old base)" + + BASE_TARBALL="/tmp/xahau-cache-base-$$.tar.zst" + echo "Creating base tarball from merged view..." + tar -cf - -C "${CACHE_WORKSPACE}/merged" . | zstd -3 -T0 -q -o "${BASE_TARBALL}" + + BASE_SIZE=$(du -h "${BASE_TARBALL}" | cut -f1) + echo "✓ Base tarball created: ${BASE_SIZE}" + echo "" + + # Use static base name (one base per key, immutable) + S3_BASE_KEY="s3://${S3_BUCKET}/${PRIMARY_KEY}-base.tar.zst" + + # Check if base already exists (immutability - first write wins) + if aws s3 ls "${S3_BASE_KEY}" --region "${S3_REGION}" >/dev/null 2>&1; then + echo "⚠️ Base layer already exists: ${S3_BASE_KEY}" + echo "Skipping upload (immutability - first write wins, like GitHub Actions)" + else + echo "Uploading new base layer to S3..." + echo " Key: ${PRIMARY_KEY}-base.tar.zst" + + aws s3 cp "${BASE_TARBALL}" "${S3_BASE_KEY}" \ + --region "${S3_REGION}" \ + --tagging "type=base" \ + --quiet + + echo "✓ Uploaded: ${S3_BASE_KEY}" + fi + + # Cleanup + rm -f "${BASE_TARBALL}" + + # Unmount and cleanup + echo "" + echo "Cleaning up..." + if mount | grep -q "${CACHE_WORKSPACE}/merged"; then + sudo umount "${CACHE_WORKSPACE}/merged" || { + echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged" + echo "Attempting lazy unmount..." 
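+          # umount -l (lazy) detaches the mount point immediately and lets the
+          # kernel complete the unmount once it is no longer busy, e.g. if a
+          # straggling process still holds a file open under the merged view.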
+ sudo umount -l "${CACHE_WORKSPACE}/merged" || true + } + fi + rm -rf "${CACHE_WORKSPACE}" + + # Remove from registry + if [ -f "${MOUNT_REGISTRY}" ]; then + grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true + mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true + fi + + echo "✓ Cleanup completed" + + echo "" + echo "==========================================" + echo "Partial match cache save completed" + echo "New base created for: ${PRIMARY_KEY}" + echo "Base size: ${BASE_SIZE}" + if [ "${REGISTRY_USE_DELTAS}" = "true" ]; then + echo "Next exact-match build will create deltas from this base" + else + echo "Next exact-match build will reuse this base (base-only mode)" + fi + echo "==========================================" + exit 0 + fi + + # For exact-match ONLY: Save delta (if use-deltas enabled) + if [ "${CACHE_MODE}" = "exact-match" ]; then + # If deltas are disabled, just cleanup and exit + if [ "${REGISTRY_USE_DELTAS}" != "true" ]; then + echo "ℹ️ Delta caching disabled (use-deltas: false)" + echo "Base already exists for this key, nothing to save." + + # Unmount and cleanup + echo "" + echo "Cleaning up..." + if mount | grep -q "${CACHE_WORKSPACE}/merged"; then + sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true + fi + rm -rf "${CACHE_WORKSPACE}" + + # Remove from registry + if [ -f "${MOUNT_REGISTRY}" ]; then + grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true + mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true + fi + + echo "" + echo "==========================================" + echo "Cache save completed (base-only mode)" + echo "==========================================" + exit 0 + fi + + # Check if upper layer has any changes + if [ -z "$(ls -A ${CACHE_WORKSPACE}/upper 2>/dev/null)" ]; then + echo "ℹ️ No changes detected in upper layer (cache is unchanged)" + echo "Skipping delta upload to save bandwidth." + + # Still unmount and cleanup + echo "" + echo "Cleaning up..." + sudo umount "${CACHE_WORKSPACE}/merged" 2>/dev/null || true + rm -rf "${CACHE_WORKSPACE}" + + echo "" + echo "==========================================" + echo "Cache save completed (no changes)" + echo "==========================================" + exit 0 + fi + + # Show delta statistics + echo "Delta layer statistics:" + echo " Files changed: $(find ${CACHE_WORKSPACE}/upper -type f 2>/dev/null | wc -l)" + echo " Delta size: $(du -sh ${CACHE_WORKSPACE}/upper 2>/dev/null | cut -f1)" + echo "" + + # Create delta tarball from upper layer + echo "Creating delta tarball..." + DELTA_TARBALL="/tmp/xahau-cache-delta-$$.tar.zst" + + tar -cf - -C "${CACHE_WORKSPACE}/upper" . | zstd -3 -T0 -q -o "${DELTA_TARBALL}" + + DELTA_SIZE=$(du -h "${DELTA_TARBALL}" | cut -f1) + echo "✓ Delta tarball created: ${DELTA_SIZE}" + echo "" + + # Upload timestamped delta (no overwrites = zero concurrency issues) + TIMESTAMP=$(date +%Y%m%d%H%M%S) + COMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown") + + # Use PRIMARY_KEY for delta (ensures deltas match their base) + S3_DELTA_TIMESTAMPED="s3://${S3_BUCKET}/${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst" + + echo "Uploading timestamped delta to S3..." 
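+          # Object names embed a timestamp plus short commit SHA, so concurrent
+          # jobs can never clobber each other's uploads; restore ignores the exact
+          # name and simply takes the object with the newest LastModified.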
+ echo " Key: ${PRIMARY_KEY}-delta-${TIMESTAMP}-${COMMIT_SHA}.tar.zst" + + # Upload with tag for auto-deletion after 7 days + aws s3 cp "${DELTA_TARBALL}" "${S3_DELTA_TIMESTAMPED}" \ + --region "${S3_REGION}" \ + --tagging "type=delta-archive" \ + --quiet + + echo "✓ Uploaded: ${S3_DELTA_TIMESTAMPED}" + echo " (tagged for auto-deletion after 7 days)" + + # Cleanup delta tarball + rm -f "${DELTA_TARBALL}" + + # Cleanup: Unmount OverlayFS and remove workspace + echo "" + echo "Cleaning up..." + + if mount | grep -q "${CACHE_WORKSPACE}/merged"; then + sudo umount "${CACHE_WORKSPACE}/merged" || { + echo "⚠️ Warning: Failed to unmount ${CACHE_WORKSPACE}/merged" + echo "Attempting lazy unmount..." + sudo umount -l "${CACHE_WORKSPACE}/merged" || true + } + fi + + # Remove workspace + rm -rf "${CACHE_WORKSPACE}" + fi + + # Remove from registry + if [ -f "${MOUNT_REGISTRY}" ]; then + grep -v "^${TARGET_PATH}:" "${MOUNT_REGISTRY}" > "${MOUNT_REGISTRY}.tmp" 2>/dev/null || true + mv "${MOUNT_REGISTRY}.tmp" "${MOUNT_REGISTRY}" 2>/dev/null || true + fi + + echo "✓ Cleanup completed" + + echo "" + echo "==========================================" + echo "Cache save completed successfully" + echo "Mode: ${CACHE_MODE}" + echo "Cache key: ${PRIMARY_KEY}" + if [ -n "${DELTA_SIZE:-}" ]; then + echo "Delta size: ${DELTA_SIZE}" + fi + echo "==========================================" diff --git a/.github/workflows/build-in-docker.yml.disabled b/.github/workflows/build-in-docker.yml.disabled new file mode 100644 index 000000000..e57dda8b4 --- /dev/null +++ b/.github/workflows/build-in-docker.yml.disabled @@ -0,0 +1,95 @@ +name: Build using Docker + +on: + push: + branches: ["dev", "candidate", "release", "jshooks"] + pull_request: + branches: ["dev", "candidate", "release", "jshooks"] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP: 1 + +jobs: + checkout: + runs-on: [self-hosted, vanity] + outputs: + checkout_path: ${{ steps.vars.outputs.checkout_path }} + steps: + - name: Prepare checkout path + id: vars + run: | + SAFE_BRANCH=$(echo "${{ github.ref_name }}" | sed -e 's/[^a-zA-Z0-9._-]/-/g') + CHECKOUT_PATH="${SAFE_BRANCH}-${{ github.sha }}" + echo "checkout_path=${CHECKOUT_PATH}" >> "$GITHUB_OUTPUT" + + - uses: actions/checkout@v4 + with: + path: ${{ steps.vars.outputs.checkout_path }} + clean: true + fetch-depth: 2 # Only get the last 2 commits, to avoid fetching all history + + build: + runs-on: [self-hosted, vanity] + needs: [checkout] + defaults: + run: + working-directory: ${{ needs.checkout.outputs.checkout_path }} + steps: + - name: Set Cleanup Script Path + run: | + echo "JOB_CLEANUP_SCRIPT=$(mktemp)" >> $GITHUB_ENV + + - name: Build using Docker + run: /bin/bash release-builder.sh + + - name: Stop Container (Cleanup) + if: always() + run: | + echo "Running cleanup script: $JOB_CLEANUP_SCRIPT" + /bin/bash -e -x "$JOB_CLEANUP_SCRIPT" + CLEANUP_EXIT_CODE=$? + + if [[ "$CLEANUP_EXIT_CODE" -eq 0 ]]; then + echo "Cleanup script succeeded." + rm -f "$JOB_CLEANUP_SCRIPT" + echo "Cleanup script removed." + else + echo "⚠️ Cleanup script failed! Keeping for debugging: $JOB_CLEANUP_SCRIPT" + fi + + if [[ "${DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP}" == "1" ]]; then + echo "🔍 Checking for leftover containers..." 
+ BUILD_CONTAINERS=$(docker ps --format '{{.Names}}' | grep '^xahaud_cached_builder' || echo "") + + if [[ -n "$BUILD_CONTAINERS" ]]; then + echo "⚠️ WARNING: Some build containers are still running" + echo "$BUILD_CONTAINERS" + else + echo "✅ No build containers found" + fi + fi + + tests: + runs-on: [self-hosted, vanity] + needs: [build, checkout] + defaults: + run: + working-directory: ${{ needs.checkout.outputs.checkout_path }} + steps: + - name: Unit tests + run: /bin/bash docker-unit-tests.sh + + cleanup: + runs-on: [self-hosted, vanity] + needs: [tests, checkout] + if: always() + steps: + - name: Cleanup workspace + run: | + CHECKOUT_PATH="${{ needs.checkout.outputs.checkout_path }}" + echo "Cleaning workspace for ${CHECKOUT_PATH}" + rm -rf "${{ github.workspace }}/${CHECKOUT_PATH}" diff --git a/.github/workflows/clang-format.yml.disabled b/.github/workflows/clang-format.yml.disabled new file mode 100644 index 000000000..00f860dec --- /dev/null +++ b/.github/workflows/clang-format.yml.disabled @@ -0,0 +1,72 @@ +name: clang-format + +on: [push, pull_request] + +jobs: + check: + runs-on: ubuntu-22.04 + env: + CLANG_VERSION: 10 + steps: + - uses: actions/checkout@v3 + # - name: Install clang-format + # run: | + # codename=$( lsb_release --codename --short ) + # sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null < "$tmp" + diff -u ${{ matrix.target }} "$tmp" diff --git a/.github/workflows/xahau-ga-macos.yml.disabled b/.github/workflows/xahau-ga-macos.yml.disabled new file mode 100644 index 000000000..66c7e6877 --- /dev/null +++ b/.github/workflows/xahau-ga-macos.yml.disabled @@ -0,0 +1,149 @@ +name: MacOS - GA Runner + +on: + push: + branches: ["dev", "candidate", "release"] + pull_request: + branches: ["dev", "candidate", "release"] + schedule: + - cron: '0 0 * * *' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + test: + strategy: + matrix: + generator: + - Ninja + configuration: + - Debug + runs-on: macos-15 + env: + build_dir: .build + # Bump this number to invalidate all caches globally. + CACHE_VERSION: 1 + MAIN_BRANCH_NAME: dev + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Conan + run: | + brew install conan + # Verify Conan 2 is installed + conan --version + + - name: Install Coreutils + run: | + brew install coreutils + echo "Num proc: $(nproc)" + + - name: Install Ninja + if: matrix.generator == 'Ninja' + run: brew install ninja + + - name: Install Python + run: | + if which python3 > /dev/null 2>&1; then + echo "Python 3 executable exists" + python3 --version + else + brew install python@3.12 + fi + # Create 'python' symlink if it doesn't exist (for tools expecting 'python') + if ! 
which python > /dev/null 2>&1; then + sudo ln -sf $(which python3) /usr/local/bin/python + fi + + - name: Install CMake + run: | + # Install CMake 3.x to match local dev environments + # With Conan 2 and the policy args passed to CMake, newer versions + # can have issues with dependencies that require cmake_minimum_required < 3.5 + brew uninstall cmake --ignore-dependencies 2>/dev/null || true + + # Download and install CMake 3.31.7 directly + curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz + tar -xzf cmake.tar.gz + + # Move the entire CMake.app to /Applications + sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/ + + echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH + /Applications/CMake.app/Contents/bin/cmake --version + + - name: Install ccache + run: brew install ccache + + - name: Configure ccache + uses: ./.github/actions/xahau-configure-ccache + with: + max_size: 2G + hash_dir: true + compiler_check: content + is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }} + + - name: Check environment + run: | + echo "PATH:" + echo "${PATH}" | tr ':' '\n' + which python && python --version || echo "Python not found" + which conan && conan --version || echo "Conan not found" + which cmake && cmake --version || echo "CMake not found" + clang --version + ccache --version + echo "---- Full Environment ----" + env + + - name: Configure Conan + run: | + # Create the default profile directory if it doesn't exist + mkdir -p ~/.conan2/profiles + + # Detect compiler version + COMPILER_VERSION=$(clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+') + + # Create profile with our specific settings + cat > ~/.conan2/profiles/default < 3 > 2 > 1, Clang will + # pick GCC 11 over our renamed versions. It's dumb but it works! + # + # Example: GCC 12→1, GCC 13→2, GCC 14→3, so Clang picks 11 (highest number) + if [ -n "${{ matrix.clang_gcc_toolchain }}" ] && [ "${{ matrix.compiler_version }}" -lt "16" ]; then + echo "=== Hiding GCC versions newer than ${{ matrix.clang_gcc_toolchain }} for Clang < 16 ===" + target_version=${{ matrix.clang_gcc_toolchain }} + counter=1 # Start with 1 - these will be seen as "GCC version 1, 2, 3" etc + for dir in /usr/lib/gcc/x86_64-linux-gnu/*/; do + if [ -d "$dir" ]; then + version=$(basename "$dir") + # Check if version is numeric and greater than target + if [[ "$version" =~ ^[0-9]+$ ]] && [ "$version" -gt "$target_version" ]; then + echo "Hiding GCC $version -> renaming to $counter (will be seen as GCC version $counter)" + # Safety check: ensure target doesn't already exist + if [ ! 
-e "/usr/lib/gcc/x86_64-linux-gnu/$counter" ]; then + sudo mv "$dir" "/usr/lib/gcc/x86_64-linux-gnu/$counter" + else + echo "ERROR: Cannot rename GCC $version - /usr/lib/gcc/x86_64-linux-gnu/$counter already exists" + exit 1 + fi + counter=$((counter + 1)) + fi + fi + done + fi + + # Verify what Clang will use + if [ -n "${{ matrix.clang_gcc_toolchain }}" ]; then + echo "=== Verifying GCC toolchain selection ===" + echo "Available GCC versions:" + ls -la /usr/lib/gcc/x86_64-linux-gnu/ | grep -E "^d.*[0-9]+$" || true + + echo "" + echo "Clang's detected GCC installation:" + ${{ matrix.cxx }} -v -E -x c++ /dev/null -o /dev/null 2>&1 | grep "Found candidate GCC installation" || true + fi + + # Install libc++ dev packages if using libc++ (not needed for libstdc++) + if [ "${{ matrix.stdlib }}" = "libcxx" ]; then + sudo apt-get install -y libc++-${{ matrix.compiler_version }}-dev libc++abi-${{ matrix.compiler_version }}-dev + fi + + # Install Conan 2 + pip install --upgrade "conan>=2.0,<3" + + - name: Configure ccache + uses: ./.github/actions/xahau-configure-ccache + with: + max_size: 2G + hash_dir: true + compiler_check: content + is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }} + + - name: Configure Conan + run: | + # Create the default profile directory if it doesn't exist + mkdir -p ~/.conan2/profiles + + # Determine the correct libcxx based on stdlib parameter + if [ "${{ matrix.stdlib }}" = "libcxx" ]; then + LIBCXX="libc++" + else + LIBCXX="libstdc++11" + fi + + # Create profile with our specific settings + cat > ~/.conan2/profiles/default <> "$GITHUB_OUTPUT" + echo "Using artifact name: ${ARTIFACT_NAME}" + + - name: Debug build directory + run: | + echo "Checking build directory contents: ${{ env.build_dir }}" + ls -la ${{ env.build_dir }} || echo "Build directory not found or empty" + + - name: Run tests + run: | + # Ensure the binary exists before trying to run + if [ -f "${{ env.build_dir }}/rippled" ]; then + ${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc) + else + echo "Error: rippled executable not found in ${{ env.build_dir }}" + exit 1 + fi diff --git a/Builds/levelization/README.md b/Builds/levelization/README.md index 4ff3a5423..5ba20fcbe 100644 --- a/Builds/levelization/README.md +++ b/Builds/levelization/README.md @@ -72,15 +72,15 @@ It generates many files of [results](results): desired as described above. In a perfect repo, this file will be empty. This file is committed to the repo, and is used by the [levelization - Github workflow](../../.github/workflows/levelization.yml) to validate + Github workflow](../../.github/workflows/levelization.yml.disabled) to validate that nothing changed. * [`ordering.txt`](results/ordering.txt): A list showing relationships between modules where there are no loops as they actually exist, as opposed to how they are desired as described above. This file is committed to the repo, and is used by the [levelization - Github workflow](../../.github/workflows/levelization.yml) to validate + Github workflow](../../.github/workflows/levelization.yml.disabled) to validate that nothing changed. -* [`levelization.yml`](../../.github/workflows/levelization.yml) +* [`levelization.yml`](../../.github/workflows/levelization.yml.disabled) Github Actions workflow to test that levelization loops haven't changed. Unfortunately, if changes are detected, it can't tell if they are improvements or not, so if you have resolved any issues or