From fde24ace60e7bec75aec0afbd2aaab3a985c02a1 Mon Sep 17 00:00:00 2001 From: Joe Doss Date: Wed, 22 Apr 2026 20:28:47 -0500 Subject: [PATCH] Warn and exit non-zero on Podman/Infisical secret drift MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `_register_secrets` only deletes-then-recreates the names it is given, so any `<workload>--*` Podman secret that falls out of the fetch persists. It still resolves via the shell driver, but `_generate_drop_in` only writes `Secret=` lines for keys in the current fetch, so containers boot without the matching env var. This failed silently when a workload's secrets moved into an Infisical subfolder and `recursive: true` was not set — the drop-in regenerated without those keys, the stale Podman secrets stayed functional, and nobody noticed until a container broke. Fix the silence: - Between `_register_secrets` and `_generate_drop_in`, compare the `<workload>--*` namespace against the fetched set and log a WARNING per stale name with a one-line remediation pointer. - Accumulate drift across workloads; `run_setup` raises `DriftDetectedError` at the end so the setup systemd unit (and `psi cache refresh`) exit non-zero. - Extend `psi setup --dry-run` to diff each workload's drop-in `Secret=` targets against its `<workload>--*` Podman secrets and report both directions per workload. 
--- psi/errors.py | 4 + psi/setup.py | 174 ++++++++++++++++++++++++++++-- tests/test_dry_run_setup.py | 205 ++++++++++++++++++++++++++++++++++++ tests/test_setup.py | 175 ++++++++++++++++++++++++++++-- 4 files changed, 544 insertions(+), 14 deletions(-) diff --git a/psi/errors.py b/psi/errors.py index 32f5d48..4c4d356 100644 --- a/psi/errors.py +++ b/psi/errors.py @@ -21,3 +21,7 @@ def __init__(self, message: str, *, provider_name: str = "") -> None: class SecretNotFoundError(PsiError): """Secret mapping not found in state directory.""" + + +class DriftDetectedError(PsiError): + """Podman secret state diverged from the fetch — drop-ins are incomplete.""" diff --git a/psi/setup.py b/psi/setup.py index f609ed2..0eaca95 100644 --- a/psi/setup.py +++ b/psi/setup.py @@ -9,7 +9,7 @@ import httpx from loguru import logger -from psi.errors import ProviderError +from psi.errors import DriftDetectedError, ProviderError from psi.systemd import daemon_reload if TYPE_CHECKING: @@ -40,6 +40,12 @@ def run_setup( Args: settings: PSI configuration. provider: If set, only process workloads using this provider. + + Raises: + DriftDetectedError: when one or more Podman secrets under ``--*`` + are missing from the current fetch. Drop-ins are still written + and systemd is still reloaded — the error fires at the end so + the caller (and the setup systemd unit) sees a non-zero exit. """ settings.state_dir.mkdir(parents=True, exist_ok=True) @@ -47,6 +53,7 @@ def run_setup( # Keyed by the canonical mapping bytes so the caller can compute the # HMAC cache key once the cache object is available. 
values_by_mapping: dict[bytes, bytes] = {} + drift: list[str] = [] try: for workload_name, workload in settings.workloads.items(): @@ -56,7 +63,7 @@ def run_setup( logger.info("Workload: {}", workload_name) if workload.provider == "infisical": - _setup_infisical_workload(settings, workload_name, values_by_mapping) + _setup_infisical_workload(settings, workload_name, values_by_mapping, drift) elif workload.provider == "nitrokeyhsm": logger.info("Nitrokey HSM workload — secrets created via 'psi nitrokeyhsm store'") else: @@ -75,6 +82,17 @@ def run_setup( daemon_reload(settings.scope) logger.info("Setup complete.") + if drift: + msg = ( + f"Drift detected: {len(drift)} Podman secret(s) not present in " + "this fetch — drop-ins will not reference them, so containers " + "will boot without those env vars. Add 'recursive: true' to the " + "source(s) in config.yaml if secrets live in a subfolder, or " + "remove the stale secrets with 'podman secret rm'. Run " + "'psi setup --dry-run' for per-workload details." 
+ ) + raise DriftDetectedError(msg) + def _open_setup_cache(settings: PsiSettings) -> Cache | None: """Open the cache for write during setup, or return None on any failure.""" @@ -124,12 +142,13 @@ def _setup_infisical_workload( settings: PsiSettings, workload_name: str, values_by_mapping: dict[bytes, bytes], + drift: list[str], ) -> None: """Run Infisical-specific setup for a workload with retry.""" last_exc: Exception | None = None for attempt in range(len(_RETRY_DELAYS) + 1): try: - _fetch_and_register_infisical(settings, workload_name, values_by_mapping) + _fetch_and_register_infisical(settings, workload_name, values_by_mapping, drift) return except (httpx.ConnectError, httpx.HTTPStatusError, ProviderError) as e: cause = e.__cause__ if isinstance(e, ProviderError) else e @@ -154,6 +173,7 @@ def _fetch_and_register_infisical( settings: PsiSettings, workload_name: str, values_by_mapping: dict[bytes, bytes], + drift: list[str], ) -> None: """Fetch secrets from Infisical and register with Podman. @@ -162,6 +182,13 @@ def _fetch_and_register_infisical( is available. Keying by mapping content makes the cache survive Podman's delete+create churn — the same mapping always produces the same cache key, regardless of the hex ID Podman has assigned to it today. + + Between ``_register_secrets`` and ``_generate_drop_in``, compares the + ``--*`` Podman namespace against ``merged`` and appends any + stale names (present in Podman, absent from this fetch) to ``drift``. + Logs a warning per item. The drop-in is still generated from ``merged`` + alone — this keeps the fix local to the fetch, and the caller decides + what to do about the accumulated drift (``run_setup`` raises at the end). 
""" from psi.provider import mapping_cache_bytes, parse_mapping from psi.providers.infisical import InfisicalProvider @@ -211,6 +238,20 @@ def _fetch_and_register_infisical( logger.info("Merged: {} unique secrets", len(merged)) _register_secrets(settings, workload_name, merged) + + orphans = _check_workload_drift(workload_name, merged) + for orphan in orphans: + logger.warning( + "Drift: Podman secret '{}' is not in this fetch — the " + "drop-in will not reference it. If the key lives in an " + "Infisical subfolder, add 'recursive: true' to the source " + "in config.yaml. Otherwise remove the stale secret: " + "podman secret rm {}", + orphan, + orphan, + ) + drift.extend(orphans) + _generate_drop_in(settings, workload_name, merged) for key, value in values.items(): @@ -291,6 +332,11 @@ def dry_run_setup(settings: PsiSettings) -> None: - ``orphaned`` — no mapping file in ``state_dir``. A lookup would return 404. Candidate for a future ``psi orphans --prune``. + For each configured workload, also diffs ``--*`` Podman secrets + against the ``Secret=`` targets in its drop-in. Drift on either side — + Podman secrets missing from the drop-in, or drop-in references with no + backing Podman secret — is reported per-workload. + Does not fetch from Infisical/HSM or contact anything other than the local Podman API and the on-disk ``state_dir``. Safe to run at any time. """ @@ -308,7 +354,60 @@ def dry_run_setup(settings: PsiSettings) -> None: raise ProviderError(msg, provider_name="podman") from e managed, stale, orphaned = _classify_secrets(secrets, settings.state_dir, current_opts) - _print_dry_run_report(managed, stale, orphaned) + drift = _workload_dropin_drift(settings, secrets) + _print_dry_run_report(managed, stale, orphaned, drift) + + +def _parse_dropin_secret_targets(dropin_path: Path) -> set[str]: + """Parse ``Secret=,...`` lines from a drop-in file. + + Returns the set of Podman secret names (the first comma-separated field + of each ``Secret=`` value). 
Returns an empty set if the file does not + exist — this matches the "no drop-in yet" state before first setup. + """ + if not dropin_path.exists(): + return set() + names: set[str] = set() + for line in dropin_path.read_text().splitlines(): + stripped = line.strip() + if not stripped.startswith("Secret="): + continue + value = stripped[len("Secret=") :] + name = value.split(",", 1)[0].strip() + if name: + names.add(name) + return names + + +def _workload_dropin_drift( + settings: PsiSettings, + secrets: list[dict], +) -> dict[str, dict[str, list[str]]]: + """Per-workload diff between ``--*`` Podman secrets and drop-in targets. + + Returns a dict keyed by workload name, with each value shaped as:: + + { + "in_podman_not_in_dropin": [...], # stale Podman secrets + "in_dropin_not_in_podman": [...], # dangling drop-in refs + } + + Only workloads with drift on either side are included. Sorted lists for + stable output. + """ + result: dict[str, dict[str, list[str]]] = {} + for workload_name in settings.workloads: + podman_names = _workload_podman_names(workload_name, secrets) + dropin_path = settings.systemd_dir / f"{workload_name}.container.d" / "50-secrets.conf" + dropin_names = _parse_dropin_secret_targets(dropin_path) + missing_from_dropin = sorted(podman_names - dropin_names) + missing_from_podman = sorted(dropin_names - podman_names) + if missing_from_dropin or missing_from_podman: + result[workload_name] = { + "in_podman_not_in_dropin": missing_from_dropin, + "in_dropin_not_in_podman": missing_from_podman, + } + return result _SHELL_OPT_KEYS = ("lookup", "store", "delete", "list") @@ -340,6 +439,46 @@ def _list_podman_shell_secrets() -> list[dict]: return [s for s in secrets if s.get("Spec", {}).get("Driver", {}).get("Name") == "shell"] +def _workload_podman_names(workload_name: str, secrets: list[dict]) -> set[str]: + """Return Podman shell-secret names matching ``--*``.""" + prefix = f"{workload_name}--" + return { + s["Spec"]["Name"] for s in secrets if 
s.get("Spec", {}).get("Name", "").startswith(prefix) + } + + +def _check_workload_drift( + workload_name: str, + merged: dict[str, str], +) -> list[str]: + """Return Podman secrets in ``--*`` namespace absent from ``merged``. + + These are typically subfolder keys fetched by a previous ``psi setup`` + run with different source paths or with ``recursive: true`` set, and + never removed — ``_register_secrets`` only deletes-then-recreates the + names it's given, so anything that falls out of the fetch persists. + Such secrets still resolve via the shell driver (direct lookup by key + still hits Infisical) but the drop-in never references them, so + containers boot without those env vars. + + Returns a sorted list. Returns an empty list if the Podman API is + unreachable; the primary fetch-and-register path would already have + failed loudly in that case. + """ + expected = {f"{workload_name}--{key}" for key in merged} + try: + secrets = _list_podman_shell_secrets() + except httpx.HTTPError as e: + logger.warning( + "Cannot list Podman secrets to check drift for '{}': {}", + workload_name, + e, + ) + return [] + existing = _workload_podman_names(workload_name, secrets) + return sorted(existing - expected) + + def _classify_secrets( secrets: list[dict], state_dir: Path, @@ -372,7 +511,12 @@ def _classify_secrets( return managed, stale, orphaned -def _print_dry_run_report(managed: list[str], stale: list[str], orphaned: list[str]) -> None: +def _print_dry_run_report( + managed: list[str], + stale: list[str], + orphaned: list[str], + drift: dict[str, dict[str, list[str]]], +) -> None: from rich.console import Console from rich.table import Table @@ -385,6 +529,7 @@ def _print_dry_run_report(managed: list[str], stale: list[str], orphaned: list[s summary.add_row("[green]managed[/green]", str(len(managed))) summary.add_row("[yellow]stale-opts[/yellow]", str(len(stale))) summary.add_row("[red]orphaned[/red]", str(len(orphaned))) + summary.add_row("[red]workload drift[/red]", 
str(len(drift))) console.print(summary) if stale: @@ -402,5 +547,22 @@ def _print_dry_run_report(managed: list[str], stale: list[str], orphaned: list[s for name in orphaned: console.print(f" {name}") - if not stale and not orphaned: + if drift: + console.print( + "\n[red]Workload drift[/red] — drop-in and Podman registry diverge. " + "Containers will miss any env vars listed as " + "[bold]in Podman, not in drop-in[/bold]:" + ) + for workload_name, groups in drift.items(): + console.print(f" [bold]{workload_name}[/bold]") + if groups["in_podman_not_in_dropin"]: + console.print( + " in Podman, not in drop-in: " + ", ".join(groups["in_podman_not_in_dropin"]) + ) + if groups["in_dropin_not_in_podman"]: + console.print( + " in drop-in, not in Podman: " + ", ".join(groups["in_dropin_not_in_podman"]) + ) + + if not stale and not orphaned and not drift: console.print("\n[green]All secrets are managed — nothing to do.[/green]") diff --git a/tests/test_dry_run_setup.py b/tests/test_dry_run_setup.py index 39cc8a3..958f0a2 100644 --- a/tests/test_dry_run_setup.py +++ b/tests/test_dry_run_setup.py @@ -13,6 +13,8 @@ from psi.setup import ( _classify_secrets, _parse_driver_opts, + _parse_dropin_secret_targets, + _workload_dropin_drift, dry_run_setup, ) @@ -26,8 +28,11 @@ def _fake_settings(tmp_path: Path): settings = MagicMock() settings.state_dir = tmp_path / "state" settings.state_dir.mkdir() + settings.systemd_dir = tmp_path / "systemd" + settings.systemd_dir.mkdir() settings.scope = SystemdScope.SYSTEM settings.socket_token = None + settings.workloads = {} return settings @@ -206,3 +211,203 @@ def test_prints_report_and_does_not_mutate( "id-ok", "id-stale", ] + + +class TestParseDropinSecretTargets: + def test_returns_empty_set_when_file_missing(self, tmp_path: Path) -> None: + assert _parse_dropin_secret_targets(tmp_path / "missing.conf") == set() + + def test_parses_simple_secret_lines(self, tmp_path: Path) -> None: + conf = tmp_path / "50-secrets.conf" + conf.write_text( + 
"[Container]\n" + "Secret=myapp--DB_URL,type=env,target=DB_URL\n" + "Secret=myapp--API_KEY,type=env,target=API_KEY\n" + ) + assert _parse_dropin_secret_targets(conf) == { + "myapp--DB_URL", + "myapp--API_KEY", + } + + def test_ignores_non_secret_lines(self, tmp_path: Path) -> None: + conf = tmp_path / "50-secrets.conf" + conf.write_text( + "[Unit]\n" + "After=psi-secrets-setup.service\n" + "Wants=psi-secrets-setup.service\n" + "\n" + "[Container]\n" + "Secret=myapp--DB_URL,type=env,target=DB_URL\n" + ) + assert _parse_dropin_secret_targets(conf) == {"myapp--DB_URL"} + + def test_handles_template_unit_names(self, tmp_path: Path) -> None: + conf = tmp_path / "50-secrets.conf" + conf.write_text("Secret=windmill-worker@--DB_HOST,type=env,target=DB_HOST\n") + assert _parse_dropin_secret_targets(conf) == {"windmill-worker@--DB_HOST"} + + def test_empty_file_returns_empty_set(self, tmp_path: Path) -> None: + conf = tmp_path / "50-secrets.conf" + conf.write_text("") + assert _parse_dropin_secret_targets(conf) == set() + + +def _write_dropin(settings_obj, workload_name: str, names: list[str]) -> None: + path = settings_obj.systemd_dir / f"{workload_name}.container.d" + path.mkdir(parents=True, exist_ok=True) + lines = ["[Container]"] + for n in names: + target = n.split("--", 1)[1] + lines.append(f"Secret={n},type=env,target={target}") + (path / "50-secrets.conf").write_text("\n".join(lines) + "\n") + + +class TestWorkloadDropinDrift: + def test_no_drift_when_dropin_matches_podman(self, tmp_path: Path) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + _write_dropin(settings, "myapp", ["myapp--DB_URL", "myapp--API_KEY"]) + secrets = [ + _shell_secret("myapp--DB_URL"), + _shell_secret("myapp--API_KEY"), + ] + assert _workload_dropin_drift(settings, secrets) == {} + + def test_detects_podman_secret_missing_from_dropin(self, tmp_path: Path) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + 
_write_dropin(settings, "myapp", ["myapp--DB_URL"]) + secrets = [ + _shell_secret("myapp--DB_URL"), + _shell_secret("myapp--MODE"), + _shell_secret("myapp--NUM_WORKERS"), + ] + drift = _workload_dropin_drift(settings, secrets) + assert drift == { + "myapp": { + "in_podman_not_in_dropin": ["myapp--MODE", "myapp--NUM_WORKERS"], + "in_dropin_not_in_podman": [], + } + } + + def test_detects_dangling_dropin_reference(self, tmp_path: Path) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + _write_dropin(settings, "myapp", ["myapp--DB_URL", "myapp--GHOST"]) + secrets = [_shell_secret("myapp--DB_URL")] + drift = _workload_dropin_drift(settings, secrets) + assert drift == { + "myapp": { + "in_podman_not_in_dropin": [], + "in_dropin_not_in_podman": ["myapp--GHOST"], + } + } + + def test_both_directions_reported(self, tmp_path: Path) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + _write_dropin(settings, "myapp", ["myapp--DB_URL", "myapp--GHOST"]) + secrets = [ + _shell_secret("myapp--DB_URL"), + _shell_secret("myapp--MODE"), + ] + drift = _workload_dropin_drift(settings, secrets) + assert drift == { + "myapp": { + "in_podman_not_in_dropin": ["myapp--MODE"], + "in_dropin_not_in_podman": ["myapp--GHOST"], + } + } + + def test_workloads_without_drift_omitted(self, tmp_path: Path) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"clean": None, "dirty": None} + _write_dropin(settings, "clean", ["clean--OK"]) + _write_dropin(settings, "dirty", ["dirty--ONLY_IN_DROPIN"]) + secrets = [ + _shell_secret("clean--OK"), + _shell_secret("dirty--ONLY_IN_PODMAN"), + ] + drift = _workload_dropin_drift(settings, secrets) + assert list(drift.keys()) == ["dirty"] + + def test_no_dropin_but_podman_has_secrets(self, tmp_path: Path) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + secrets = [_shell_secret("myapp--DB_URL")] + drift = 
_workload_dropin_drift(settings, secrets) + assert drift == { + "myapp": { + "in_podman_not_in_dropin": ["myapp--DB_URL"], + "in_dropin_not_in_podman": [], + } + } + + +class TestDryRunDriftSection: + def test_drift_section_printed_with_per_workload_details( + self, tmp_path: Path, capsys: pytest.CaptureFixture[str] + ) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + _write_dropin(settings, "myapp", ["myapp--DB_URL"]) + + from psi.unitgen import generate_driver_conf + + opts = _parse_driver_opts(generate_driver_conf(settings.scope, token=None)) + (settings.state_dir / "id-1").write_text("{}") + (settings.state_dir / "id-2").write_text("{}") + + secrets = [ + { + "ID": "id-1", + "Spec": { + "Name": "myapp--DB_URL", + "Driver": {"Name": "shell", "Options": opts}, + }, + }, + { + "ID": "id-2", + "Spec": { + "Name": "myapp--MODE", + "Driver": {"Name": "shell", "Options": opts}, + }, + }, + ] + + with patch("psi.setup._list_podman_shell_secrets", return_value=secrets): + dry_run_setup(settings) + + out = capsys.readouterr().out + assert "Workload drift" in out + assert "myapp" in out + assert "myapp--MODE" in out + assert "in Podman, not in drop-in" in out + + def test_no_drift_section_when_clean( + self, tmp_path: Path, capsys: pytest.CaptureFixture[str] + ) -> None: + settings = _fake_settings(tmp_path) + settings.workloads = {"myapp": None} + _write_dropin(settings, "myapp", ["myapp--DB_URL"]) + + from psi.unitgen import generate_driver_conf + + opts = _parse_driver_opts(generate_driver_conf(settings.scope, token=None)) + (settings.state_dir / "id-1").write_text("{}") + + secrets = [ + { + "ID": "id-1", + "Spec": { + "Name": "myapp--DB_URL", + "Driver": {"Name": "shell", "Options": opts}, + }, + }, + ] + + with patch("psi.setup._list_podman_shell_secrets", return_value=secrets): + dry_run_setup(settings) + + out = capsys.readouterr().out + assert "All secrets are managed" in out diff --git a/tests/test_setup.py 
b/tests/test_setup.py index ee64d04..7254b2f 100644 --- a/tests/test_setup.py +++ b/tests/test_setup.py @@ -14,10 +14,12 @@ from psi.settings import PsiSettings from psi.setup import ( _RETRY_DELAYS, + _check_workload_drift, _generate_drop_in, _is_retryable, _register_secrets, _setup_infisical_workload, + run_setup, ) if TYPE_CHECKING: @@ -209,7 +211,7 @@ class TestSetupRetry: def test_retries_on_connect_error_then_succeeds(self, tmp_path: Path) -> None: call_count = 0 - def mock_fetch(settings, workload_name, cache_updates): + def mock_fetch(settings, workload_name, cache_updates, drift): nonlocal call_count call_count += 1 if call_count < 3: @@ -229,7 +231,7 @@ def mock_fetch(settings, workload_name, cache_updates): patch("psi.setup._fetch_and_register_infisical", side_effect=mock_fetch), patch("psi.setup.time.sleep"), ): - _setup_infisical_workload(settings, "myapp", {}) + _setup_infisical_workload(settings, "myapp", {}, []) assert call_count == 3 @@ -252,7 +254,7 @@ def test_raises_after_all_retries_exhausted(self, tmp_path: Path) -> None: patch("psi.setup.time.sleep"), pytest.raises(httpx.ConnectError, match="refused"), ): - _setup_infisical_workload(settings, "myapp", {}) + _setup_infisical_workload(settings, "myapp", {}, []) def test_non_retryable_error_raises_immediately(self, tmp_path: Path) -> None: request = httpx.Request("GET", "http://test") @@ -276,7 +278,7 @@ def test_non_retryable_error_raises_immediately(self, tmp_path: Path) -> None: ), pytest.raises(httpx.HTTPStatusError, match="unauthorized"), ): - _setup_infisical_workload(settings, "myapp", {}) + _setup_infisical_workload(settings, "myapp", {}, []) def test_auth_502_retries_then_raises_provider_error(self, tmp_path: Path) -> None: """Auth endpoint 502 wrapped as ProviderError is retried via __cause__.""" @@ -284,7 +286,7 @@ def test_auth_502_retries_then_raises_provider_error(self, tmp_path: Path) -> No response = httpx.Response(502, request=request) call_count = 0 - def mock_fetch(settings, 
workload_name, cache_updates): + def mock_fetch(settings, workload_name, cache_updates, drift): nonlocal call_count call_count += 1 http_err = httpx.HTTPStatusError("502", request=request, response=response) @@ -308,7 +310,7 @@ def mock_fetch(settings, workload_name, cache_updates): patch("psi.setup.time.sleep"), pytest.raises(ProviderError, match="authentication failed"), ): - _setup_infisical_workload(settings, "myapp", {}) + _setup_infisical_workload(settings, "myapp", {}, []) assert call_count == len(_RETRY_DELAYS) + 1 @@ -318,7 +320,7 @@ def test_auth_401_wrapped_as_provider_error_not_retried(self, tmp_path: Path) -> response = httpx.Response(401, request=request) call_count = 0 - def mock_fetch(settings, workload_name, cache_updates): + def mock_fetch(settings, workload_name, cache_updates, drift): nonlocal call_count call_count += 1 http_err = httpx.HTTPStatusError("401", request=request, response=response) @@ -341,7 +343,7 @@ def mock_fetch(settings, workload_name, cache_updates): patch("psi.setup._fetch_and_register_infisical", side_effect=mock_fetch), pytest.raises(ProviderError, match="invalid credentials"), ): - _setup_infisical_workload(settings, "myapp", {}) + _setup_infisical_workload(settings, "myapp", {}, []) assert call_count == 1 @@ -371,3 +373,160 @@ def test_calls_podman_api_with_delete_then_create_per_secret(self, tmp_path: Pat "myapp--DB_URL" in client.delete.call_args_list[0].args[0] or "myapp--DB_URL" in client.delete.call_args_list[1].args[0] ) + + +def _shell_secret_stub(name: str) -> dict: + return { + "ID": name, + "Spec": { + "Name": name, + "Driver": {"Name": "shell", "Options": {}}, + }, + } + + +class TestCheckWorkloadDrift: + def test_empty_when_podman_matches_fetch(self) -> None: + merged = {"DB_URL": "{}", "API_KEY": "{}"} + podman_secrets = [ + _shell_secret_stub("myapp--DB_URL"), + _shell_secret_stub("myapp--API_KEY"), + ] + with patch("psi.setup._list_podman_shell_secrets", return_value=podman_secrets): + assert 
_check_workload_drift("myapp", merged) == [] + + def test_returns_stale_podman_secrets_sorted(self) -> None: + merged = {"DB_URL": "{}"} + podman_secrets = [ + _shell_secret_stub("myapp--DB_URL"), + _shell_secret_stub("myapp--NUM_WORKERS"), + _shell_secret_stub("myapp--MODE"), + ] + with patch("psi.setup._list_podman_shell_secrets", return_value=podman_secrets): + drift = _check_workload_drift("myapp", merged) + assert drift == ["myapp--MODE", "myapp--NUM_WORKERS"] + + def test_ignores_secrets_in_other_workload_namespaces(self) -> None: + merged = {"DB_URL": "{}"} + podman_secrets = [ + _shell_secret_stub("myapp--DB_URL"), + _shell_secret_stub("other-workload--MODE"), + _shell_secret_stub("myapp-extra--DB_URL"), + ] + with patch("psi.setup._list_podman_shell_secrets", return_value=podman_secrets): + assert _check_workload_drift("myapp", merged) == [] + + def test_template_workload_prefix_handled(self) -> None: + merged = {"DB_HOST": "{}"} + podman_secrets = [ + _shell_secret_stub("windmill-worker@--DB_HOST"), + _shell_secret_stub("windmill-worker@--STALE_KEY"), + ] + with patch("psi.setup._list_podman_shell_secrets", return_value=podman_secrets): + drift = _check_workload_drift("windmill-worker@", merged) + assert drift == ["windmill-worker@--STALE_KEY"] + + def test_podman_api_error_returns_empty(self) -> None: + with patch( + "psi.setup._list_podman_shell_secrets", + side_effect=httpx.ConnectError("refused"), + ): + assert _check_workload_drift("myapp", {"DB_URL": "{}"}) == [] + + +class TestRunSetupDriftExit: + def test_raises_drift_detected_error_when_drift_accumulates(self, tmp_path: Path) -> None: + from psi.errors import DriftDetectedError + + settings = _make_settings( + tmp_path, + workloads={ + "myapp": WorkloadConfig( + provider="infisical", + secrets=[SecretSource(project="myproject", path="/app")], + ), + }, + ) + + def mock_fetch(settings, workload_name, values_by_mapping, drift): + drift.append(f"{workload_name}--STALE_KEY") + + with ( + 
patch("psi.setup._fetch_and_register_infisical", side_effect=mock_fetch), + patch("psi.setup.daemon_reload"), + pytest.raises(DriftDetectedError, match="1 Podman secret"), + ): + run_setup(settings) + + def test_no_raise_when_drift_is_empty(self, tmp_path: Path) -> None: + settings = _make_settings( + tmp_path, + workloads={ + "myapp": WorkloadConfig( + provider="infisical", + secrets=[SecretSource(project="myproject", path="/app")], + ), + }, + ) + + def mock_fetch(settings, workload_name, values_by_mapping, drift): + pass + + with ( + patch("psi.setup._fetch_and_register_infisical", side_effect=mock_fetch), + patch("psi.setup.daemon_reload"), + ): + run_setup(settings) + + def test_aggregates_drift_across_workloads(self, tmp_path: Path) -> None: + from psi.errors import DriftDetectedError + + settings = _make_settings( + tmp_path, + workloads={ + "a": WorkloadConfig( + provider="infisical", + secrets=[SecretSource(project="myproject", path="/a")], + ), + "b": WorkloadConfig( + provider="infisical", + secrets=[SecretSource(project="myproject", path="/b")], + ), + }, + ) + + def mock_fetch(settings, workload_name, values_by_mapping, drift): + drift.append(f"{workload_name}--STALE") + + with ( + patch("psi.setup._fetch_and_register_infisical", side_effect=mock_fetch), + patch("psi.setup.daemon_reload"), + pytest.raises(DriftDetectedError, match="2 Podman secret"), + ): + run_setup(settings) + + def test_daemon_reload_runs_before_raise(self, tmp_path: Path) -> None: + """Drift is reported at the end — drop-ins and systemd reload happen first.""" + from psi.errors import DriftDetectedError + + settings = _make_settings( + tmp_path, + workloads={ + "myapp": WorkloadConfig( + provider="infisical", + secrets=[SecretSource(project="myproject", path="/app")], + ), + }, + ) + + def mock_fetch(settings, workload_name, values_by_mapping, drift): + drift.append("myapp--STALE") + + with ( + patch("psi.setup._fetch_and_register_infisical", side_effect=mock_fetch), + 
patch("psi.setup.daemon_reload") as mock_reload, + pytest.raises(DriftDetectedError), + ): + run_setup(settings) + + mock_reload.assert_called_once()