diff --git a/.gitignore b/.gitignore index a8ee76db..f01db9e2 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,6 @@ package-lock.json .cursor/ .aider* .continue/ + +# Serena MCP local cache +.serena/ diff --git a/custom_components/keymaster/coordinator.py b/custom_components/keymaster/coordinator.py index 58769fe7..f119bae4 100644 --- a/custom_components/keymaster/coordinator.py +++ b/custom_components/keymaster/coordinator.py @@ -122,6 +122,9 @@ def __init__(self, hass: HomeAssistant) -> None: self._timer_store: Store[dict[str, TimerStoreEntry]] = Store( hass, TIMER_STORAGE_VERSION, TIMER_STORAGE_KEY ) + # Shared across all KeymasterTimer instances writing to _timer_store so + # concurrent persists/removes can't drop each other's entries. + self._timer_store_lock = asyncio.Lock() async def initial_setup(self) -> None: """Trigger the initial async_setup.""" @@ -928,6 +931,7 @@ async def _setup_timer(self, kmlock: KeymasterLock) -> None: call_action=functools.partial(self._timer_triggered, kmlock), timer_id=f"{kmlock.keymaster_config_entry_id}_autolock", store=self._timer_store, + store_lock=self._timer_store_lock, ) if kmlock.autolock_timer.is_running: self.async_set_updated_data(dict(self.kmlocks)) @@ -1075,64 +1079,73 @@ async def _update_lock(self, new: KeymasterLock) -> bool: or not old.code_slots ): return False - await KeymasterCoordinator._unsubscribe_listeners(old) - # _LOGGER.debug("[update_lock] %s: old: %s", new.lock_name, old) - del_code_slots: list[int] = [ - old.starting_code_slot + i for i in range(old.number_of_code_slots) - ] - for code_slot_num in range( - new.starting_code_slot, - new.starting_code_slot + new.number_of_code_slots, - ): - if code_slot_num in del_code_slots: - del_code_slots.remove(code_slot_num) - - new.lock_state = old.lock_state - new.door_state = old.door_state - new.autolock_enabled = old.autolock_enabled - new.autolock_min_day = old.autolock_min_day - new.autolock_min_night = old.autolock_min_night - new.retry_lock = old.retry_lock - for code_slot_num, new_kmslot in new.code_slots.items(): - if code_slot_num not in old.code_slots: - continue - old_kmslot: KeymasterCodeSlot = old.code_slots[code_slot_num] - new_kmslot.enabled = old_kmslot.enabled - new_kmslot.name = old_kmslot.name - new_kmslot.pin = old_kmslot.pin - new_kmslot.override_parent = old_kmslot.override_parent - new_kmslot.notifications = old_kmslot.notifications - new_kmslot.accesslimit_count_enabled = old_kmslot.accesslimit_count_enabled - new_kmslot.accesslimit_count = old_kmslot.accesslimit_count - new_kmslot.accesslimit_date_range_enabled = old_kmslot.accesslimit_date_range_enabled - new_kmslot.accesslimit_date_range_start = old_kmslot.accesslimit_date_range_start - new_kmslot.accesslimit_date_range_end = old_kmslot.accesslimit_date_range_end - new_kmslot.accesslimit_day_of_week_enabled = old_kmslot.accesslimit_day_of_week_enabled - if not new_kmslot.accesslimit_day_of_week: - continue - for dow_num, new_dow in new_kmslot.accesslimit_day_of_week.items(): - if not old_kmslot.accesslimit_day_of_week: - continue - old_dow: KeymasterCodeSlotDayOfWeek = old_kmslot.accesslimit_day_of_week[dow_num] - new_dow.dow_enabled = old_dow.dow_enabled - new_dow.limit_by_time = old_dow.limit_by_time - new_dow.include_exclude = old_dow.include_exclude - new_dow.time_start = old_dow.time_start - new_dow.time_end = old_dow.time_end - self.kmlocks[new.keymaster_config_entry_id] = new - # _LOGGER.debug("[update_lock] %s: new: %s", new.lock_name, new) - _LOGGER.debug("[update_lock] Code slot entities to 
delete: %s", del_code_slots) - for code_slot_num in del_code_slots: - await delete_code_slot_entities( - hass=self.hass, - keymaster_config_entry_id=new.keymaster_config_entry_id, - code_slot_num=code_slot_num, - ) - await self._rebuild_lock_relationships() - await self._update_door_and_lock_state() - await self._update_listeners(self.kmlocks[new.keymaster_config_entry_id]) - await self._setup_timer(self.kmlocks[new.keymaster_config_entry_id]) - await self.async_refresh() + # Unsubscribe listeners FIRST so in-flight provider/door callbacks + # can't reach the about-to-be-detached timer (where start()/cancel() + # would silently fail or no-op). Then detach the old timer; its + # detach() awaits any in-flight _on_expired so mutations it made on + # old (e.g. pending_retry_lock) are visible to the state copy. + # Once detached, the old timer is unusable until the new timer is + # set up — wrap the rest in try/except that restores listeners and + # the autolock on whichever kmlock is current if any step fails. + try: + await KeymasterCoordinator._unsubscribe_listeners(old) + if old.autolock_timer: + await old.autolock_timer.detach() + # _LOGGER.debug("[update_lock] %s: old: %s", new.lock_name, old) + del_code_slots: list[int] = [ + old.starting_code_slot + i for i in range(old.number_of_code_slots) + ] + for code_slot_num in range( + new.starting_code_slot, + new.starting_code_slot + new.number_of_code_slots, + ): + if code_slot_num in del_code_slots: + del_code_slots.remove(code_slot_num) + + new.inherit_state_from(old) + self.kmlocks[new.keymaster_config_entry_id] = new + # _LOGGER.debug("[update_lock] %s: new: %s", new.lock_name, new) + _LOGGER.debug("[update_lock] Code slot entities to delete: %s", del_code_slots) + for code_slot_num in del_code_slots: + await delete_code_slot_entities( + hass=self.hass, + keymaster_config_entry_id=new.keymaster_config_entry_id, + code_slot_num=code_slot_num, + ) + await self._rebuild_lock_relationships() + await self._update_door_and_lock_state() + await self._update_listeners(self.kmlocks[new.keymaster_config_entry_id]) + await self._setup_timer(self.kmlocks[new.keymaster_config_entry_id]) + await self.async_refresh() + except Exception: + current = self.kmlocks.get(new.keymaster_config_entry_id) + if current is not None: + _LOGGER.exception( + "[update_lock] %s: Update failed mid-flight; " + "restoring autolock timer and listeners to keep state from being silently lost", + new.lock_name, + ) + # Restore in REVERSE order of the teardown (mirror the + # listeners-then-detach order in the try block): + # 1. _setup_timer first so the rebound timer is fully + # attached before any listener callback can reach it. + # 2. _update_listeners last so callbacks only fire after + # both the timer and listeners exist. 
+ try: + await self._setup_timer(current) + except Exception: + _LOGGER.exception( + "[update_lock] %s: Failed to restore autolock timer after rollback", + new.lock_name, + ) + try: + await self._update_listeners(current) + except Exception: + _LOGGER.exception( + "[update_lock] %s: Failed to restore listeners after rollback", + new.lock_name, + ) + raise return True async def _delete_lock(self, kmlock: KeymasterLock, _: dt) -> None: diff --git a/custom_components/keymaster/helpers.py b/custom_components/keymaster/helpers.py index 1314118e..87bff9b4 100644 --- a/custom_components/keymaster/helpers.py +++ b/custom_components/keymaster/helpers.py @@ -2,6 +2,7 @@ from __future__ import annotations +import asyncio from collections.abc import Callable, MutableMapping from datetime import datetime as dt, timedelta import logging @@ -82,6 +83,12 @@ def __init__(self) -> None: self._duration: int | None = None self._timer_id: str | None = None self._store: Store[dict[str, TimerStoreEntry]] | None = None + self._store_lock: asyncio.Lock | None = None + self._detached = False + # In-flight action tasks (the scheduled _on_expired AND any setup() + # recovery action). detach() awaits all of them so their mutations + # on the old kmlock are visible to inherit_state_from. + self._inflight_tasks: set[asyncio.Task[None]] = set() async def setup( self, @@ -90,18 +97,62 @@ async def setup( call_action: Callable, timer_id: str, store: Store[dict[str, TimerStoreEntry]], + store_lock: asyncio.Lock, ) -> None: """Set up the timer and recover any persisted state.""" self.hass = hass self._kmlock = kmlock self._call_action = call_action self._timer_id = timer_id + self._store_lock = store_lock self._store = store - - # Recover persisted timer - data = await store.async_load() or {} - timer_data = data.get(timer_id) - if timer_data: + # Reset _detached so cancel() works on the rebound instance. + # _end_time/_duration are NOT cleared here — detach() preserved them + # for an in-flight persist that may still be queued behind the + # store lock; clearing pre-lock would let the queued persist see + # None and abort, dropping a start() from just before reload. + self._detached = False + + # Hold the lock across load + cleanup so we don't race a concurrent + # persist from the outgoing timer during config entry reload. + async with self._store_lock: + data = await store.async_load() or {} + timer_data = data.get(timer_id) + if not timer_data: + # No persisted entry. If detach preserved an in-memory + # _end_time (a start() just before reload that hadn't yet + # persisted), claim it. Otherwise clear stale fields so + # is_running doesn't lie. + if self._end_time and self._duration is not None: + if self._end_time <= dt_util.utcnow(): + # Already expired during reload — fire as recovery, + # don't write back (that would let a subsequent + # reload replay the same overdue autolock). 
+ _LOGGER.debug( + "[KeymasterTimer] %s: Preserved timer expired " + "during reload; firing without resurrecting in store", + timer_id, + ) + self._end_time = None + self._duration = None + self._track_recovery(hass, dt_util.utcnow()) + else: + _LOGGER.debug( + "[KeymasterTimer] %s: Rescuing preserved timer state " + "from pre-reload start(); ending %s", + timer_id, + self._end_time, + ) + data[timer_id] = { + "end_time": self._end_time.isoformat(), + "duration": self._duration, + } + await store.async_save(data) + await self._resume(self._end_time, self._duration) + else: + self._end_time = None + self._duration = None + return try: end_time = dt.fromisoformat(timer_data["end_time"]) except (KeyError, TypeError, ValueError): @@ -109,7 +160,9 @@ async def setup( "[KeymasterTimer] %s: Invalid persisted timer data, removing", timer_id, ) - await self._remove_from_store() + await self._remove_from_store_locked() + self._end_time = None + self._duration = None return duration = timer_data.get("duration", 0) if end_time <= dt_util.utcnow(): @@ -117,8 +170,17 @@ async def setup( "[KeymasterTimer] %s: Persisted timer expired during downtime, firing", timer_id, ) - await self._remove_from_store() - hass.async_create_task(call_action(dt_util.utcnow())) + # Remove the entry atomically with detection so a concurrent + # setup() (e.g. during reload/rollback) can't see the same + # expired entry and schedule the recovery action twice. The + # trade-off: if the recovery action then fails, the autolock + # state is lost (no replay on next restart) — acceptable for + # this startup-only edge case since the user's next lock + # interaction creates a fresh autolock cycle. + await self._remove_from_store_locked() + self._end_time = None + self._duration = None + self._track_recovery(hass, dt_util.utcnow()) else: _LOGGER.debug( "[KeymasterTimer] %s: Resuming persisted timer, ending %s", @@ -153,26 +215,158 @@ async def start(self) -> bool: async def cancel(self, timer_elapsed: dt | None = None) -> None: """Cancel a timer.""" + if self._detached: + # The owning kmlock has been replaced; the replacement timer owns + # the store entry. Do nothing here so we can't trash its state. + _LOGGER.debug("[KeymasterTimer] Cancel called on detached timer; ignoring") + return if timer_elapsed: _LOGGER.debug("[KeymasterTimer] Timer elapsed") else: _LOGGER.debug("[KeymasterTimer] Cancelling auto-lock timer") + await self._clear_timer_state() + + async def _clear_timer_state(self) -> None: + """Clear the persisted entry and reset in-memory timer fields. + + Called from cancel() (user/coordinator-initiated) and from a + successful _on_expired (after the action fired). Direct callers + from _on_expired bypass cancel()'s _detached short-circuit so an + in-flight callback still removes the entry it owns. + """ self._cancel_callbacks() self._end_time = None self._duration = None await self._remove_from_store() + async def detach(self) -> None: + """Drop in-memory callbacks and clear the kmlock binding. + + Unlike cancel(), detach() leaves the persisted store entry so a + replacement KeymasterTimer (created on the new kmlock after a config + entry reload) can resume from it. _end_time and _duration are also + preserved so an in-flight _persist_to_store() queued behind the store + lock will still write the entry — without this, a start() racing with + reload would silently drop the autolock. + + Awaits any in-flight _on_expired callback before clearing the kmlock + binding so callers (e.g. 
_update_lock copying state from old to new) + observe whatever mutations the action made on the old kmlock. + """ + _LOGGER.debug("[KeymasterTimer] Detaching timer (state preserved in store)") + # Set first so any _on_expired already past its initial guard sees the + # detached state when it tries to call back into us. + self._detached = True + try: + self._cancel_callbacks() + except Exception: + # Best-effort: even if unsub raises (e.g. HA shutdown), we must + # finish nulling refs so the orphan can't operate on stale state. + _LOGGER.exception("[KeymasterTimer] Error cancelling callbacks during detach") + # Wait for any in-flight _on_expired to finish so callers see its + # mutations (e.g. pending_retry_lock) on the old kmlock before they + # copy state to the replacement. + # Await ALL in-flight action tasks (scheduled _on_expired AND any + # setup() recovery actions) so their mutations on the old kmlock + # are visible before inherit_state_from copies state to new. + for task in list(self._inflight_tasks): + if not task.done(): + try: + await task + except Exception: + _LOGGER.exception("[KeymasterTimer] In-flight task raised during detach") + # Force-persist any in-memory state so the REPLACEMENT timer (a + # fresh KeymasterTimer instance on the new kmlock) can resume from + # disk even if a queued _persist_to_store() from a recent start() + # hasn't run yet. The replacement instance can't see our in-memory + # _end_time, only the store. If the queued persist has already + # written, the in-store check makes this a no-op. + # Propagate exceptions: when state exists only in memory (the exact + # race this exists for), failure means the replacement comes up with + # no entry. The caller (_update_lock) needs to know so its rollback + # path can attempt recovery instead of silently dropping the autolock. + if ( + self._end_time + and self._duration is not None + and self._store + and self._store_lock + and self._timer_id + ): + async with self._store_lock: + data = await self._store.async_load() or {} + if self._timer_id not in data: + data[self._timer_id] = { + "end_time": self._end_time.isoformat(), + "duration": self._duration, + } + await self._store.async_save(data) + self.hass = None + self._kmlock = None + self._call_action = None + def _schedule_callbacks(self, delay: float) -> None: """Schedule a single callback that fires the action then cleans up.""" async def _on_expired(now: dt) -> None: - """Fire the action and clean up timer state.""" - if self._call_action: - await self._call_action(now) - await self.cancel(timer_elapsed=now) + """Fire the action then clean up timer state. + + Action runs FIRST so storage failures during cleanup can't + prevent the safety-critical lock from firing. We use + _remove_from_store directly (not cancel()) because cancel() is + a noop on detached timers; an in-flight callback racing with + detach() must still remove the entry or the replacement timer's + setup() will replay the action. + + Registers itself in self._inflight_tasks so that detach() can + await completion — any mutations the action made on the old + kmlock are then visible to _update_lock's state copy. + """ + self._inflight_tasks.add(asyncio.current_task()) + try: + if self._call_action is None: + return + action = self._call_action + try: + await action(now) + except Exception: + # Preserve the store entry so the timer replays on next + # HA restart — the action (e.g. locking the door) is + # safety-critical and a failure here means it didn't + # happen yet. 
Skip the cleanup so retry remains possible. + _LOGGER.exception( + "[KeymasterTimer] Action raised in _on_expired; " + "store entry preserved for retry on restart" + ) + return + # Action succeeded — clear state. We call _clear_timer_state + # directly instead of cancel() because cancel() is a noop on + # detached timers; an in-flight callback racing with detach + # must still remove the entry it owns. + await self._clear_timer_state() + finally: + self._inflight_tasks.discard(asyncio.current_task()) self._unsub_events.append(async_call_later(hass=self.hass, delay=delay, action=_on_expired)) + async def _run_recovery_action(self, now: dt) -> None: + """Fire a startup-recovery action; tracked by detach().""" + if self._call_action is None: + return + try: + await self._call_action(now) + except Exception: + _LOGGER.exception("[KeymasterTimer] Recovery action raised") + + def _track_recovery(self, hass: HomeAssistant, now: dt) -> None: + """Schedule a recovery action and add it to the in-flight set. + + The task is added before being scheduled so detach() can await it + even if it hasn't started running yet. + """ + task = hass.async_create_task(self._run_recovery_action(now)) + self._inflight_tasks.add(task) + task.add_done_callback(self._inflight_tasks.discard) + def _cancel_callbacks(self) -> None: """Unsubscribe all pending callbacks.""" for unsub in self._unsub_events: @@ -190,15 +384,42 @@ async def _persist_to_store(self) -> None: """Write current timer state to the store.""" if not self._store or not self._timer_id or not self._end_time: return - data = await self._store.async_load() or {} - data[self._timer_id] = { - "end_time": self._end_time.isoformat(), - "duration": self._duration, - } - await self._store.async_save(data) + if not self._store_lock: + # store_lock is set in setup(); a missing lock here is a programming + # error. Silently no-opping would silently lose the autolock across + # the next restart. + raise RuntimeError( + f"[KeymasterTimer] {self._timer_id}: store_lock missing; setup() was not called" + ) + async with self._store_lock: + # Re-check after lock acquisition: cancel() can null _end_time + # while we were queued, in which case there's nothing to persist. + end_time = self._end_time + duration = self._duration + if not end_time: + return + data = await self._store.async_load() or {} + data[self._timer_id] = { + "end_time": end_time.isoformat(), + "duration": duration, + } + await self._store.async_save(data) async def _remove_from_store(self) -> None: """Remove this timer's entry from the store.""" + if not self._store or not self._timer_id: + return + if not self._store_lock: + # store_lock is set in setup(); silently no-opping would leave a + # stale entry that fires a phantom autolock on the next restart. + raise RuntimeError( + f"[KeymasterTimer] {self._timer_id}: store_lock missing; setup() was not called" + ) + async with self._store_lock: + await self._remove_from_store_locked() + + async def _remove_from_store_locked(self) -> None: + """Remove this timer's entry from the store. 
Caller MUST hold _store_lock.""" if not self._store or not self._timer_id: return data = await self._store.async_load() or {} diff --git a/custom_components/keymaster/lock.py b/custom_components/keymaster/lock.py index 0301479e..44770396 100644 --- a/custom_components/keymaster/lock.py +++ b/custom_components/keymaster/lock.py @@ -26,6 +26,19 @@ class KeymasterCodeSlotDayOfWeek: time_start: dt_time | None = None time_end: dt_time | None = None + @classmethod + def derive_from_existing(cls, old: KeymasterCodeSlotDayOfWeek) -> KeymasterCodeSlotDayOfWeek: + """Build a new DOW instance carrying the user-configurable state of ``old``.""" + return cls( + day_of_week_num=old.day_of_week_num, + day_of_week_name=old.day_of_week_name, + dow_enabled=old.dow_enabled, + limit_by_time=old.limit_by_time, + include_exclude=old.include_exclude, + time_start=old.time_start, + time_end=old.time_end, + ) + @dataclass class KeymasterCodeSlot: @@ -47,6 +60,34 @@ class KeymasterCodeSlot: accesslimit_day_of_week_enabled: bool = False accesslimit_day_of_week: MutableMapping[int, KeymasterCodeSlotDayOfWeek] | None = None + @classmethod + def derive_from_existing(cls, old: KeymasterCodeSlot, *, number: int) -> KeymasterCodeSlot: + """Build a new code slot for ``number`` carrying user state from ``old``. + + Runtime-only fields (``active``, ``synced``) are intentionally left at + their defaults — they belong to the new instance's lifecycle. + """ + new = cls( + number=number, + enabled=old.enabled, + name=old.name, + pin=old.pin, + override_parent=old.override_parent, + notifications=old.notifications, + accesslimit_count_enabled=old.accesslimit_count_enabled, + accesslimit_count=old.accesslimit_count, + accesslimit_date_range_enabled=old.accesslimit_date_range_enabled, + accesslimit_date_range_start=old.accesslimit_date_range_start, + accesslimit_date_range_end=old.accesslimit_date_range_end, + accesslimit_day_of_week_enabled=old.accesslimit_day_of_week_enabled, + ) + if old.accesslimit_day_of_week: + new.accesslimit_day_of_week = { + dow_num: KeymasterCodeSlotDayOfWeek.derive_from_existing(old_dow) + for dow_num, old_dow in old.accesslimit_day_of_week.items() + } + return new + @dataclass class KeymasterLock: @@ -84,6 +125,31 @@ class KeymasterLock: # Transient runtime-only field; excluded from persistence (init=False). masked_code_slots: set[int] = field(default_factory=set, init=False, repr=False) + def inherit_state_from(self, old: KeymasterLock) -> None: + """Carry user/runtime state from a previous instance into this one. + + Called when a config entry is reloaded: the new lock instance is + constructed fresh from config, but state the user owns (autolock + config, current lock/door state, code slot contents, in-flight + retry) must survive the swap. Owning this on the dataclass keeps + the field-by-field copy logic next to the field declarations + rather than scattered through the coordinator. 
+ """ + self.lock_state = old.lock_state + self.door_state = old.door_state + self.autolock_enabled = old.autolock_enabled + self.autolock_min_day = old.autolock_min_day + self.autolock_min_night = old.autolock_min_night + self.retry_lock = old.retry_lock + self.pending_retry_lock = old.pending_retry_lock + if not self.code_slots or not old.code_slots: + return + for code_slot_num in self.code_slots: + if code_slot_num in old.code_slots: + self.code_slots[code_slot_num] = KeymasterCodeSlot.derive_from_existing( + old.code_slots[code_slot_num], number=code_slot_num + ) + keymasterlock_type_lookup: MutableMapping[str, type] = { "lock_name": str, diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py index 533f5a0b..02683137 100644 --- a/tests/test_coordinator.py +++ b/tests/test_coordinator.py @@ -1313,7 +1313,7 @@ def mock_coordinator(self, hass): return coordinator async def test_setup_timer_passes_timer_id_and_store(self, mock_coordinator): - """Test _setup_timer passes timer_id and store to timer.setup().""" + """Test _setup_timer passes timer_id, store, and shared store_lock to setup().""" kmlock = KeymasterLock( lock_name="test_lock", lock_entity_id="lock.test", @@ -1334,6 +1334,10 @@ async def test_setup_timer_passes_timer_id_and_store(self, mock_coordinator): call_kwargs = mock_timer.setup.call_args.kwargs assert call_kwargs["timer_id"] == "test_entry_123_autolock" assert call_kwargs["store"] is mock_coordinator._timer_store + # The shared lock must be the coordinator's, not a per-timer one, + # otherwise concurrent persists from different timers can clobber + # each other's entries in the shared store dict. + assert call_kwargs["store_lock"] is mock_coordinator._timer_store_lock async def test_setup_timer_pushes_data_when_timer_resumed(self, mock_coordinator): """Test _setup_timer pushes data to entities when timer resumes from persistence.""" @@ -1369,6 +1373,184 @@ async def test_setup_timer_skips_if_already_setup(self, mock_coordinator): kmlock.autolock_timer.setup.assert_not_called() + async def test_update_lock_detaches_old_timer_before_any_await(self, mock_coordinator): + """Test _update_lock detaches the old timer BEFORE the await chain. + + If detach happens after one of the awaits, the old timer can fire + in the gap and run _timer_triggered against the (about-to-be-) + orphaned kmlock — for example mutating old.pending_retry_lock, + which the replacement never sees. 
+ """ + entry_id = "test_entry_reload" + call_order: list[str] = [] + + old_lock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test", + keymaster_config_entry_id=entry_id, + ) + old_lock.starting_code_slot = 1 + old_lock.number_of_code_slots = 1 + old_lock.code_slots = {1: Mock(accesslimit_day_of_week=None)} + old_lock.autolock_timer = Mock() + + async def record_detach(): + call_order.append("detach") + + old_lock.autolock_timer.detach = AsyncMock(side_effect=record_detach) + + new_lock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test", + keymaster_config_entry_id=entry_id, + ) + new_lock.starting_code_slot = 1 + new_lock.number_of_code_slots = 1 + new_lock.code_slots = {1: Mock(accesslimit_day_of_week=None)} + + mock_coordinator.kmlocks[entry_id] = old_lock + mock_coordinator._initial_setup_done_event.set() + + async def record_unsub(*args, **kwargs): + call_order.append("unsubscribe_listeners") + + async def record_setup(*args, **kwargs): + call_order.append("setup_timer") + + with ( + patch.object( + KeymasterCoordinator, + "_unsubscribe_listeners", + new=AsyncMock(side_effect=record_unsub), + ), + patch.object(mock_coordinator, "_rebuild_lock_relationships", new=AsyncMock()), + patch.object(mock_coordinator, "_update_door_and_lock_state", new=AsyncMock()), + patch.object(mock_coordinator, "_update_listeners", new=AsyncMock()), + patch.object( + mock_coordinator, "_setup_timer", new=AsyncMock(side_effect=record_setup) + ) as mock_setup_timer, + patch.object(mock_coordinator, "async_refresh", new=AsyncMock()), + ): + await mock_coordinator._update_lock(new_lock) + + old_lock.autolock_timer.detach.assert_called_once_with() + mock_setup_timer.assert_called_once() + # Critical ordering: _unsubscribe_listeners runs BEFORE detach so + # in-flight provider/door callbacks can't reach the about-to-be + # detached timer (where start()/cancel() would silently fail or + # no-op). detach must still run before _setup_timer so the new + # timer can resume from the preserved store entry. + assert "unsubscribe_listeners" in call_order + assert "detach" in call_order + assert "setup_timer" in call_order + assert call_order.index("unsubscribe_listeners") < call_order.index("detach") + assert call_order.index("detach") < call_order.index("setup_timer") + # And the new kmlock (now in self.kmlocks) is what _setup_timer received + assert mock_setup_timer.call_args.args[0] is mock_coordinator.kmlocks[entry_id] + + async def test_update_lock_copies_pending_retry_lock(self, mock_coordinator): + """Test _update_lock copies pending_retry_lock from old to new kmlock. + + If autolock was waiting for the door to close (pending_retry_lock=True) + when the user reloaded, the replacement kmlock must inherit that flag + or it will never re-attempt the lock. 
+ """ + entry_id = "test_entry_pending_retry" + + old_lock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test", + keymaster_config_entry_id=entry_id, + ) + old_lock.starting_code_slot = 1 + old_lock.number_of_code_slots = 1 + old_lock.code_slots = {1: Mock(accesslimit_day_of_week=None)} + old_lock.autolock_timer = Mock() + old_lock.autolock_timer.detach = AsyncMock() + old_lock.pending_retry_lock = True # autolock was waiting + + new_lock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test", + keymaster_config_entry_id=entry_id, + ) + new_lock.starting_code_slot = 1 + new_lock.number_of_code_slots = 1 + new_lock.code_slots = {1: Mock(accesslimit_day_of_week=None)} + # Default is False; the copy in _update_lock should flip it to True + + mock_coordinator.kmlocks[entry_id] = old_lock + mock_coordinator._initial_setup_done_event.set() + + with ( + patch.object(KeymasterCoordinator, "_unsubscribe_listeners", new=AsyncMock()), + patch.object(mock_coordinator, "_rebuild_lock_relationships", new=AsyncMock()), + patch.object(mock_coordinator, "_update_door_and_lock_state", new=AsyncMock()), + patch.object(mock_coordinator, "_update_listeners", new=AsyncMock()), + patch.object(mock_coordinator, "_setup_timer", new=AsyncMock()), + patch.object(mock_coordinator, "async_refresh", new=AsyncMock()), + ): + await mock_coordinator._update_lock(new_lock) + + assert mock_coordinator.kmlocks[entry_id].pending_retry_lock is True + + async def test_update_lock_restores_timer_on_inner_failure(self, mock_coordinator): + """Test _update_lock re-attaches the autolock timer if a later step raises. + + Without rollback, an exception in any of the await steps after detach() + leaves the kmlock with no working timer until the next reload, silently + disabling autolock. + """ + entry_id = "test_entry_rollback" + + old_lock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test", + keymaster_config_entry_id=entry_id, + ) + old_lock.starting_code_slot = 1 + old_lock.number_of_code_slots = 1 + old_lock.code_slots = {1: Mock(accesslimit_day_of_week=None)} + old_lock.autolock_timer = Mock() + old_lock.autolock_timer.detach = AsyncMock() + + new_lock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test", + keymaster_config_entry_id=entry_id, + ) + new_lock.starting_code_slot = 1 + new_lock.number_of_code_slots = 1 + new_lock.code_slots = {1: Mock(accesslimit_day_of_week=None)} + + mock_coordinator.kmlocks[entry_id] = old_lock + mock_coordinator._initial_setup_done_event.set() + + with ( + # Make _rebuild_lock_relationships raise so we get past the initial + # _unsubscribe_listeners (so the rollback has something to restore) + patch.object(KeymasterCoordinator, "_unsubscribe_listeners", new=AsyncMock()), + patch.object( + mock_coordinator, + "_rebuild_lock_relationships", + new=AsyncMock(side_effect=RuntimeError("rebuild failed")), + ), + patch.object( + mock_coordinator, "_update_listeners", new=AsyncMock() + ) as mock_update_listeners, + patch.object(mock_coordinator, "_setup_timer", new=AsyncMock()) as mock_setup_timer, + pytest.raises(RuntimeError, match="rebuild failed"), + ): + await mock_coordinator._update_lock(new_lock) + + # Rollback must restore BOTH listeners and timer on the kmlock + # currently in self.kmlocks, otherwise autolock and event-driven + # state changes are silently disabled. 
+ mock_update_listeners.assert_called_once() + assert mock_update_listeners.call_args.args[0] is mock_coordinator.kmlocks[entry_id] + mock_setup_timer.assert_called_once() + assert mock_setup_timer.call_args.args[0] is mock_coordinator.kmlocks[entry_id] + # ============================================================================ # State Synchronization Tests diff --git a/tests/test_helpers.py b/tests/test_helpers.py index bd7cece3..3a0a5491 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -1,5 +1,6 @@ """Test keymaster helpers.""" +import asyncio from datetime import timedelta from unittest.mock import AsyncMock, MagicMock, patch @@ -193,6 +194,12 @@ def mock_store(): return store +@pytest.fixture +def store_lock(): + """Provide a fresh asyncio.Lock for timer.setup().""" + return asyncio.Lock() + + # KeymasterTimer tests async def test_keymaster_timer_init(): """Test KeymasterTimer initialization.""" @@ -227,7 +234,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) assert timer.hass is hass assert timer._kmlock is kmlock @@ -265,7 +274,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) # Mock sun.is_up to return True (daytime) with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): @@ -299,7 +310,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) # Mock sun.is_up to return False (nighttime) with patch("custom_components.keymaster.helpers.sun.is_up", return_value=False): @@ -332,7 +345,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) # Start timer first time with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): @@ -368,7 +383,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) # Start timer with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): @@ -406,7 +423,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) # 
Before starting assert timer.is_setup @@ -494,7 +513,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) # Start timer with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): @@ -523,7 +544,9 @@ async def mock_callback(*args): store = AsyncMock() store.async_load = AsyncMock(return_value={}) store.async_save = AsyncMock() - await timer.setup(hass, kmlock, mock_callback, timer_id="test_timer", store=store) + await timer.setup( + hass, kmlock, mock_callback, timer_id="test_timer", store=store, store_lock=asyncio.Lock() + ) unsub = MagicMock() timer._end_time = dt_util.utcnow() - timedelta(seconds=1) @@ -544,7 +567,7 @@ async def mock_callback(*args): assert len(timer._unsub_events) == 1 -async def test_keymaster_timer_setup_recovers_expired_timer(hass, mock_store): +async def test_keymaster_timer_setup_recovers_expired_timer(hass, mock_store, store_lock): """Test setup() fires action immediately when persisted timer has expired (issue #594).""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -564,7 +587,9 @@ async def mock_action(*args): nonlocal action_called action_called = True - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) await hass.async_block_till_done() # Expired timer should have fired the action @@ -579,7 +604,7 @@ async def mock_action(*args): assert not timer.is_running -async def test_keymaster_timer_setup_resumes_active_timer(hass, mock_store): +async def test_keymaster_timer_setup_resumes_active_timer(hass, mock_store, store_lock): """Test setup() resumes timer when persisted timer is still active.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -596,7 +621,9 @@ async def test_keymaster_timer_setup_resumes_active_timer(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) # Timer should be running with the persisted end_time assert timer.is_running @@ -604,7 +631,7 @@ async def mock_action(*args): assert len(timer._unsub_events) == 1 -async def test_keymaster_timer_setup_no_persisted_timer(hass, mock_store): +async def test_keymaster_timer_setup_no_persisted_timer(hass, mock_store, store_lock): """Test setup() does nothing when no persisted timer exists.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -616,14 +643,16 @@ async def test_keymaster_timer_setup_no_persisted_timer(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) # Timer should be set up but not running assert timer.is_setup assert not timer.is_running -async def test_keymaster_timer_start_persists_to_store(hass, mock_store): +async def test_keymaster_timer_start_persists_to_store(hass, mock_store, store_lock): """Test start() persists end_time to the store.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -636,7 +665,9 @@ async 
def test_keymaster_timer_start_persists_to_store(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): await timer.start() @@ -649,7 +680,7 @@ async def mock_action(*args): assert saved_data["test_timer"]["duration"] == 300 -async def test_keymaster_timer_cancel_removes_from_store(hass, mock_store): +async def test_keymaster_timer_cancel_removes_from_store(hass, mock_store, store_lock): """Test cancel() removes the timer from the store.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -662,7 +693,9 @@ async def test_keymaster_timer_cancel_removes_from_store(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): await timer.start() @@ -699,7 +732,7 @@ def test_async_has_supported_provider_no_args(hass): assert result is False -async def test_keymaster_timer_setup_invalid_end_time_format(hass, mock_store): +async def test_keymaster_timer_setup_invalid_end_time_format(hass, mock_store, store_lock): """Test setup() handles corrupt end_time string in persisted data.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -718,7 +751,9 @@ async def mock_action(*args): nonlocal action_called action_called = True - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) # Action should NOT have fired assert action_called is False @@ -730,7 +765,7 @@ async def mock_action(*args): assert "test_timer" not in saved_data -async def test_keymaster_timer_setup_missing_end_time_key(hass, mock_store): +async def test_keymaster_timer_setup_missing_end_time_key(hass, mock_store, store_lock): """Test setup() handles persisted data with missing end_time key.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -745,7 +780,9 @@ async def test_keymaster_timer_setup_missing_end_time_key(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) assert not timer.is_running # Invalid entry should be removed from store @@ -754,7 +791,7 @@ async def mock_action(*args): assert "test_timer" not in saved_data -async def test_keymaster_timer_setup_null_end_time(hass, mock_store): +async def test_keymaster_timer_setup_null_end_time(hass, mock_store, store_lock): """Test setup() handles persisted data with None end_time.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -770,7 +807,9 @@ async def test_keymaster_timer_setup_null_end_time(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) assert not timer.is_running mock_store.async_save.assert_called() @@ -802,7 +841,1019 @@ async def mock_action(*args): await 
timer._persist_to_store() -async def test_keymaster_timer_remove_from_store_missing_key(hass, mock_store): +async def test_keymaster_timer_concurrent_persist_and_cancel(hass, mock_store, store_lock): + """Test concurrent _persist_to_store and cancel() don't crash and cancel wins. + + Reproduces the original race: persist passes its initial guard, awaits + async_load, and during that yield cancel() runs. With the asyncio.Lock, + cancel must wait for persist to release the lock — so persist completes + cleanly (no AttributeError) and cancel runs strictly after, leaving the + final store state with the entry removed. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + # Simulate real store: load returns whatever was last saved + store_state: dict = {} + saved_states: list[dict] = [] + + async def record_save(data): + nonlocal store_state + store_state = dict(data) + saved_states.append(dict(data)) + + mock_store.async_save = AsyncMock(side_effect=record_save) + + # Block async_load on an event so persist gets stuck after acquiring the lock. + # Once the event is set, persist completes; cancel (queued behind the lock) + # then runs and removes the entry. + load_release = asyncio.Event() + persist_load_started = asyncio.Event() + load_call_count = 0 + + async def blocking_load(): + nonlocal load_call_count + load_call_count += 1 + if load_call_count == 1: + # First call is from persist — block until cancel has been invoked + persist_load_started.set() + await load_release.wait() + return dict(store_state) + + mock_store.async_load = AsyncMock(side_effect=blocking_load) + + # Prime timer state as start() would + timer._end_time = dt_util.utcnow() + timedelta(minutes=5) + timer._duration = 300 + + # Kick off persist; it will acquire the lock and block on async_load + persist_task = asyncio.create_task(timer._persist_to_store()) + await persist_load_started.wait() + + # Now invoke cancel concurrently — it must wait for the lock + cancel_task = asyncio.create_task(timer.cancel()) + # Yield to let cancel attempt to acquire the lock and block + await asyncio.sleep(0) + assert not cancel_task.done(), "cancel should be blocked waiting for the store lock" + + # Release persist; it should save, then cancel acquires lock and removes + load_release.set() + await asyncio.gather(persist_task, cancel_task) + + # No crash, persist saved the entry, cancel then removed it + assert len(saved_states) == 2 + assert "test_timer" in saved_states[0], "persist should have saved the entry" + assert "test_timer" not in saved_states[1], "cancel should have removed the entry" + + +async def test_keymaster_timer_shared_lock_prevents_cross_timer_update_loss( + hass, mock_store, store_lock +): + """Test two timers sharing a store lock don't drop each other's persisted entries. + + Without a shared lock, timer A and timer B can both load the store dict + concurrently, each mutate their own key, and the later async_save() will + overwrite the earlier write. The shared lock forces serialization so + both entries land in the final saved state. 
+ """ + shared_lock = asyncio.Lock() + + # Simulate the real Store: load returns whatever was last saved + store_state: dict = {} + saved_states: list[dict] = [] + + async def record_save(data): + nonlocal store_state + store_state = dict(data) + saved_states.append(dict(data)) + + # Snapshot store_state on entry, then yield. With the shared lock, only + # one persist enters at a time, so each snapshot reflects the previous + # save. Without the lock, both persists snapshot the empty pre-write + # state and the second save clobbers the first. + async def snapshotting_load(): + snapshot = dict(store_state) + await asyncio.sleep(0) + return snapshot + + async def mock_action(*args): + pass + + timer_a = KeymasterTimer() + timer_b = KeymasterTimer() + kmlock_a = KeymasterLock( + lock_name="lock_a", lock_entity_id="lock.a", keymaster_config_entry_id="entry_a" + ) + kmlock_b = KeymasterLock( + lock_name="lock_b", lock_entity_id="lock.b", keymaster_config_entry_id="entry_b" + ) + await timer_a.setup( + hass, kmlock_a, mock_action, timer_id="timer_a", store=mock_store, store_lock=shared_lock + ) + await timer_b.setup( + hass, kmlock_b, mock_action, timer_id="timer_b", store=mock_store, store_lock=shared_lock + ) + + # Swap the mocks AFTER setup so the barrier only counts persist's loads + mock_store.async_save = AsyncMock(side_effect=record_save) + mock_store.async_load = AsyncMock(side_effect=snapshotting_load) + + timer_a._end_time = dt_util.utcnow() + timedelta(minutes=5) + timer_a._duration = 300 + timer_b._end_time = dt_util.utcnow() + timedelta(minutes=10) + timer_b._duration = 600 + + # Persist both concurrently — without a shared lock, one would overwrite the other + await asyncio.gather(timer_a._persist_to_store(), timer_b._persist_to_store()) + + # Final saved state must contain BOTH entries + assert "timer_a" in store_state + assert "timer_b" in store_state + + +async def test_keymaster_timer_detach_preserves_store(hass, mock_store, store_lock): + """Test detach() cancels callbacks and clears refs but preserves the store entry. + + detach() is used when a kmlock is being replaced (config entry reload). The + replacement's timer must be able to resume from the store, so detach must + NOT call _remove_from_store (unlike cancel()). + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + + # Sanity: timer has a scheduled callback before detach + assert len(timer._unsub_events) == 1 + saved_end_time = timer._end_time + saved_duration = timer._duration + + # The persist from start() already wrote the entry; make load reflect that + # so detach's force-persist check sees the entry exists and skips the write. 
+ mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": saved_end_time.isoformat(), "duration": 300}} + ) + mock_store.async_save.reset_mock() + + await timer.detach() + + # Callbacks unsubscribed and kmlock binding cleared + assert timer._unsub_events == [] + assert timer.hass is None + assert timer._kmlock is None + assert timer._call_action is None + assert timer._detached is True + # _end_time/_duration are PRESERVED so an in-flight _persist_to_store can + # still write the entry under the lock (otherwise a start() racing with + # reload would silently lose the autolock state). + assert timer._end_time == saved_end_time + assert timer._duration == saved_duration + # Store entry already present, detach's force-persist skipped the write + mock_store.async_save.assert_not_called() + + +async def test_keymaster_timer_detach_awaits_recovery_action(hass, mock_store, store_lock): + """Test detach() awaits an in-flight setup() recovery action. + + Without this, a reload during recovery could copy the old kmlock's + state before _timer_triggered finishes mutating it (e.g. setting + pending_retry_lock=True on door-open), losing those mutations. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + + action_started = asyncio.Event() + action_release = asyncio.Event() + action_completed = False + + async def slow_action(*args): + nonlocal action_completed + action_started.set() + await action_release.wait() + action_completed = True + + expired_end_time = (dt_util.utcnow() - timedelta(minutes=5)).isoformat() + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": expired_end_time, "duration": 300}} + ) + + # setup() schedules the recovery as a tracked task + await timer.setup( + hass, kmlock, slow_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + await action_started.wait() + assert not action_completed + + # detach must wait for the recovery to complete + detach_task = asyncio.create_task(timer.detach()) + await asyncio.sleep(0) + assert not detach_task.done(), "detach must wait for in-flight recovery action" + + action_release.set() + await detach_task + + assert action_completed + + +async def test_keymaster_timer_detach_propagates_force_persist_failure( + hass, mock_store, store_lock +): + """Test detach() does NOT swallow force-persist failures. + + When the timer state exists only in memory, a storage failure means + the replacement comes up empty. Failing loud lets _update_lock's + rollback path attempt recovery instead of silently disabling autolock. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + + # Make load empty so force-persist would try to write, then make save raise + mock_store.async_load = AsyncMock(return_value={}) + mock_store.async_save = AsyncMock(side_effect=OSError("transient disk error")) + + with pytest.raises(OSError, match="transient disk error"): + await timer.detach() + + +async def test_keymaster_timer_detach_force_persists_when_store_empty(hass, mock_store, store_lock): + """Test detach() writes in-memory state to disk if the queued persist hasn't run. + + Scenario: start() called just before reload sets _end_time and queues + a _persist_to_store behind the shared lock (held by some other op). + detach() runs before that queued persist. Without force-persist, the + REPLACEMENT timer (a fresh KeymasterTimer instance on the new kmlock) + would load an empty store and the autolock would be silently lost. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + + # Simulate the queued persist NOT having run: store is empty + mock_store.async_load = AsyncMock(return_value={}) + mock_store.async_save.reset_mock() + saved_end_time = timer._end_time + + await timer.detach() + + mock_store.async_save.assert_called_once() + saved_data = mock_store.async_save.call_args[0][0] + assert "test_timer" in saved_data + assert saved_data["test_timer"]["end_time"] == saved_end_time.isoformat() + + +async def test_keymaster_timer_cancel_after_detach_is_noop(hass, mock_store, store_lock): + """Test cancel() on a detached timer doesn't touch the store. + + Protects against an _on_expired coroutine that was already scheduled when + detach() ran: its `await self.cancel()` must not remove the store entry + that the replacement timer is about to resume from. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + + # Make the store actually contain the persisted entry so cancel() would + # otherwise remove it + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": "2099-01-01T00:00:00+00:00", "duration": 300}} + ) + await timer.detach() + mock_store.async_save.reset_mock() + + # Simulating an in-flight _on_expired calling cancel after detach + await timer.cancel() + mock_store.async_save.assert_not_called() + + +async def test_keymaster_timer_setup_load_under_lock(hass, mock_store, store_lock): + """Test setup()'s load+resume runs under the store lock. + + If setup() loaded outside the lock, a config entry reload could let the + new timer read an empty store while the outgoing timer's persist was + still queued, silently losing autolock state. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + # Acquire the lock before setup; if setup loads under the lock it must wait + async with store_lock: + setup_task = asyncio.create_task( + timer.setup( + hass, + kmlock, + mock_action, + timer_id="test_timer", + store=mock_store, + store_lock=store_lock, + ) + ) + # Yield to let setup run as far as it can + await asyncio.sleep(0) + assert not setup_task.done(), "setup should be blocked on the store lock" + # async_load must NOT have been called yet — it's inside the lock + mock_store.async_load.assert_not_called() + + # Lock released; setup should now finish + await setup_task + mock_store.async_load.assert_called() + + +async def test_keymaster_timer_on_expired_action_before_remove(hass, mock_store, store_lock): + """Test _on_expired fires the action BEFORE store cleanup. + + Action is the safety-critical operation; if storage cleanup raises + (e.g. transient async_load/async_save failure), the lock must still + have been locked. detach() awaits this task so the action can complete + even if reload races, and the in-flight callback removes the entry + before it returns so the replacement won't replay. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + call_order: list[str] = [] + + async def mock_action(*args): + call_order.append("action") + + real_save = mock_store.async_save + + async def tracking_save(data): + if "test_timer" not in data: + call_order.append("remove") + await real_save(data) + + mock_store.async_save = AsyncMock(side_effect=tracking_save) + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.async_call_later") as mock_call_later: + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + callback_fn = mock_call_later.call_args[1]["action"] + + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": timer._end_time.isoformat(), "duration": 300}} + ) + + await callback_fn(dt_util.utcnow()) + + # Action must fire before remove so storage failures can't block the lock + assert call_order == ["action", "remove"] + + +async def test_keymaster_timer_on_expired_action_fires_even_if_remove_raises( + hass, mock_store, store_lock +): + """Test the action fires even if store cleanup raises. + + Action is safety-critical (locks the door). A transient storage error + must not prevent it. The next restart will replay the timer (entry + still in store), which is acceptable. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + action_called = False + + async def mock_action(*args): + nonlocal action_called + action_called = True + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.async_call_later") as mock_call_later: + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + callback_fn = mock_call_later.call_args[1]["action"] + + # Make remove fail by having async_load raise + mock_store.async_load = AsyncMock(side_effect=OSError("transient disk error")) + + # Callback should propagate the error but action should have fired first + with pytest.raises(OSError): + await callback_fn(dt_util.utcnow()) + + assert action_called, "action must fire even if storage cleanup fails" + + +async def test_keymaster_timer_on_expired_preserves_store_when_action_raises( + hass, mock_store, store_lock +): + """Test the store entry is preserved when the action raises. + + The lock action is safety-critical. If it fails (e.g. lock service + raised), the door is still unlocked — we MUST preserve the store + entry so the timer replays on the next HA restart, otherwise the + autolock state is silently lost forever. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def failing_action(*args): + raise RuntimeError("lock service failed") + + await timer.setup( + hass, kmlock, failing_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.async_call_later") as mock_call_later: + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + callback_fn = mock_call_later.call_args[1]["action"] + + # Load returns the entry, so a remove call would actually save + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": timer._end_time.isoformat(), "duration": 300}} + ) + mock_store.async_save.reset_mock() + + # Should not raise (action exception is caught), but should NOT remove from store + await callback_fn(dt_util.utcnow()) + + # Critical: no save call means the store entry was NOT removed + mock_store.async_save.assert_not_called() + + +async def test_keymaster_timer_on_expired_skipped_after_detach(hass, mock_store, store_lock): + """Test _on_expired is a no-op if detach() ran before it could fire. + + When the timer was queued to fire (async_call_later put _on_expired in + the run queue) but detach ran first, _on_expired must NOT fire the + action — the replacement timer will resume from the preserved store + entry and fire it instead. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + action_called = False + + async def mock_action(*args): + nonlocal action_called + action_called = True + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.async_call_later") as mock_call_later: + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + callback_fn = mock_call_later.call_args[1]["action"] + + # Detach BEFORE the callback fires (simulating reload) + await timer.detach() + mock_store.async_save.reset_mock() + + await callback_fn(dt_util.utcnow()) + + assert not action_called, "action must not fire on a detached timer" + mock_store.async_save.assert_not_called() + + +async def test_keymaster_timer_persist_after_detach_still_saves(hass, mock_store, store_lock): + """Test _persist_to_store after detach() still writes the entry. + + detach() preserves _end_time/_duration so an in-flight persist queued + behind the store lock will still save when it eventually acquires it. + Without this, a start() racing with config entry reload would silently + lose the autolock — the replacement timer would load an empty store. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + + await timer.detach() + mock_store.async_save.reset_mock() + + # Persist should still save the preserved end_time/duration + await timer._persist_to_store() + mock_store.async_save.assert_called_once() + saved_data = mock_store.async_save.call_args[0][0] + assert "test_timer" in saved_data + + +async def test_keymaster_timer_detach_awaits_in_flight_callback(hass, mock_store, store_lock): + """Test detach() awaits any in-flight _on_expired before returning. + + This ensures _update_lock observes whatever mutations the action made + on the old kmlock (e.g. pending_retry_lock) before it copies state to + the replacement. Without the await, detach would return while the + action was still running and the state copy would miss the mutation. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + action_started = asyncio.Event() + action_release = asyncio.Event() + action_completed = False + + async def slow_action(*args): + nonlocal action_completed + action_started.set() + await action_release.wait() + action_completed = True + + await timer.setup( + hass, kmlock, slow_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.async_call_later") as mock_call_later: + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + callback_fn = mock_call_later.call_args[1]["action"] + + # Make load return the entry so remove will save (and reach the action) + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": timer._end_time.isoformat(), "duration": 300}} + ) + + # Kick off the callback; it'll block inside slow_action + expire_task = asyncio.create_task(callback_fn(dt_util.utcnow())) + await action_started.wait() + assert not action_completed + + # Now start detach; it must wait for the in-flight callback + detach_task = asyncio.create_task(timer.detach()) + await asyncio.sleep(0) + assert not detach_task.done(), "detach must wait for in-flight callback" + + # Release the action, both should complete + action_release.set() + await asyncio.gather(expire_task, detach_task) + + assert action_completed + assert timer.hass is None # detach finished its cleanup + + +async def test_keymaster_timer_on_expired_remove_runs_when_detach_races( + hass, mock_store, store_lock +): + """Test _on_expired removes the store entry even if detach races. + + The in-flight callback owns the store entry and must remove it before + completing — otherwise the replacement timer's setup() would replay + the action. detach() awaits the callback so this remove always runs. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + action_started = asyncio.Event() + action_release = asyncio.Event() + save_calls: list[dict] = [] + + async def slow_action(*args): + action_started.set() + await action_release.wait() + + async def record_save(data): + save_calls.append(dict(data)) + + mock_store.async_save = AsyncMock(side_effect=record_save) + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": "2099-01-01T00:00:00+00:00", "duration": 300}} + ) + + await timer.setup( + hass, kmlock, slow_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + with patch("custom_components.keymaster.helpers.async_call_later") as mock_call_later: + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + callback_fn = mock_call_later.call_args[1]["action"] + + save_calls.clear() # ignore start()'s persist + expire_task = asyncio.create_task(callback_fn(dt_util.utcnow())) + await action_started.wait() + + # Reload arrives mid-action; detach must wait for the callback to finish + detach_task = asyncio.create_task(timer.detach()) + await asyncio.sleep(0) + assert not detach_task.done() + + action_release.set() + await asyncio.gather(expire_task, detach_task) + + # Final save must be the cleanup with entry removed + assert save_calls, "remove must have called async_save" + assert "test_timer" not in save_calls[-1], "store entry must be removed" + + +async def test_keymaster_timer_setup_recovery_removes_entry_atomically( + hass, mock_store, store_lock +): + """Test setup() recovery removes the expired entry under the lock. + + Two setup() calls on the same timer_id (e.g. during reload/rollback) + must not both schedule the recovery action. Removing the entry + atomically with detection ensures the second setup sees no entry + and doesn't schedule a duplicate firing. 
+ """ + expired_end_time = (dt_util.utcnow() - timedelta(minutes=5)).isoformat() + # Stateful store: load returns whatever is currently saved + state: dict = {"test_timer": {"end_time": expired_end_time, "duration": 300}} + + async def stateful_load(): + return dict(state) + + async def stateful_save(data): + state.clear() + state.update(data) + + mock_store.async_load = AsyncMock(side_effect=stateful_load) + mock_store.async_save = AsyncMock(side_effect=stateful_save) + + action_call_count = 0 + + async def mock_action(*args): + nonlocal action_call_count + action_call_count += 1 + + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + + # First setup discovers expired entry, claims (removes), schedules action + timer1 = KeymasterTimer() + await timer1.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + # Second setup on the SAME timer_id must see no entry and skip scheduling + timer2 = KeymasterTimer() + await timer2.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + await hass.async_block_till_done() + + assert action_call_count == 1, "recovery action must fire exactly once" + assert "test_timer" not in state, "entry must be removed by the recovering setup" + + +async def test_keymaster_timer_setup_resets_detached_flag(hass, mock_store, store_lock): + """Test setup() resets _detached so the rebound instance's cancel() works. + + Without this reset, cancel() would silently no-op on the rebound timer + (used by _update_lock's rollback path). + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + await timer.detach() + assert timer._detached is True + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + assert timer._detached is False + + +async def test_keymaster_timer_setup_rescues_preserved_state(hass, mock_store, store_lock): + """Test setup() rescues a preserved _end_time when the store has no entry. + + Scenario: start() called just before reload sets _end_time and queues + a persist behind the shared store lock. detach() preserves _end_time + but doesn't await the persist. If the queued persist hadn't run by + the time setup() acquires the lock, the store would still be empty + — but the in-memory _end_time is the valid pending autolock state. + setup() must claim that state (write it back, _resume from it) so + the autolock isn't silently dropped. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): + await timer.start() + preserved_end_time = timer._end_time + preserved_duration = timer._duration + await timer.detach() + assert timer._end_time == preserved_end_time + assert timer._duration == preserved_duration + + # Simulate the queued persist never having written: store returns empty + mock_store.async_load = AsyncMock(return_value={}) + mock_store.async_save.reset_mock() + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + # Rescue: the preserved state was claimed (saved + resumed) + mock_store.async_save.assert_called_once() + saved = mock_store.async_save.call_args[0][0] + assert "test_timer" in saved + assert timer.is_running + + +async def test_keymaster_timer_setup_rescues_expired_state_without_resurrecting( + hass, mock_store, store_lock +): + """Test setup()'s rescue path takes recovery for an already-expired _end_time. + + If reload took longer than the autolock delay, the preserved _end_time + is now in the past. We must NOT write it back to disk + _resume — that + would leave an expired entry that a subsequent reload could replay. + Instead, fire the recovery action immediately and leave the store clean. + """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + action_call_count = 0 + + async def mock_action(*args): + nonlocal action_call_count + action_call_count += 1 + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + # Manually set an in-the-past _end_time and detach so it's preserved + timer._end_time = dt_util.utcnow() - timedelta(seconds=5) + timer._duration = 300 + # Skip the async_load that would interfere; we want detach's force-persist + # to NOT write (entry already there in our mock) + mock_store.async_load = AsyncMock( + return_value={"test_timer": {"end_time": timer._end_time.isoformat(), "duration": 300}} + ) + await timer.detach() + # Now empty the store and re-setup: the rescue should detect expiry + mock_store.async_load = AsyncMock(return_value={}) + mock_store.async_save.reset_mock() + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + await hass.async_block_till_done() + + assert action_call_count == 1, "expired-rescue must fire action once" + mock_store.async_save.assert_not_called() # no resurrected entry on disk + assert timer._end_time is None + assert timer._duration is None + + +async def test_keymaster_timer_setup_clears_state_when_no_pending(hass, mock_store, store_lock): + """Test setup() clears stale fields when nothing to rescue. + + If the timer was idle (no _end_time) before detach AND store is + empty, setup() must leave fields cleared so is_running doesn't + lie. (Detach only preserves what was set, but a never-started + timer that gets detached and re-setup should look idle.) 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + # Manually leave stale state to simulate a degenerate detach + timer._end_time = None + timer._duration = None + await timer.detach() + mock_store.async_load = AsyncMock(return_value={}) + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + assert timer._end_time is None + assert timer._duration is None + assert not timer.is_running + + +async def test_keymaster_timer_persist_raises_if_store_lock_missing(hass, mock_store): + """Test _persist_to_store raises RuntimeError when _store_lock is missing. + + setup() always sets _store_lock; if it didn't run, persisting would + silently lose the autolock state across restarts. The raise converts + a silent failure into a loud one. + """ + timer = KeymasterTimer() + timer._store = mock_store + timer._timer_id = "test_timer" + timer._end_time = dt_util.utcnow() + timedelta(minutes=5) + # _store_lock intentionally NOT set + + with pytest.raises(RuntimeError, match="store_lock missing"): + await timer._persist_to_store() + + +async def test_keymaster_timer_remove_raises_if_store_lock_missing(hass, mock_store): + """Test _remove_from_store raises RuntimeError when _store_lock is missing. + + Without raising, a stale entry could remain and fire a phantom + autolock on the next restart. + """ + timer = KeymasterTimer() + timer._store = mock_store + timer._timer_id = "test_timer" + # _store_lock intentionally NOT set + + with pytest.raises(RuntimeError, match="store_lock missing"): + await timer._remove_from_store() + + +async def test_keymaster_timer_persist_recheck_aborts_after_cancel(hass, mock_store, store_lock): + """Test _persist_to_store's post-lock recheck aborts when cancel nulled _end_time. + + Scenario: persist is queued behind another lock holder. While queued, + cancel() nulls _end_time. When persist acquires the lock, the recheck + must catch the now-None _end_time and bail out without saving. 
+ """ + timer = KeymasterTimer() + kmlock = KeymasterLock( + lock_name="test_lock", + lock_entity_id="lock.test_lock", + keymaster_config_entry_id="test_entry", + ) + kmlock.autolock_min_day = 5 + + async def mock_action(*args): + pass + + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) + + timer._end_time = dt_util.utcnow() + timedelta(minutes=5) + timer._duration = 300 + mock_store.async_save.reset_mock() + + # Hold the lock externally so persist queues behind us + async with store_lock: + persist_task = asyncio.create_task(timer._persist_to_store()) + # Yield to let persist enter, hit the pre-guard (passes), and block on lock + await asyncio.sleep(0) + assert not persist_task.done(), "persist should be blocked on the lock" + # Simulate cancel() nulling _end_time while persist waits + timer._end_time = None + timer._duration = None + + # Lock released; persist resumes, recheck sees None, returns without saving + await persist_task + mock_store.async_save.assert_not_called() + + +async def test_keymaster_timer_remove_from_store_missing_key(hass, mock_store, store_lock): """Test _remove_from_store is a no-op when timer_id is not in the store.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -819,7 +1870,9 @@ async def test_keymaster_timer_remove_from_store_missing_key(hass, mock_store): async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) mock_store.async_save.reset_mock() @@ -830,7 +1883,9 @@ async def mock_action(*args): mock_store.async_save.assert_not_called() -async def test_keymaster_timer_setup_duration_missing_defaults_to_zero(hass, mock_store): +async def test_keymaster_timer_setup_duration_missing_defaults_to_zero( + hass, mock_store, store_lock +): """Test setup() defaults duration to 0 when missing from persisted data.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -845,13 +1900,15 @@ async def test_keymaster_timer_setup_duration_missing_defaults_to_zero(hass, moc async def mock_action(*args): pass - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) assert timer.is_running assert timer._duration == 0 -async def test_keymaster_timer_on_expired_callback(hass, mock_store): +async def test_keymaster_timer_on_expired_callback(hass, mock_store, store_lock): """Test the _on_expired callback fires the action then cancels the timer.""" timer = KeymasterTimer() kmlock = KeymasterLock( @@ -867,7 +1924,9 @@ async def mock_action(now): nonlocal action_called_with action_called_with = now - await timer.setup(hass, kmlock, mock_action, timer_id="test_timer", store=mock_store) + await timer.setup( + hass, kmlock, mock_action, timer_id="test_timer", store=mock_store, store_lock=store_lock + ) with patch("custom_components.keymaster.helpers.sun.is_up", return_value=True): await timer.start() @@ -907,7 +1966,7 @@ async def test_keymaster_timer_remove_from_store_no_store(hass): await timer._remove_from_store() -async def test_keymaster_timer_remove_from_store_no_timer_id(hass, mock_store): +async def test_keymaster_timer_remove_from_store_no_timer_id(hass, mock_store, store_lock): """Test _remove_from_store is a no-op when timer_id is None.""" timer = KeymasterTimer() timer._store = 
mock_store