diff --git a/appdaemon/app_management.py b/appdaemon/app_management.py
index 8b5efbfde..a571b3fda 100644
--- a/appdaemon/app_management.py
+++ b/appdaemon/app_management.py
@@ -1,6 +1,8 @@
 import asyncio
 import contextlib
+import copy
 import cProfile
+import functools
 import importlib
 import inspect
 import io
@@ -12,8 +14,7 @@
 import threading
 import traceback
 from collections import OrderedDict
-from collections.abc import AsyncGenerator, Iterable
-import copy
+from collections.abc import AsyncGenerator, Generator, Iterable
 from functools import partial, reduce, wraps
 from logging import Logger
 from pathlib import Path
@@ -76,10 +77,6 @@ class AppManagement:
     """Dictionary that maps full module names to sets of those that depend on them
     """

-    app_config: AllAppConfig
-    """Keeps track of which module and class each app comes from, along with any associated global modules. Gets set at the end of :meth:`~appdaemon.app_management.AppManagement.check_config`.
-    """
-
     active_apps_sensor: str = "sensor.active_apps"
     inactive_apps_sensor: str = "sensor.inactive_apps"
     total_apps_sensor: str = "sensor.total_apps"
@@ -111,8 +108,6 @@ def __init__(self, ad: "AppDaemon"):
         for service in services:
             register(service=service)

-        self.mtimes_python = FileCheck()
-
         self.active_apps = set()
         self.inactive_apps = set()
@@ -136,6 +131,7 @@ def module_dependencies(self) -> dict[str, set[str]]:

     @property
     def app_config(self) -> AllAppConfig:
+        """Keeps track of which module and class each app comes from, along with any associated global modules."""
         return self.dependency_manager.app_deps.app_config

     @property
@@ -143,7 +139,10 @@ def running_apps(self) -> set[str]:
         return set(app_name for app_name, mo in self.objects.items() if mo.running)

     def is_app_running(self, app_name: str) -> bool:
-        return (mo := self.objects.get(app_name, False)) and mo.running
+        match self.objects.get(app_name):
+            case ManagedObject(type="app", running=bool(running)):
+                return running
+        return False

     @property
     def loaded_globals(self) -> set[str]:
@@ -155,7 +154,10 @@ def loaded_globals(self) -> set[str]:

     @property
     def sequence_config(self) -> SequenceConfig | None:
-        return self.app_config.root.get("sequence")
+        match self.app_config.root.get("sequence"):
+            case SequenceConfig() as seq_cfg:
+                return seq_cfg
+        return None

     @property
     def valid_apps(self) -> set[str]:
@@ -236,14 +238,26 @@ async def add_entity(self, name: str, state, attributes):
     async def remove_entity(self, name: str):
         await self.AD.state.remove_entity("admin", f"app.{name}")

-    def app_rel_path(self, app_name: str) -> Path:
-        return self.app_config.root[app_name].config_path.relative_to(self.AD.app_dir.parent)
-
-    def err_app_path(self, app_obj: object) -> Path:
-        module_path = Path(sys.modules[app_obj.__module__].__file__)
-        if module_path.is_relative_to(self.AD.app_dir.parent):
-            return module_path.relative_to(self.AD.app_dir.parent)
-        return module_path
+    def app_cfg_rel_path(self, app_name: str) -> Path:
+        """Get a Path object to the config file for the app, relative to the apps directory."""
+        match self.app_config.root.get(app_name):
+            case AppConfig(config_path=Path() as cfg_path):
+                if cfg_path.is_relative_to(self.AD.app_dir.parent):
+                    return cfg_path.relative_to(self.AD.app_dir.parent)
+                return cfg_path
+        raise KeyError(f"No config path for app {app_name}")
+
+    def app_module_rel_path(self, app_obj: object) -> Path:
+        """Get a Path object to the module file for the app, relative to the apps directory.
+
+        This uses the loaded python modules from the ``sys.modules`` dict."""
+        match sys.modules[app_obj.__module__].__file__:
+            case str(file):
+                module_path = Path(file)
+                if module_path.is_relative_to(self.AD.app_dir.parent):
+                    return module_path.relative_to(self.AD.app_dir.parent)
+                return module_path
+        raise KeyError(f"No module path for app object {app_obj}")

     async def init_admin_stats(self):
         # create sensors
@@ -288,22 +302,36 @@ def get_app(self, name: str):
     def get_app_info(self, name: str):
         return self.objects.get(name)

-    def get_app_instance(self, name: str, id):
-        if (obj := self.objects.get(name)) and obj.id == id:
-            return obj.object
+    def get_app_instance(self, name: str, id: str):
+        match self.objects.get(name):
+            case ManagedObject(type="app", object=obj, id=str(oid)) if oid == id:
+                return obj

     def get_app_pin(self, name: str) -> bool:
-        return self.objects[name].pin_app
+        match self.objects.get(name):
+            case ManagedObject(type="app", pin_app=bool(pin)):
+                return pin
+        return False

-    def set_app_pin(self, name: str, pin: bool):
+    def set_app_pin(self, name: str, pin: bool) -> None:
         self.objects[name].pin_app = pin
-        utils.run_coroutine_threadsafe(
-            self,
-            self.AD.threading.calculate_pin_threads(),
-        )

-    def get_pin_thread(self, name: str) -> int:
-        return self.objects[name].pin_thread
+    def get_pin_thread(self, name: str) -> int | None:
+        match self.objects.get(name):
+            case ManagedObject(type="app", pin_app=True, pin_thread=int(pin_thread)):
+                return pin_thread
+        return None
+
+    def pinned_apps(self) -> Generator[str]:
+        """Yields the names of the pinned apps currently managed."""
+        for app_name, obj in self.objects.items():
+            match obj:
+                case ManagedObject(type="app", pin_app=True):
+                    yield app_name
+
+    def pinned_app_count(self) -> int:
+        """Returns the number of pinned apps currently managed."""
+        return len(list(self.pinned_apps()))

     def set_pin_thread(self, name: str, thread: int):
         self.objects[name].pin_thread = thread
@@ -313,7 +341,7 @@ async def initialize_app(self, app_name: str):
         app_obj = self.objects[app_name].object

         # Get the path that will be used for the exception
-        err_path = self.err_app_path(app_obj)
+        err_path = self.app_module_rel_path(app_obj)

         try:
             init_func = app_obj.initialize
@@ -415,7 +443,7 @@ async def start_app(self, app_name: str):
         # assert dependencies
         dependencies = app_cfg.dependencies
         for dep_name in dependencies:
-            rel_path = self.app_rel_path(app_name)
+            rel_path = self.app_cfg_rel_path(app_name)
             exc_args = (
                 app_name,
                 rel_path,
@@ -525,20 +553,47 @@ async def safe_create(self: "AppManagement"):
                     module_name,
                 )

-                if (pin := cfg.pin_thread) is not None and pin >= self.AD.threading.total_threads:
-                    raise ade.PinOutofRange(pin_thread=pin, total_threads=self.AD.threading.total_threads)
-                if (obj := self.objects.get(app_name)) and obj.pin_thread is not None:
-                    pin = obj.pin_thread
-                # else pin is already None from cfg.pin_thread
+                # Deal with the thread pinning settings
+                if self.AD.config.fully_async:
+                    pin_thread = None
+                    should_be_pinned = False
+                else:
+                    should_be_pinned = cfg.pin_app if cfg.pin_app is not None else self.AD.config.pin_apps
+
+                # This happens if you try to pin an app to a thread number that's too high
+                if should_be_pinned and cfg.pin_thread is not None:
+                    if cfg.pin_thread < 0:
+                        raise ade.NegativePinThread(cfg.pin_thread)
+                    if cfg.pin_thread >= self.AD.threading.thread_count:
+                        raise ade.PinOutofRange(
+                            pin_thread=cfg.pin_thread,
+                            total_threads=self.AD.threading.thread_count
+                        )
+
+                # Assign a thread ID if necessary
+                if should_be_pinned and cfg.pin_thread is None:
+                    await self.AD.threading.create_initial_threads()
+                    counts = self.AD.threading.thread_app_counts()
+                    _, min_tid = min((v, k) for k, v in counts.items())
+                    pin_thread = min_tid
+                else:
+                    pin_thread = cfg.pin_thread

                 # This module should already be loaded and stored in sys.modules
                 mod_obj = await utils.run_in_executor(self, importlib.import_module, module_name)
+                mod_name = mod_obj.__name__
+                match mod_obj.__file__:
+                    case str(mod_file):
+                        mod_path = Path(mod_file)
+                        if mod_path.is_relative_to(self.AD.app_dir.parent):
+                            mod_path = mod_path.relative_to(self.AD.app_dir.parent)
+                    case _:
+                        mod_path = Path("")

                 try:
                     app_class: type[ADBase | ADAPI] = getattr(mod_obj, class_name)
                 except AttributeError:
-                    path = mod_obj.__file__ or mod_obj.__path__._path[0]
-                    raise ade.MissingAppClass(app_name, mod_obj.__name__, Path(path).relative_to(self.AD.app_dir.parent), class_name)
+                    raise ade.MissingAppClass(app_name, mod_name, mod_path, class_name)

                 new_obj = app_class(self.AD, cfg)
                 assert isinstance(getattr(new_obj, "AD", None), type(self.AD)), "App objects need to have a reference to the AppDaemon object"
@@ -547,15 +602,24 @@
                 self.objects[app_name] = ManagedObject(
                     type="app",
                     object=new_obj,
-                    pin_app=self.AD.threading.app_should_be_pinned(app_name),
-                    pin_thread=pin,
+                    pin_app=should_be_pinned,
+                    pin_thread=pin_thread,
                     running=False,
-                    module_path=Path(mod_obj.__file__),
+                    module_path=mod_path,
                 )

                 # load the module path into app entity
-                module_path = await utils.run_in_executor(self, os.path.abspath, mod_obj.__file__)
+                module_path = await utils.run_in_executor(self, os.path.abspath, mod_path)
                 await self.set_state(app_name, state="created", module_path=module_path)
+                if should_be_pinned and pin_thread is not None:
+                    thread_entity = f"thread.thread-{pin_thread}"
+                    counts = self.AD.threading.thread_app_counts()
+                    await self.AD.state.set_state(
+                        "_threading",
+                        "admin",
+                        thread_entity,
+                        pinned_apps=counts[pin_thread],
+                    )
                 return new_obj
             except Exception as exc:
                 await self.set_state(app_name, state="compile_error")
@@ -587,16 +651,22 @@ def add_plugin_object(self, name: str, object: "PluginBase") -> None:
         )

     async def terminate_sequence(self, name: str) -> bool:
-        """Terminate the sequence"""
-        assert self.objects.get(name, {}).get("type") == "sequence", f"'{name}' is not a sequence"
-
-        if name in self.objects:
-            del self.objects[name]
+        """Terminate the sequence.
-        await self.AD.callbacks.clear_callbacks(name)
-        self.AD.futures.cancel_futures(name)
-
-        return True
+
+        Returns:
+            bool: Whether the sequence was found and terminated
+        """
+        match self.objects.get(name):
+            case ManagedObject(type="sequence"):
+                del self.objects[name]
+                await self.AD.callbacks.clear_callbacks(name)
+                self.AD.futures.cancel_futures(name)
+                return True
+            case None:
+                self.logger.warning("Nothing found for name '%s'", name)
+            case _ as obj:
+                self.logger.warning("Object found for '%s', but it's not a sequence: %s", name, obj)
+        return False

     async def read_all(self, config_files: Iterable[Path] | None) -> AllAppConfig:
         config_files = config_files if config_files is not None else self.dependency_manager.app_config_files
@@ -667,8 +737,10 @@ async def check_app_config_files(self, update_actions: UpdateActions):
         # TODO: Move this behavior to the model validation step eventually
         # It has to be here for now because the files get read in multiple places
         for gm in freshly_read_cfg.global_modules():
-            rel_path = gm.config_path.relative_to(self.AD.app_dir)
-            self.logger.warning(f"Global modules are deprecated: '{gm.name}' defined in {rel_path}")
+            cfg_path = gm.config_path
+            if cfg_path is not None and cfg_path.is_relative_to(self.AD.app_dir):
+                rel_path = cfg_path.relative_to(self.AD.app_dir)
+                self.logger.warning(f"Global modules are deprecated: '{gm.name}' defined in {rel_path}")

         if gm := freshly_read_cfg.root.get("global_modules"):
             gm = ", ".join(f"'{g}'" for g in gm)
@@ -722,14 +794,6 @@ async def check_app_config_files(self, update_actions: UpdateActions):
             and await self.get_state(name) != "compile_error"
         }  # fmt: skip

-        if self.AD.threading.pin_apps:
-            active_apps = self.app_config.active_app_count
-            if active_apps > self.AD.threading.thread_count:
-                threads_to_add = active_apps - self.AD.threading.thread_count
-                self.logger.debug(f"Adding {threads_to_add} threads based on the active app count")
-                for _ in range(threads_to_add):
-                    await self.AD.threading.add_thread(silent=False)
-
     @utils.executor_decorator
     def read_config_file(self, file: Path) -> AllAppConfig:
         """Reads a single YAML or TOML file into a pydantic model. This also sets the ``config_path`` attribute of any AppConfigs.
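
Note: the refactors in this patch lean heavily on structural pattern matching with
class patterns, e.g. `case ManagedObject(type="sequence"):` above. A minimal
standalone sketch of the idiom (illustrative stand-in class, not the real
ManagedObject):

    from dataclasses import dataclass

    @dataclass
    class Managed:
        type: str
        running: bool = False

    def is_running(objects: dict[str, Managed], name: str) -> bool:
        match objects.get(name):
            # Keyword patterns match attributes; bool(running) checks the
            # attribute's type and captures its value in one step.
            case Managed(type="app", running=bool(running)):
                return running
        return False  # None (missing key) and non-app objects fall through

    assert is_running({"a": Managed("app", True)}, "a") is True
    assert is_running({}, "a") is False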
@@ -744,7 +808,7 @@ def read_config_file(self, file: Path) -> AllAppConfig:
         return config_model

     @utils.executor_decorator
-    def import_module(self, module_name: str) -> int:
+    def import_module(self, module_name: str):
         """Reads an app into memory by importing or reloading the module it needs"""
         try:
             if mod := sys.modules.get(module_name):
@@ -756,15 +820,26 @@
                 self.logger.debug("Importing '%s'", module_name)
                 importlib.import_module(module_name)
         except Exception as exc:
+            # Try to extract the path from the exception
+            path = None
             match exc:
-                case SyntaxError():
-                    path = Path(exc.filename)
-                case NameError():
-                    path = Path(traceback.extract_tb(exc.__traceback__)[-1].filename)
+                case ImportError(path=str(filename)):
+                    path = Path(filename)
+                case SyntaxError(filename=str(filename)):
+                    path = Path(filename)
                 case _:
-                    raise exc
-            mtime = self.dependency_manager.python_deps.files.mtimes.get(path)
-            self.dependency_manager.python_deps.bad_files.add((path, mtime))
+                    tb = traceback.extract_tb(exc.__traceback__)
+                    match tb[-1]:
+                        case traceback.FrameSummary(filename=str(filename)):
+                            path = Path(filename)
+
+            # If there was a path found and it was a tracked file, mark it as bad
+            match path:
+                case Path() as path if path in self.dependency_manager.python_deps.files.mtimes:
+                    match self.dependency_manager.python_deps.files.mtimes.get(path):
+                        case float(mtime):
+                            self.dependency_manager.python_deps.bad_files.add((path, mtime))
+
+            raise exc

     @utils.executor_decorator
@@ -1188,7 +1263,7 @@ async def _create_and_start_apps(self, update_actions: UpdateActions) -> None:
             self.logger.warning(f"App '{app_name}' not found in app config")

         # Need to have already created the ManagedObjects for the threads to get assigned
-        await self.AD.threading.calculate_pin_threads()
+        await self.AD.threading.assign_app_threads()

         # Account for failures and apps that depend on them
         failed = update_actions.apps.failed
@@ -1288,75 +1363,68 @@ async def _handle_sequence_change(self, update_actions: UpdateActions, update_mo
         if update_actions.sequences.changes or update_mode == UpdateMode.INIT:
             await self.AD.sequences.update_sequence_entities(self.sequence_config)

-    @utils.executor_decorator
-    def create_app(self, app: str = None, **kwargs):
-        """Used to create an app, which is written to a config file"""
-
-        executed = True
-        app_config = {}
-        new_config = OrderedDict()
+    async def create_app(self, app: str, **app_config) -> None:
+        """Create an app

-        app_module = kwargs.get("module")
-        app_class = kwargs.get("class")
+        Args:
+            app (str): The name of the app to create.
+
+        App Config Kwargs:
+            class (str): The class name of the app to create.
+            module (str): The module where the app class is located.
+            write_app_file (bool, optional): Whether to write the app config to a file. Defaults to True.
+            app_dir (str, optional): The directory to write the app file to, relative to the appdaemon apps directory. Defaults to "ad_apps".
+            app_file (str, optional): The name of the app file to write, including extension. Defaults to "{app_name}.yaml".
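+
+        Example (hypothetical values; ``class`` is a reserved word in Python, so it
+        has to be supplied by unpacking a dict rather than as a bare keyword):
+            >>> await self.create_app(
+            ...     "living_room_lights",
+            ...     **{"module": "lights", "class": "LightController"},
+            ... )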
+ """ - if app is None: # app name not given - # use the module name as the app's name - app = app_module + match app_config: + case {"module": str(app_module), "class": str(app_class)}: + self.logger.info("Creating app %s (module: %s, class: %s)", app, app_module, app_class) + case _: + self.logger.error("Could not create app %s, as module and class is required", app) + return - app_config[app] = kwargs + new_config = OrderedDict() - else: - if app_module is None and app in kwargs: - app_module = kwargs[app].get("module") - app_class = kwargs[app].get("class") + write_app_file: bool = app_config.pop("write_app_file", True) + if write_app_file: + app_directory: Path = self.AD.app_dir / app_config.pop("app_dir", "ad_apps") + app_file: Path = app_directory / app_config.pop("app_file", f"{app}{self.AD.config.ext}") + if app_file.exists() and app_file.is_file(): + # the file exists so there might be apps there already so read to update + # now open the file and edit the yaml + new_cfg = await self.read_config_file(app_file) + new_config.update(new_cfg.model_dump(mode="python", by_alias=True)) - app_config[app] = kwargs[app] + # now load up the new config + new_config.update({app: app_config}) + new_config.move_to_end(app) + try: + # Make sure the writing doesn't get done in the MainThread + await self.AD.loop.run_in_executor( + executor=self.AD.executor, + func=functools.partial( + utils.write_config_file, + app_file, + **new_config + ) + ) + except Exception as exc: + raise ade.AppConfigWriteFail(app_name=app, path=app_file) from exc else: - app_config[app] = kwargs - - if app_module is None or app_class is None: - self.logger.error("Could not create app %s, as module and class is required", app) - return False - - app_directory: Path = self.AD.app_dir / kwargs.pop("app_dir", "ad_apps") - app_file: Path = app_directory / kwargs.pop("app_file", f"{app}{self.AD.config.ext}") - app_directory = app_file.parent # in case the given app_file is multi level - - try: - app_directory.mkdir(parents=True, exist_ok=True) - except Exception: - self.logger.error("Could not create directory %s", app_directory) - return False - - if app_file.is_file(): - # the file exists so there might be apps there already so read to update - # now open the file and edit the yaml - new_config.update(self.read_config_file(app_file)) - - # now load up the new config - new_config.update(app_config) - new_config.move_to_end(app) - - # at this point now to create write to file - try: - utils.write_config_file(app_file, **new_config) - - data = { - "event_type": "app_created", - "data": {"app": app, **app_config[app]}, - } - self.AD.loop.create_task(self.AD.events.process_event("admin", data)) - - except Exception: - self.error.warning("-" * 60) - self.error.warning("Unexpected error while writing to file: %s", app_file) - self.error.warning("-" * 60) - self.error.warning(traceback.format_exc()) - self.error.warning("-" * 60) - executed = False - - return executed + data = { + "event_type": "app_created", + "data": {"app": app, **app_config}, + } + self.AD.loop.create_task(self.AD.events.process_event("admin", data)) + else: + # just update the in memory config + app_config['name'] = app + self.app_config.root[app] = AppConfig.model_validate(app_config) + await self.create_app_object(app) + await self.start_app(app) + return @utils.executor_decorator def edit_app(self, app: str, **kwargs): @@ -1451,15 +1519,11 @@ async def app_run_context(self, app: str, **kwargs): self.update_app(app, **kwargs) self.logger.debug("Temporarily 

-        # Ensure there's at least one thread available
-        if not self.AD.threading.thread_count:
-            await self.AD.threading.create_initial_threads()
-
         created_app_object = False
         if app not in self.objects:
             self.logger.debug("Creating ManagedObject for app '%s'", app)
             await self.create_app_object(app)
-            await self.AD.threading.calculate_pin_threads()
+            await self.AD.threading.assign_app_threads()
             created_app_object = True

         await self.start_app(app)
@@ -1548,11 +1612,11 @@ async def manage_services(
                 asyncio.create_task(self.check_app_updates(mode=UpdateMode.RELOAD_APPS))
             case (_, str()):
                 # first the check app updates needs to be stopped if on
-                mode = copy.deepcopy(self.AD.production_mode)
+                # mode = copy.deepcopy(self.AD.production_mode)

-                if mode is False:  # it was off
-                    self.AD.production_mode = True
-                    await self.AD.utility.sleep(0.5, timeout_ok=True)
+                # if mode is False:  # it was off
+                #     self.AD.production_mode = True
+                #     await self.AD.utility.sleep(0.5, timeout_ok=True)

                 match service:
                     case "enable":
@@ -1566,9 +1630,9 @@
                     case "remove":
                         result = await self.remove_app(app, **kwargs)

-                if mode is False:  # meaning it was not in production mode
-                    await self.AD.utility.sleep(1, timeout_ok=True)
-                    self.AD.production_mode = mode
+                # if mode is False:  # meaning it was not in production mode
+                #     await self.AD.utility.sleep(1, timeout_ok=True)
+                #     self.AD.production_mode = mode

                 return result
             case _:
diff --git a/appdaemon/events.py b/appdaemon/events.py
index 117931f3f..d73924b02 100644
--- a/appdaemon/events.py
+++ b/appdaemon/events.py
@@ -43,7 +43,7 @@ async def add_event_callback(
         oneshot: bool = False,
         pin: bool | None = None,
         pin_thread: int | None = None,
-        kwargs: dict[str, Any] = None,  # Intentionally not expanding the kwargs here so that there are no name clashes
+        kwargs: dict[str, Any] | None = None,  # Intentionally not expanding the kwargs here so that there are no name clashes
     ) -> str | list[str] | None:
         """Add an event callback to AppDaemon's internal dicts.
@@ -67,6 +67,9 @@
         Returns:
             ``None`` or the reference to the callback handle.
         """
+        # Create the default kwargs dict
+        kwargs = {} if kwargs is None else kwargs
+
         if oneshot:  # this is still a little awkward, but it works until this can be refactored
             # This needs to be in the kwargs dict here that gets passed around later, so that the dispatcher knows to
             # cancel the callback after the first run.
diff --git a/appdaemon/exceptions.py b/appdaemon/exceptions.py
index e143a44fa..6310b1bf2 100644
--- a/appdaemon/exceptions.py
+++ b/appdaemon/exceptions.py
@@ -384,6 +384,15 @@ class BadAppConfigFile(AppDaemonException):
     path: Path


+@dataclass
+class AppConfigWriteFail(AppDaemonException):
+    app_name: str
+    path: Path
+
+    def __str__(self):
+        return f"Failed to write app '{self.app_name}' config to '{self.path}'"
+
+
 @dataclass
 class TimeOutException(AppDaemonException):
     msg: str
@@ -440,7 +449,19 @@ class PinOutofRange(AppDaemonException):
     total_threads: int

     def __str__(self):
-        return f"Pin thread {self.pin_thread} out of range. Must be between 0 and {self.total_threads - 1}"
+        if self.total_threads != 0:
+            max_thread_id = self.total_threads - 1
+            return f"Pin thread {self.pin_thread} out of range. Must be between 0 and {max_thread_id}"
Must be between 0 and {max_thread_id}" + else: + return f"No pin threads are allowed in this configuration: {self.pin_thread}" + + +@dataclass +class NegativePinThread(AppDaemonException): + pin_thread: int + + def __str__(self): + return f"Pin threads can't be negative: {self.pin_thread}" @dataclass diff --git a/appdaemon/logging.py b/appdaemon/logging.py index dc6313180..8f4d0c0bd 100644 --- a/appdaemon/logging.py +++ b/appdaemon/logging.py @@ -5,11 +5,10 @@ import traceback import uuid from collections import OrderedDict -from logging import LogRecord, Logger, StreamHandler +from logging import Logger, LogRecord, StreamHandler from logging.handlers import RotatingFileHandler from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union - import appdaemon.utils as utils from appdaemon.appdaemon import AppDaemon @@ -184,7 +183,7 @@ class Logging(metaclass=utils.Singleton): """ name: str = "_logging" - config: Dict[str, Dict[str, Any]] + config: dict[str, dict[str, Any]] log_levels = { "CRITICAL": 50, diff --git a/appdaemon/models/config/app.py b/appdaemon/models/config/app.py index d677ccf5e..d4d054ed6 100644 --- a/appdaemon/models/config/app.py +++ b/appdaemon/models/config/app.py @@ -1,7 +1,7 @@ import logging import sys from abc import ABC -from collections.abc import Iterable, Iterator +from collections.abc import Generator, Iterable, Iterator from copy import deepcopy from pathlib import Path from typing import Annotated, Any, Literal @@ -54,7 +54,7 @@ class AppConfig(BaseApp, extra="allow"): class_name: str = Field(alias="class") """Name of the class to use for the app. Must be accessible as an attribute of the imported `module_name` """ - pin_app: bool = True + pin_app: bool | None = None """Pin this app to a particular thread. This is used to ensure that the app is always run on the same thread.""" pin_thread: int | None = None """Which thread ID to pin this app to.""" @@ -200,10 +200,25 @@ def apps_from_file(self, paths: Iterable[Path]): cfg.config_path in paths ) # fmt: skip - @property + def active_apps(self) -> Generator[tuple[str, AppConfig]]: + for app_name, cfg in self.root.items(): + match cfg: + case AppConfig(disable=False): + yield app_name, cfg + def active_app_count(self) -> int: """Active in this case means not disabled""" - return len([cfg for cfg in self.root.values() if isinstance(cfg, AppConfig) and not cfg.disable]) + return len(list(self.active_apps())) + + def pinned_apps(self) -> Generator[tuple[str, AppConfig]]: + for app_name, cfg in self.active_apps(): + match cfg: + case AppConfig(pin_app=pin) if bool(pin): + yield app_name, cfg + + def pinned_app_count(self) -> int: + """Active in this case means not disabled""" + return len(list(self.pinned_apps())) def get_active_app_count(self) -> tuple[int, int, int]: active = 0 diff --git a/appdaemon/models/config/appdaemon.py b/appdaemon/models/config/appdaemon.py index 030f1ac67..2cc29a0fd 100644 --- a/appdaemon/models/config/appdaemon.py +++ b/appdaemon/models/config/appdaemon.py @@ -108,7 +108,7 @@ class AppDaemonConfig(BaseModel, extra="allow"): ascii_encode: bool = True """Set to false to disable ascii encoding of log messages. This is useful for non-English languages.""" - load_distribution: str = "roundrobbin" + load_distribution: Literal["load", "random", "roundrobin"] = "roundrobin" threads: ( Annotated[ int | None, @@ -124,8 +124,13 @@ class AppDaemonConfig(BaseModel, extra="allow"): """If ``True``, AppDaemon apps will be each pinned to a particular thread. 
    This avoids complications around re-entrant code and locking of instance variables."""
     pin_threads: int | None = None
-    """Number of threads to use for pinned apps, allowing the user to section off a sub-pool just for pinned apps. By
-    default all threads are used for pinned apps."""
+    """Number of threads to use for pinned apps.
+
+    AppDaemon will use the threads with ID 0 through (`pin_threads` - 1) for pinned apps. This allows the user to section off
+    a sub-pool just for pinned apps. By default all threads are used for pinned apps.
+
+    This value initially comes from the user configuration, but is sometimes later modified.
+    """
     thread_duration_warning_threshold: float = 10
     threadpool_workers: int = 10
     """Number of threads in AppDaemon's internal thread pool, which can be used to execute functions asynchronously in
@@ -191,11 +196,12 @@ def model_post_init(self, __context: Any):

         self.ext = ".toml" if self.write_toml else ".yaml"

-        if self.total_threads is not None:
-            self.pin_apps = False
-
         if self.pin_threads is not None and self.total_threads is not None:
             # assert self.total_threads is not None, "Using pin_threads requires total_threads to be set."
             assert self.pin_threads <= self.total_threads, (
                 "Number of pin threads has to be less than or equal to total threads."
             )
+
+    @property
+    def fully_async(self) -> bool:
+        return self.total_threads == 0
diff --git a/appdaemon/models/internal/app_management.py b/appdaemon/models/internal/app_management.py
index c2918b880..2cb0337d4 100644
--- a/appdaemon/models/internal/app_management.py
+++ b/appdaemon/models/internal/app_management.py
@@ -130,9 +130,9 @@ class UpdateActions:
 class ManagedObject:
     type: Literal["app", "plugin", "sequence"]
     object: Any
+    pin_app: bool
     id: str = field(default_factory=lambda: uuid.uuid4().hex)
     module_path: Path | None = None
-    pin_app: bool | None = None
     pin_thread: int | None = None
     running: bool = False
     callback_counter: int = 0
diff --git a/appdaemon/scheduler.py b/appdaemon/scheduler.py
index e6e7ef22e..0b883e0ee 100644
--- a/appdaemon/scheduler.py
+++ b/appdaemon/scheduler.py
@@ -153,31 +153,24 @@ async def insert_schedule(
         callback: Callable | None,
         repeat: bool = False,
         type_: str | None = None,
-        interval: timedelta = timedelta(),
-        offset: timedelta = timedelta(),
+        interval: timedelta | None = None,
+        offset: timedelta | None = None,
         random_start: timedelta | None = None,
         random_end: timedelta | None = None,
         pin: bool | None = None,
         pin_thread: int | None = None,
         **kwargs,
     ) -> str:
+        interval = interval if interval is not None else timedelta()
+        offset = offset if offset is not None else timedelta()
+
         assert isinstance(aware_dt, datetime), "aware_dt must be a datetime object"
         assert aware_dt.tzinfo is not None, "aware_dt must be timezone aware"

         # aware_dt will include a timezone of some sort - convert to utc timezone
         basetime = aware_dt.astimezone(pytz.utc)

-        if pin_thread is not None:
-            # If the pin_thread is specified, force pin_app to True
-            pin_app = True
-        else:
-            # Otherwise, use the current pin_app setting in app management
-            if pin is None:
-                pin_app = self.AD.app_management.objects[name].pin_app
-            else:
-                pin_app = pin
-
-        if pin_thread is None:
-            pin_thread = self.AD.app_management.objects[name].pin_thread
+        pin_app, pin_thread = self.AD.threading.determine_thread(name, pin, pin_thread)
+        self.logger.debug("App '%s' scheduled with pin_app=%s, pin_thread=%s", name, pin_app, pin_thread)

         # Ensure that there's a dict available for this app name
         if name not in self.schedule:
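
Note: the removed block above shows the precedence that determine_thread (threads.py,
below) now centralizes: an explicit pin_thread forces pinning, otherwise the app's
settings from app management fill in as defaults. A standalone sketch of that
precedence (illustrative names, not the real API):

    def resolve_pin(pin: bool | None, pin_thread: int | None,
                    app_pin: bool, app_thread: int | None) -> tuple[bool, int | None]:
        # An explicitly requested thread always implies pinning.
        if pin_thread is not None:
            return True, pin_thread
        # Otherwise the app-level defaults fill in whatever wasn't specified.
        return (app_pin if pin is None else pin), app_thread

    assert resolve_pin(None, 3, app_pin=False, app_thread=None) == (True, 3)
    assert resolve_pin(None, None, app_pin=True, app_thread=1) == (True, 1)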
diff --git a/appdaemon/services.py b/appdaemon/services.py
index 643b0f7f8..f5b0a1605 100644
--- a/appdaemon/services.py
+++ b/appdaemon/services.py
@@ -74,8 +74,8 @@ def register_service(
             domain (str): Domain of the service
             service (str): Name of the service
             callback (Callable): Callback function to be called when the service is invoked
-            __silent (bool, optional): If True, do not send a registration event. Defaults to False.
-            __name (str | None, optional): Name of the app registering the service. Defaults to None.
+            silent (bool, optional): If True, do not send a registration event. Defaults to False (send the event).
+            name (str | None, optional): Name of the app registering the service. Defaults to None.
             **kwargs: Additional keyword arguments to be passed to the callback function.
         """
         self.logger.debug(
diff --git a/appdaemon/state.py b/appdaemon/state.py
index 240d9cce6..d1e18b806 100644
--- a/appdaemon/state.py
+++ b/appdaemon/state.py
@@ -277,8 +277,8 @@ async def add_state_callback(
             A string made from ``uuid4().hex`` that is used to identify the callback. This can be used to cancel
             the callback later.
         """
-        if kwargs is None:
-            kwargs = {}
+        # Create the default kwargs dict
+        kwargs = {} if kwargs is None else kwargs

         if oneshot:  # this is still a little awkward, but it works until this can be refactored
             # This needs to be in the kwargs dict here that gets passed around later, so that the dispatcher knows to
@@ -644,7 +644,7 @@ async def get_state(
         attribute: str | None = None,
         default: Any | None = None,
         copy: bool = True,
-    ):
+    ) -> Any:
         self.logger.debug("get_state: %s.%s %s %s", entity_id, attribute, default, copy)

         def maybe_copy(data):
@@ -711,16 +711,15 @@ def parse_state(

         return new_state

-    async def add_to_state(self, name: str, namespace: str, entity_id: str, i):
-        value = await self.get_state(name, namespace, entity_id)
-        if value is not None:
-            value += i
-            await self.set_state(name, namespace, entity_id, state=value)
+    async def add_to_state(self, name: str, namespace: str, entity_id: str, i: int = 1):
+        match (await self.get_state(name, namespace, entity_id)):
+            case (int() | float()) as value:
+                await self.set_state(name, namespace, entity_id, state=value + i)

     async def add_to_attr(self, name: str, namespace: str, entity_id: str, attr, i):
         state = await self.get_state(name, namespace, entity_id, attribute="all")
         if state is not None:
-            state["attributes"][attr] = copy(state["attributes"][attr]) + i
+            state["attributes"][attr] = copy(state["attributes"].get(attr, 0)) + i
             await self.set_state(name, namespace, entity_id, attributes=state["attributes"])

     def register_state_services(self, namespace: str) -> None:
@@ -736,7 +735,7 @@ async def _state_service(
             *,
             entity_id: str | None = None,
             persist: bool = False,
-            writeback: Literal["safe", "hybrid"] = "safe",
+            writeback: ADWritebackType = ADWritebackType.safe,
             **kwargs: Any
         ) -> Any | None:
             self.logger.debug("state_services: %s, %s, %s, %s", namespace, domain, service, kwargs)
@@ -879,12 +878,13 @@ def set_state_simple(self, namespace: str, entity_id: str, state: Any):

     async def set_namespace_state(self, namespace: str, state: dict[str, Any], persist: bool = False):
         if persist:
-            await self.add_persistent_namespace(namespace, writeback="safe")
+            await self.add_persistent_namespace(namespace, writeback=ADWritebackType.safe)
             self.state[namespace].update(state)
         else:
             # first in case it had been created before, it should be deleted
-            if isinstance(self.state.get(namespace), utils.PersistentDict):
-                await self.remove_persistent_namespace(namespace, self.state[namespace])
+            match self.state.get(namespace):
+                case utils.PersistentDict() as ns:
+                    await self.remove_persistent_namespace(namespace, ns)
             self.state[namespace] = state

     def update_namespace_state(self, namespace: str | list[str], state: dict):
diff --git a/appdaemon/threads.py b/appdaemon/threads.py
index 51d7e4f39..075da1a7e 100644
--- a/appdaemon/threads.py
+++ b/appdaemon/threads.py
@@ -4,20 +4,21 @@
 import inspect
 import logging
 import re
-import sys
 import threading
 import traceback
-from collections.abc import Callable
+from collections import deque
+from collections.abc import Callable, Generator, Iterable
+from itertools import cycle
 from logging import Logger
 from queue import Queue
 from random import randint
 from threading import Thread
-from typing import TYPE_CHECKING, Any, ClassVar
-
+from typing import TYPE_CHECKING, Any
 from . import exceptions as ade
 from . import utils
 from .models.config.app import AppConfig
+from .models.internal.app_management import ManagedObject

 if TYPE_CHECKING:
     from .adbase import ADBase
@@ -46,10 +47,12 @@ class Threading:
     :class:`~threading.Thread` and :class:`~queue.Queue` objects respectively.
     """

-    last_stats_time: ClassVar[datetime.datetime] = datetime.datetime.fromtimestamp(0)
-    callback_list: list[dict]
+    last_stats_time: datetime.datetime = datetime.datetime.min
+    last_callbacks: deque[dict[str, Any]]
+
+    _roundrobin_cycle: Iterable[str]
+    """Iterator that produces the name of the next thread when using the round robin load distribution method."""

-    next_thread: int = 0
     current_callbacks_executed: int = 0
     current_callbacks_fired: int = 0
@@ -62,40 +65,13 @@ def __init__(self, ad: "AppDaemon"):
         self.threads = {}

         # A few shortcuts
-        self.add_entity = ad.state.add_entity
-        self.get_state = ad.state.get_state
-        self.set_state = ad.state.set_state
-        self.add_to_state = ad.state.add_to_state
-        self.add_to_attr = ad.state.add_to_attr
+        self.add_entity = functools.partial(ad.state.add_entity, "admin")
+        self.get_state = functools.partial(ad.state.get_state, self.name, "admin")
+        self.set_state = functools.partial(ad.state.set_state, self.name, "admin")
+        self.add_to_state = functools.partial(ad.state.add_to_state, self.name, "admin")
+        self.add_to_attr = functools.partial(ad.state.add_to_attr, self.name, "admin")

-        self.callback_list = []
-
-    @property
-    def auto_pin(self) -> bool:
-        """This is derived from pin_apps and total_threads, and is True by default."""
-        return self.pin_apps and self.total_threads is None
-
-    @property
-    def pin_apps(self) -> bool:
-        "Config flag for whether each app should be pinned to a thread"
-        return self.AD.config.pin_apps
-
-    @pin_apps.setter
-    def pin_apps(self, new: bool) -> None:
-        """Set the config flag for whether each app should be pinned to a thread"""
-        self.AD.config.pin_apps = bool(new)
-
-    @property
-    def pin_threads(self) -> int | None:
-        """The number of threads that are dedicated to pinned apps.
-        This should be the actual number of pin threads
-        that have been created and not the number of pin threads there are supposed to be."""
-        return self.AD.config.pin_threads
-
-    @pin_threads.setter
-    def pin_threads(self, new: int | None) -> None:
-        """Set the number of threads that are dedicated to pinned apps."""
-        assert isinstance(new, int) or new is None, "pin_threads must be an integer or None"
-        self.AD.config.pin_threads = new
+        self.last_callbacks = deque(maxlen=10)

     @property
     def thread_count(self) -> int:
@@ -103,19 +79,6 @@ def thread_count(self) -> int:
        `threads` dictionary, so it can't be set directly."""
         return len(self.threads)

-    @property
-    def total_threads(self) -> int | None:
-        """Number of threads to create for apps.
-
-        By default this is automatically calculated, but can also be manually configured by the user in
-        ``appdaemon.yaml``.
-        """
-        return self.AD.config.total_threads
-
-    @total_threads.setter
-    def total_threads(self, new: int):
-        self.AD.config.total_threads = new
-
     def stop(self):
         """Stop all threads."""
         for thread_name, thread in self.threads.items():
@@ -123,14 +86,23 @@
             match thread:
                 case {"queue": Queue() as q, "thread": Thread() as t}:
                     self.logger.debug("Stopping %s", thread_name)
                     q.put_nowait(None)
-                    t.join(timeout=1)
-                    self.logger.debug("Joined %s", thread_name)
+                    try:
+                        t.join(timeout=1)
+                        self.logger.debug("Joined %s", thread_name)
+                    except RuntimeError as exc:
+                        self.logger.error("Error joining thread %s: %s", thread_name, exc)
+
+    def _pin_settings(self):
+        return {
+            name: {'pinned': obj.pin_app, 'pin_thread': obj.pin_thread}
+            for name, obj in self.AD.app_management.objects.items()
+        }

     async def get_q_update(self):
         """Updates queue sizes"""
         for thread in self.threads:
             qsize = self.get_q(thread).qsize()
-            await self.set_state("_threading", "admin", "thread.{}".format(thread), q=qsize)
+            await self.set_state(f"thread.{thread}", q=qsize)

     async def get_callback_update(self):
         """Updates the sensors with information about how many callbacks have been fired.
        Called by the :class:`~appdaemon.admin_loop.AdminLoop`
@@ -138,31 +110,24 @@
         - ``sensor.callbacks_average_fired``
         - ``sensor.callbacks_average_executed``
         """
-        now = datetime.datetime.now()
-        self.callback_list.append({"fired": self.current_callbacks_fired, "executed": self.current_callbacks_executed, "ts": now})
-
-        if len(self.callback_list) > 10:
-            self.callback_list.pop(0)
-
-        fired_sum = 0
-        executed_sum = 0
-        for item in self.callback_list:
-            fired_sum += item["fired"]
-            executed_sum += item["executed"]
+        now = await self.AD.sched.get_now()
+        self.last_callbacks.append(
+            {"fired": self.current_callbacks_fired, "executed": self.current_callbacks_executed, "ts": now}
+        )

-        total_duration = (self.callback_list[len(self.callback_list) - 1]["ts"] - self.callback_list[0]["ts"]).total_seconds()
+        fired_total = sum(item["fired"] for item in self.last_callbacks)
+        executed_total = sum(item["executed"] for item in self.last_callbacks)
+        total_duration = (self.last_callbacks[-1]["ts"] - self.last_callbacks[0]["ts"]).total_seconds()

         if total_duration == 0:
             fired_avg = 0
             executed_avg = 0
         else:
-            fired_avg = round(fired_sum / total_duration, 1)
-            executed_avg = round(executed_sum / total_duration, 1)
+            fired_avg = round(fired_total / total_duration, 1)
+            executed_avg = round(executed_total / total_duration, 1)

-        await self.set_state("_threading", "admin", "sensor.callbacks_average_fired", state=fired_avg, _silent=True)
+        await self.set_state("sensor.callbacks_average_fired", state=fired_avg, _silent=True)
         await self.set_state(
-            "_threading",
-            "admin",
             "sensor.callbacks_average_executed",
             state=executed_avg,
             _silent=True,
@@ -173,24 +138,76 @@
         self.current_callbacks_fired = 0

     async def init_admin_stats(self):
-        # Initialize admin stats
-
-        await self.add_entity("admin", "sensor.callbacks_total_fired", 0)
-        await self.add_entity("admin", "sensor.callbacks_average_fired", 0)
-        await self.add_entity("admin", "sensor.callbacks_total_executed", 0)
-        await self.add_entity("admin", "sensor.callbacks_average_executed", 0)
-        await self.add_entity("admin", "sensor.threads_current_busy", 0)
-        await self.add_entity("admin", "sensor.threads_max_busy", 0)
-        await self.add_entity(
-            "admin",
-            "sensor.threads_max_busy_time",
-            utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
-        )
-        await self.add_entity(
-            "admin",
-            "sensor.threads_last_action_time",
-            utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
-        )
+        await self.add_entity("sensor.callbacks_total_fired", 0)
+        await self.add_entity("sensor.callbacks_average_fired", 0)
+        await self.add_entity("sensor.callbacks_total_executed", 0)
+        await self.add_entity("sensor.callbacks_average_executed", 0)
+        await self.add_entity("sensor.threads_current_busy", 0)
+        await self.add_entity("sensor.threads_max_busy", 0)
+        await self.add_entity("sensor.threads_max_busy_time", "never")
+        await self.add_entity("sensor.threads_last_action_time", "never")
+
+    def resolve_thread_counts(self) -> tuple[int, int]:
+        """Resolve thread configuration into a concrete count of the total number of threads to create and the number of
+        them to reserve for pinning."""
+        pin_threads = self.AD.config.pin_threads
+        total_threads = self.AD.config.total_threads
+
+        # Handle determining the counts. Each logical path has an associated log message
+        match total_threads, pin_threads:
+            case 0, _:  # Special case of 0 threads
+                # Force pin_threads to 0
+                pin_threads = 0
+                self.logger.info("Starting apps with no worker threads.")
+            case int(), int():  # Both are set in the configuration file
+                assert total_threads > 0, "specified total_threads has to be above 0"
+                assert pin_threads > 0, "specified pin_threads has to be above 0"
+                assert pin_threads < total_threads, \
+                    "pin_threads has to be less than total_threads if both are specified"
+                self.logger.info(
+                    "Starting apps with %d worker threads, with threads 0-%d reserved for pinned apps",
+                    total_threads,
+                    pin_threads - 1,
+                )
+            case int(), None:  # Only total_threads was specified
+                assert total_threads > 0, "specified total_threads has to be above 0"
+                self.logger.info("Starting %d worker threads for apps", total_threads)
+                if self.AD.config.pin_apps:
+                    # If the global setting for apps is to pin them, use all the threads for pinning
+                    pin_threads = total_threads
+                    self.logger.info("All %d threads can be used for pinning.", total_threads)
+            case None, None:  # AppDaemon will automatically determine thread counts
+                if self.AD.config.pin_apps:
+                    # If the global setting is to pin apps, then the thread counts are determined by the number of apps
+                    total_threads = self.AD.app_management.dependency_manager.app_deps.app_config.active_app_count()
+                    pin_threads = self.AD.app_management.pinned_app_count()
+                    if total_threads == pin_threads:
+                        self.logger.info("Starting each app with a dedicated thread (%d total)", total_threads)
+                    else:
+                        assert total_threads >= pin_threads
+                        self.logger.info(
+                            "Starting %d total threads, %d threads for pinning",
+                            total_threads,
+                            pin_threads
+                        )
+                else:
+                    # Otherwise default to 10 worker threads, with none reserved for pinning
+                    total_threads, pin_threads = 10, 0
+                    self.logger.info("Starting with a default of 10 worker threads.")
+
+        # Runtime checks to ensure that nothing weird happened
+        match total_threads, pin_threads:
+            case int(), int():  # Confirm thread counts at the end
+                assert total_threads >= 0
+                assert pin_threads <= total_threads, \
+                    "pin_threads must be less than or equal to total_threads"
+                return total_threads, pin_threads
+            case _:  # Raise an error with the config if anything is weird
+                raise ade.InvalidThreadConfiguration(
+                    self.AD.config.total_threads,
+                    self.AD.config.pin_apps,
+                    self.AD.config.pin_threads,
+                )

     async def create_initial_threads(self) -> None:
         """
@@ -202,51 +219,37 @@
        Also by default, all of the threads created will be for pinned apps, but this can be overridden to be just a
        subset of the `total_threads` with the `pin_threads` setting.
         """
-        match self.total_threads, self.pin_apps:
-            case None, True:
-                self.total_threads = self.pin_threads = self.AD.app_management.dependency_manager.app_deps.app_config.active_app_count or 1
-                self.logger.info(
-                    "Starting apps with %s worker threads. Apps will all be assigned threads and pinned to them.",
Apps will all be assigned threads and pinned to them.", - self.total_threads, - ) - case 0, False: # fully async - self.logger.info("Starting apps with no worker threads.") - self.pin_threads = 0 - case int(), False: - self.logger.info( - "Starting apps with %s worker threads, with %s reserved for pinned apps", - self.total_threads, - self.pin_threads, - ) - self.pin_threads = 0 - case _: - self.logger.error("Invalid thread configuration.") - raise ade.InvalidThreadConfiguration( - self.total_threads, - self.pin_apps, - self.pin_threads, - ) - - assert self.pin_threads is not None - assert self.total_threads is not None - for _ in range(self.total_threads): + total_threads, pin_threads = self.resolve_thread_counts() + for _ in range(total_threads - self.thread_count): await self.add_thread(silent=True) + free_threads = list(self.threads.keys())[pin_threads:] + self._roundrobin_cycle = cycle(free_threads) + # Add thread object to track async - await self.add_entity( - "admin", - "thread.async", - "idle", - { - "q": 0, - "is_alive": True, - "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)), - "pinned_apps": [], - }, - ) + if not self.AD.state.entity_exists("admin", "thread.async"): + await self.add_entity( + "thread.async", + "idle", + { + "q": 0, + "is_alive": True, + "time_called": "never", + "pinned_apps": [], + }, + ) def get_q(self, thread_id: str) -> Queue[dict[str, Any] | None]: - return self.threads[thread_id]["queue"] + match self.threads.get(thread_id): + case {"queue": Queue() as q}: + return q + raise KeyError(f"Invalid thread_id: {thread_id}") + + def get_thread(self, thread_id: str) -> Thread: + match self.threads.get(thread_id): + case {"thread": Thread() as thread}: + return thread + raise KeyError(f"Invalid thread_id: {thread_id}") @staticmethod def atoi(text): @@ -257,34 +260,34 @@ def natural_keys(self, text): # Diagnostics - def total_q_size(self): - qsize = 0 - for thread in self.threads: - qsize += self.threads[thread]["queue"].qsize() - return qsize + def _q_iter(self) -> Generator[tuple[str, Queue]]: + for thread_name, info in self.threads.items(): + match info: + case {"queue": Queue() as q}: + yield thread_name, q - def min_q_id(self): - id = 0 - i = 0 - qsize = sys.maxsize - for thread in self.threads: - if self.threads[thread]["queue"].qsize() < qsize: - qsize = self.threads[thread]["queue"].qsize() - id = i - i += 1 - return id + def total_q_size(self) -> int: + return sum(q.qsize() for _, q in self._q_iter()) + + def min_q_id(self) -> str: + _, min_thread_name = min((q.qsize(), name) for name, q in self._q_iter()) + return min_thread_name async def get_thread_info(self): - info = {} - info["max_busy_time"] = await self.get_state("_threading", "admin", "sensor.threads_max_busy_time") - info["last_action_time"] = await self.get_state("_threading", "admin", "sensor.threads_last_action_time") - info["current_busy"] = await self.get_state("_threading", "admin", "sensor.threads_current_busy") - info["max_busy"] = await self.get_state("_threading", "admin", "sensor.threads_max_busy") + info = { + attr: await self.get_state(f"sensor.threads_{attr}") + for attr in ( + "max_busy_time", + "last_action_time", + "current_busy", + "max_busy" + ) + } info["threads"] = {} for thread in sorted(self.threads, key=self.natural_keys): if thread not in info["threads"]: info["threads"][thread] = {} - t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all") + t = await self.get_state(f"thread.{thread}", attribute="all") 
info["threads"][thread]["time_called"] = t["attributes"]["time_called"] info["threads"][thread]["callback"] = t["state"] info["threads"][thread]["is_alive"] = t["attributes"]["is_alive"] @@ -294,17 +297,17 @@ async def dump_threads(self): self.diag.info("--------------------------------------------------") self.diag.info("Threads") self.diag.info("--------------------------------------------------") - current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy") - max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy") - max_busy_time = utils.str_to_dt(await self.get_state("_threading", "admin", "sensor.threads_max_busy_time")) - last_action_time = await self.get_state("_threading", "admin", "sensor.threads_last_action_time") + current_busy = await self.get_state("sensor.threads_current_busy") + max_busy = await self.get_state("sensor.threads_max_busy") + max_busy_time = utils.str_to_dt(await self.get_state("sensor.threads_max_busy_time")) + last_action_time = await self.get_state("sensor.threads_last_action_time") self.diag.info("Currently busy threads: %s", current_busy) self.diag.info("Most used threads: %s at %s", max_busy, max_busy_time) self.diag.info("Last activity: %s", last_action_time) self.diag.info("Total Q Entries: %s", self.total_q_size()) self.diag.info("--------------------------------------------------") for thread in sorted(self.threads, key=self.natural_keys): - t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all") + t = await self.get_state(f"thread.{thread}", attribute="all") # print("thread.{}".format(thread), t) self.diag.info( "%s - qsize: %s | current callback: %s | since %s, | alive: %s, | pinned apps: %s", @@ -321,86 +324,71 @@ async def dump_threads(self): # Thread Management # - def select_q(self, args): - # - # Select Q based on distribution method: - # Round Robin - # Random - # Load distribution - # - - # Check for pinned app and if so figure correct thread for app + def select_q(self, args: dict[str, Any]): + """Selects the queue for the thread and calls put_nowait on it to dispatch the callback to the worker thread.""" - if args["pin_app"] is True: - thread = args["pin_thread"] - # Handle the case where an App is unpinned but selects a pinned callback without specifying a thread - # If this happens a lot, thread 0 might get congested but the alternatives are worse! 
-            if thread is None:
-                self.logger.warning(
-                    "Invalid thread ID for pinned thread in app: %s - assigning to thread 0",
-                    args["name"],
+        match args:
+            case {"pin_app": True, "pin_thread": tid, "name": str(name)}:
+                if tid is None:
+                    tid = 0
+                    self.logger.warning(
+                        "Invalid thread ID for pinned thread in app: %s - assigning to thread 0", name
+                    )
+                thread_name = f'thread-{tid}'
+            case {"pin_app": False}:
+                # Putting this here to help with "find references"
+                pin_threads = self.AD.config.pin_threads
+                assert pin_threads is not None, (
+                    "pin_threads has to be defined so AppDaemon knows which threads can be used for unpinned callbacks"
+                )
+                assert pin_threads < self.thread_count, (
+                    "AppDaemon needs unreserved threads for unpinned callbacks"
                 )
-                thread = 0
-        else:
-            if self.thread_count == self.pin_threads:
-                raise ValueError("pin_threads must be set lower than threads if unpinned_apps are in use")
-            if self.AD.load_distribution == "load":
-                thread = self.min_q_id()
-            elif self.AD.load_distribution == "random":
-                thread = randint(self.pin_threads, self.thread_count - 1)
-            else:
-                # Round Robin is the catch all
-                thread = self.next_thread
-                self.next_thread += 1
-                if self.next_thread == self.thread_count:
-                    self.next_thread = self.pin_threads

-        if thread < 0 or thread >= self.thread_count:
-            raise ValueError(f"invalid thread id: {thread} in app {args['name']}")
+                match self.AD.config.load_distribution:
+                    case "load":
+                        thread_name = self.min_q_id()
+                    case "random":
+                        tid = randint(pin_threads, self.thread_count - 1)
+                        thread_name = f'thread-{tid}'
+                    case _:
+                        # Round robin is the catch-all
+                        thread_name = next(self._roundrobin_cycle)
+            case _:
+                raise RuntimeError(f'Invalid queue args: {args}')

-        q = self.threads[f"thread-{thread}"]["queue"]
-        q.put_nowait(args)
+        match self.threads.get(thread_name):
+            case {"queue": Queue() as q}:
+                q.put_nowait(args)
+            case _:
+                raise RuntimeError(f"Invalid thread {thread_name} for app '{args['name']}'")

     async def check_overdue_and_dead_threads(self):
         if self.AD.real_time is True and self.AD.thread_duration_warning_threshold != 0:
             for thread_id in self.threads:
-                if self.threads[thread_id]["thread"].is_alive() is not True:
+                if not self.get_thread(thread_id).is_alive():
                     self.logger.critical("Thread %s has died", thread_id)
                     self.logger.critical("Pinned apps were: %s", self.get_pinned_apps(thread_id))
                     self.logger.critical("Thread will be restarted")
                     id = thread_id.split("-")[1]
                     await self.add_thread(silent=False, id=id)
-                if await self.get_state("_threading", "admin", "thread.{}".format(thread_id)) != "idle":
-                    start = utils.str_to_dt(
-                        await self.get_state(
-                            "_threading",
-                            "admin",
-                            "thread.{}".format(thread_id),
-                            attribute="time_called",
-                        )
+                if await self.get_state(f"thread.{thread_id}") != "idle":
+                    start = datetime.datetime.fromisoformat(
+                        await self.get_state(f"thread.{thread_id}", attribute="time_called")
                     )
                     dur = (await self.AD.sched.get_now() - start).total_seconds()
                     if dur >= self.AD.thread_duration_warning_threshold and dur % self.AD.thread_duration_warning_threshold == 0:
                         self.logger.warning(
                             "Excessive time spent in callback: %s - %s",
-                            await self.get_state(
-                                "_threading",
-                                "admin",
-                                "thread.{}".format(thread_id),
-                                attribute="callback",
-                            ),
+                            await self.get_state(f"thread.{thread_id}", attribute="callback"),
                             dur,
                         )

     async def check_q_size(self, warning_step, warning_iterations):
-        totalqsize = 0
-        for thread in self.threads:
-            totalqsize += self.threads[thread]["queue"].qsize()
-
+        totalqsize = self.total_q_size()
        if totalqsize > self.AD.qsize_warning_threshold:
             if (warning_step == 0 and warning_iterations >= self.AD.qsize_warning_iterations) or warning_iterations == self.AD.qsize_warning_iterations:
                 for thread in self.threads:
-                    qsize = self.threads[thread]["queue"].qsize()
+                    qsize = self.get_q(thread).qsize()
                     if qsize > 0:
                         time_called = await self.get_state(
                             "_threading",
@@ -414,8 +402,8 @@ async def check_q_size(self, warning_step, warning_iterations):
                             "Queue size for thread %s is %s, callback is '%s' called at %s - possible thread starvation",
                             thread,
                             qsize,
-                            await self.get_state("_threading", "admin", f"thread.{thread}"),
-                            await self.AD.sched.parse_datetime(time_called),
+                            await self.get_state(f"thread.{thread}"),
+                            await self.get_state(f"thread.{thread}", attribute="time_called"),
                         )

                 await self.dump_threads()
@@ -447,16 +435,12 @@ async def update_thread_info(self, thread_id, callback, app, type, uuid, silent)
             return

         appentity = f"{appinfo.type}.{app}"
         now = await self.AD.sched.get_now()
+        now_str = utils.dt_to_str(now, self.AD.tz, round=True)
+
         if callback == "idle":
             start = utils.str_to_dt(
-                await self.get_state(
-                    "_threading",
-                    "admin",
-                    "thread.{}".format(thread_id),
-                    attribute="time_called",
-                )
+                await self.get_state(f"thread.{thread_id}", attribute="time_called")
             )
             if start == "never":
                 duration = 0.0
@@ -465,79 +449,63 @@
             if self.AD.real_time and duration >= self.AD.thread_duration_warning_threshold:
                 thread_name = f"thread.{thread_id}"
-                callback = await self.get_state("_threading", "admin", thread_name)
+                callback = await self.get_state(thread_name)
                 self.logger.warning(
                     f"Excessive time spent in callback {callback}. "
                     f"Thread entity: '{thread_name}' - now complete after {utils.format_timedelta(duration)} "
                     f"(limit={utils.format_timedelta(self.AD.thread_duration_warning_threshold)})"
                 )

-            await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", -1)
+            await self.add_to_state("sensor.threads_current_busy", -1)

-            await self.add_to_attr("_threading", "admin", appentity, "totalcallbacks", 1)
-            await self.add_to_attr("_threading", "admin", appentity, "instancecallbacks", 1)
+            await self.add_to_attr(appentity, "totalcallbacks", 1)
+            await self.add_to_attr(appentity, "instancecallbacks", 1)

-            await self.add_to_attr(
-                "_threading",
-                "admin",
-                "{}_callback.{}".format(type, uuid),
-                "executed",
-                1,
-            )
-            await self.add_to_state("_threading", "admin", "sensor.callbacks_total_executed", 1)
+            await self.add_to_attr(f"{type}_callback.{uuid}", "executed", 1)
+            await self.add_to_state("sensor.callbacks_total_executed", 1)

             self.current_callbacks_executed += 1

         else:
-            await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", 1)
+            await self.add_to_state("sensor.threads_current_busy", 1)
             self.current_callbacks_fired += 1

-        current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
-        max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
+        current_busy: int = await self.get_state("sensor.threads_current_busy")
+        max_busy: int = await self.get_state("sensor.threads_max_busy")
         if current_busy > max_busy:
-            await self.set_state("_threading", "admin", "sensor.threads_max_busy", state=current_busy)
-            await self.set_state(
-                "_threading",
-                "admin",
-                "sensor.threads_max_busy_time",
-                state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz),
-            )
-
-        await self.set_state(
-            "_threading",
-            "admin",
"sensor.threads_last_action_time", - state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz), - ) + await self.set_state("sensor.threads_max_busy", state=current_busy) + await self.set_state("sensor.threads_max_busy_time", state=now_str) + await self.set_state("sensor.threads_last_action_time", state=now_str) # Update thread info if thread_id == "async": await self.set_state( - "_threading", - "admin", - "thread.{}".format(thread_id), + f"thread.{thread_id}", q=0, state=callback, - time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz), + time_called=now_str, is_alive=True, pinned_apps=[], ) else: await self.set_state( - "_threading", - "admin", - "thread.{}".format(thread_id), - q=self.threads[thread_id]["queue"].qsize(), + f"thread.{thread_id}", + q=self.get_q(thread_id).qsize(), state=callback, - time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz), - is_alive=self.threads[thread_id]["thread"].is_alive(), + time_called=now_str, + is_alive=self.get_thread(thread_id).is_alive(), pinned_apps=self.get_pinned_apps(thread_id), ) - await self.set_state("_threading", "admin", appentity, state=callback) + await self.set_state(appentity, state=callback) # # Pinning # - async def add_thread(self, silent: bool = False, id: int | str | None = None) -> None: + async def add_thread(self, silent: bool = False, id: int | None = None) -> None: + """Create a new worker thread. + + This is where the Thread object is actually created and started. The thread will begin running the `worker` + method as its target, and a new entity will be added to the `thread` domain in the `admin` namespace to track it. + """ if id is None: thread_id = self.thread_count else: @@ -548,18 +516,14 @@ async def add_thread(self, silent: bool = False, id: int | str | None = None) -> thread_entity = f"thread.{thread.name}" if id is None: await self.add_entity( - "admin", thread_entity, "idle", - {"q": 0, "is_alive": True, "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0))}, + {"q": 0, "is_alive": True, "time_called": "never"}, ) - self.threads[thread.name] = {} - self.threads[thread.name]["queue"] = Queue(maxsize=0) + self.threads[thread.name] = {"queue": Queue(maxsize=0)} thread.start() else: await self.set_state( - "_threading", - "admin", thread_entity, state="idle", is_alive=True, @@ -567,84 +531,100 @@ async def add_thread(self, silent: bool = False, id: int | str | None = None) -> self.threads[thread.name]["thread"] = thread - async def calculate_pin_threads(self): - """Assigns thread numbers to apps that are supposed to be pinned""" - if self.pin_threads == 0: - return + async def assign_app_threads(self): + """Assigns thread numbers to apps that are supposed to be pinned. 
- thread_pins = [0] * self.pin_threads - for name, obj in self.AD.app_management.objects.items(): - # Looking for apps that already have a thread pin value - if obj.pin_app and (thread := obj.pin_thread) is not None: - if thread >= self.thread_count: - raise ValueError("Pinned thread out of range - check apps.yaml for 'pin_thread' or app code for 'set_pin_thread()'") - # Ignore anything outside the pin range as it will have been set by the user - if thread < self.pin_threads: - thread_pins[thread] += 1 - - # Now we know the numbers, go fill in the gaps - for name, obj in self.AD.app_management.objects.items(): - if obj.pin_app and obj.pin_thread is None: - thread = thread_pins.index(min(thread_pins)) - self.AD.app_management.set_pin_thread(name, thread) - thread_pins[thread] += 1 + Apps are assigned to threads based on how many other apps have been assigned to each thread. This depends on the + `ManagedObject` instances already having been created. - for thread in self.threads: - pinned_apps = self.get_pinned_apps(thread) - await self.set_state( - "_threading", - "admin", - "thread.{}".format(thread), - pinned_apps=pinned_apps, - ) + Updates the state of entities in the `thread` domain in the `admin` namespace. For example, `thread.thread-0`. + """ + _, pin_threads = self.resolve_thread_counts() - def app_should_be_pinned(self, app_name: str) -> bool: - # Check apps.yaml first - allow override - cfg = self.AD.app_management.app_config.root[app_name] - assert isinstance(cfg, AppConfig) - return cfg.pin_app or self.pin_apps + if not pin_threads > 0: + return - def validate_pin(self, name: str, pin_thread: int | None) -> None: - """Check to see if the ID for the pin thread is valid. + if not self.AD.app_management.objects: + self.logger.warning('No managed app objects to assign threads to.') - Raises: - PinOutofRange: if the pin_thread is not valid. 
+ # Get the apps that need to have threads assigned + apps_to_assign = [ + name + for name, mo in self.AD.app_management.objects.items() + if mo.pin_app and mo.pin_thread is None + ] # fmt: skip - Returns: - None - """ - if pin_thread is not None and (pin_thread < 0 or pin_thread >= self.thread_count): - self.logger.warning( - "Invalid value for pin_thread (%s) in app: %s - discarding callback", - pin_thread, - name, + counts = self.thread_app_counts() + + # Iterate through the names of all the apps that need threads assigned + for app_name in apps_to_assign: + # Get the name of the thread that has the fewest apps pinned to it + _, min_tid = min((v, k) for k, v in counts.items()) + counts[min_tid] += 1 + self.AD.app_management.set_pin_thread(app_name, min_tid) + + for tid, pin_cnt in counts.items(): + await self.AD.state.set_state( + "_threading", + "admin", + f"thread.thread-{tid}", + pinned_apps=pin_cnt, ) - raise ade.PinOutofRange(pin_thread, self.thread_count) - def get_pinned_apps(self, thread: str): + def thread_app_counts(self) -> dict[int, int]: + """Get a dict that maps thread ID nums to how many apps are pinned to each one.""" + counts = {int(k.split('-')[-1]): 0 for k in self.threads} + for obj in self.AD.app_management.objects.values(): + match obj: + case ManagedObject(type="app", pin_thread=int(tid)): + try: + counts[tid] += 1 + except KeyError: + # raise ade.PinThreadNotFound(pin_thread=tid) from exc + continue + return counts + + def get_pinned_apps(self, thread: str | int) -> list[str]: """Gets the names of apps that are pinned to a particular thread""" - id = int(thread.split("-")[1]) - return [app_name for app_name, obj in self.AD.app_management.objects.items() if obj.pin_thread == id] + match thread: + case str(): + thread_id = int(thread.split("-")[1]) + case int(thread_id): + pass + case _: + raise ValueError(f"Invalid thread: {thread}") + return [app_name for app_name, obj in self.AD.app_management.objects.items() if obj.pin_thread == thread_id] + + def determine_thread( + self, + name: str, + cb_pin: bool | None, + cb_pin_thread: int | None + ) -> tuple[bool, int | None]: + """Determine pin settings for a callback using inputs from the callback registration with settings from the app + management as defaults. - def determine_thread(self, name: str, pin: bool | None, pin_thread: int | None) -> tuple[bool, int | None]: - """Determine whether the app should be pinned to a thread and which one. + If the callback thread is not specified, then which thread it gets called in should be calculated at call time + to get good results from the different load distribution strategies. The length of the various thread queues can + be wildly different at call time from when the callback was first registered. - Applies defaults from app management + Dev Note: + This method is a good place to handle things related to thread/pinning at callback registration. Returns: - A tuple of (pin, pin_thread) where pin is ``True`` if the app should be pinned and pin_thread is the - thread ID number + tuple[bool, int | None]: Whether to pin the callback and if so, what thread it should be pinned to. 
""" - - if pin_thread is None: - pin = self.AD.app_management.objects[name].pin_app if pin is None else pin - pin_thread = self.AD.app_management.objects[name].pin_thread + # Manually specifying a pin_thread implies pin_app=True + if cb_pin_thread is not None: + # Validity check for the pin settings specified at the callback registration + if cb_pin_thread < 0 or cb_pin_thread > self.AD.threading.thread_count: + raise ade.PinOutofRange(cb_pin_thread, self.AD.threading.thread_count) + pin_callback = True else: - assert isinstance(pin_thread, int) - pin = True + pin_callback = cb_pin if cb_pin is not None else self.AD.app_management.get_app_pin(name) - self.validate_pin(name, pin_thread) - return pin, pin_thread + callback_thread = cb_pin_thread if cb_pin_thread is not None else self.AD.app_management.get_pin_thread(name) + return pin_callback, callback_thread # # Constraints @@ -853,8 +833,11 @@ async def check_and_dispatch_state( return executed async def dispatch_worker(self, name: str, args: dict[str, Any]): + """Apply any constraints and if they pass, dispatch the callback to the worker thread by using the ``select_q`` + method.""" + # Give user the option to discard events during the app initialize methods to prevent race conditions - state = await self.AD.state.get_state("_threading", "admin", f"app.{name}") + state = await self.get_state(f"app.{name}") if state == "initializing" and self.AD.config.discard_init_events: self.logger.info("Incoming event while initializing - discarding") return @@ -864,19 +847,20 @@ async def dispatch_worker(self, name: str, args: dict[str, Any]): # Argument Constraints # (plugins have no args so skip if necessary) # - if app_cfg := self.AD.app_management.app_config.root.get(name): - for arg, val in app_cfg.args.items(): - constrained = await self.check_constraint( - arg, - val, - self.AD.app_management.objects[name].object, - ) - if not constrained: + match self.AD.app_management.app_config.root.get(name): + case AppConfig(disable=False) as app_cfg: + for arg, val in app_cfg.args.items(): + constrained = await self.check_constraint( + arg, + val, + self.AD.app_management.objects[name].object, + ) + if not constrained: + unconstrained = False + if not await self.check_time_constraint(app_cfg.args, name): + unconstrained = False + elif not await self.check_days_constraint(app_cfg.args, name): unconstrained = False - if not await self.check_time_constraint(self.AD.app_management.app_config[name].args, name): - unconstrained = False - elif not await self.check_days_constraint(self.AD.app_management.app_config[name].args, name): - unconstrained = False # # Callback level constraints @@ -910,14 +894,8 @@ async def dispatch_worker(self, name: str, args: dict[str, Any]): if "__silent" in args["kwargs"] and args["kwargs"]["__silent"] is True: pass else: - await self.add_to_state("_threading", "admin", "sensor.callbacks_total_fired", 1) - await self.add_to_attr( - "_threading", - "admin", - "{}_callback.{}".format(myargs["type"], myargs["id"]), - "fired", - 1, - ) + await self.add_to_state("sensor.callbacks_total_fired", 1) + await self.add_to_attr(f"{myargs['type']}_callback.{myargs['id']}", "fired", 1) # # And Q # diff --git a/appdaemon/utils.py b/appdaemon/utils.py index b912c3bd1..a05a17128 100644 --- a/appdaemon/utils.py +++ b/appdaemon/utils.py @@ -955,6 +955,7 @@ def get_object_size(obj, seen=None): def write_config_file(file: Path, **kwargs): """Writes a single YAML or TOML file.""" file = Path(file) if not isinstance(file, Path) else file + 
file.parent.mkdir(parents=True, exist_ok=True) match file.suffix: case ".yaml": return write_yaml_config(file, **kwargs) diff --git a/docs/HISTORY.md b/docs/HISTORY.md index b11b8b599..703094694 100644 --- a/docs/HISTORY.md +++ b/docs/HISTORY.md @@ -17,10 +17,12 @@ - `PyPI Upload` runs for tagged version commits - Runs the `functional` test group before uploading - Stale issues no longer close after 15 days +- Broke up the doc page on the AppDaemon internals into multiple pages and expanded content. **Fixes** - Remove outdated references to `app_init_delay` +- Reworked how the `pin_app` and `pin_thread` settings work their way through the internals, and added corresponding tests. **Breaking Changes** diff --git a/docs/INTERNALS.rst b/docs/INTERNALS.rst deleted file mode 100644 index cce7602bd..000000000 --- a/docs/INTERNALS.rst +++ /dev/null @@ -1,229 +0,0 @@ -Internal Documentation -====================== - -These notes are intended to assist anyone that wants to understand AppDaemon's internals better. - -Structure ---------- -The Python project follows the conventional `PEP 621 `_, using a ``pyproject.toml`` file to define its metadata. -The repository is divided into various folder: - -``./appdaemon`` - source code of the Python package -``./docs`` - source code from which this documentation is built -``./tests`` - tests written with ``pytest`` -``./conf`` - configuration directory, containing some sample files - -AppDaemon is organized into several subsystems managed by the central :class:`~appdaemon.appdaemon.AppDaemon` class. Each subsystem handles a specific aspect of functionality: - -Core Subsystems -~~~~~~~~~~~~~~~ - -- **App Management** (:class:`~appdaemon.app_management.AppManagement`) - Provides the mechanics to manage the lifecycle of user apps, which includes tracking the user files for changes and reloading apps as needed. - -- **Scheduler** (:class:`~appdaemon.scheduler.Scheduler`) - Provides time-based scheduling, sunrise/sunset calculations, and time travel functionality for testing. - -- **State** (:class:`~appdaemon.state.State`) - Tracks and manages entity states across different namespaces, provides state persistence between AppDaemon runs, and handles callbacks based on state changes. - -- **Events** (:class:`~appdaemon.events.Events`) - Provides the mechanics to fire events, hand events off to relevant plugins (such as :doc:`HASS` for Home Assistant), and process event callbacks - -- **Callbacks** (:class:`~appdaemon.callbacks.Callbacks`) - Container for storing and managing all registered callbacks from apps. - -- **Services** (:class:`~appdaemon.services.Services`) - Provides the mechanics to register/deregister services and allows apps to call them with a ``/`` string. - -- **Plugin Management** (:class:`~appdaemon.plugin_management.PluginManagement`) - Manages external plugins that provide connectivity to external systems (Home Assistant, MQTT, etc.). - -Threading & Async -~~~~~~~~~~~~~~~~~ - -- **Threading** (:class:`~appdaemon.threads.Threading`) - Manages worker threads for executing app callbacks and handles thread pinning. - -- **Thread Async** (:class:`~appdaemon.thread_async.ThreadAsync`) - Bridges between threaded callback execution and the main async event loop using queues. - -- **Utility Loop** (:class:`~appdaemon.utility_loop.Utility`) - Runs periodic maintenance tasks like file change detection, thread monitoring, and performance diagnostics. 
- -Additional Subsystems -~~~~~~~~~~~~~~~~~~~~~ - -- **Sequences** (:class:`~appdaemon.sequences.Sequences`) - Manages configurable automation sequences with steps like service calls, waits, and conditionals. - -- **Futures** (:class:`~appdaemon.futures.Futures`) - Handles asynchronous operations and provides mechanisms for cancelling long-running tasks. - - -Startup -------- - -The top-level entrypoint is :py:func:`appdaemon.__main__.main`, which uses :py:mod:`argparse` to parse the launch -arguments. - -The :py:class:`~appdaemon.__main__.ADMain` class primarily provides a :py:ref:`context manager` that -can be used with a :py:keyword:`with statement`. This contains an :py:class:`~contextlib.ExitStack` instance that -gets closed when its context is exited. Several methods are wrapped with the :py:func:`~contextlib.contextmanager` -decorator and are added to the stack when AppDaemon is run. This guarantees that their respective cleanups are allowed -to run in the correct order, which is the reverse order that the contexts were entered. - -Contexts -~~~~~~~~ - -The various context managers that get entered as AppDaemon starts include the logic for following steps. Some of these -are entered as part of the :py:class:`~appdaemon.__main__.ADMain` context, and some are entered in the -:py:class:`~appdaemon.__main__.ADMain.run` method. All of them are exited in reverse order as the -:py:class:`~contextlib.ExitStack` is closed, which happens when the :py:class:`~appdaemon.__main__.ADMain` context -exits. - - * Backstop logic to catch any exceptions and log them more prettily. - * Creates a PID file for the duration of the context, if necessary/applicapable. - * Creates a new async event loop and cleans it up afterwards. - * Attaches/removes signal handlers to catch termination signals and stop gracefully - * Startup/shutdown text in the logs about the versions and time it took to shut down - * Creates the :py:class:`~appdaemon.appdaemon.AppDaemon` and :py:class:`~appdaemon.http.HTTP` objects. - * Sets/unsets the default exception handler for the event loop to prettify any unhandled exceptions. The difference with the previous exception handler is that this one has access to the :py:class:`~appdaemon.appdaemon.AppDaemon` object - -.. literalinclude:: ../appdaemon/__main__.py - :language: python - :lineno-match: - :pyobject: main - :caption: Top-level entrypoint in __main__.py - :emphasize-lines: 16-17 - -Running -~~~~~~~ - -ADMain runs AppDaemon by calling its :py:meth:`~appdaemon.appdaemon.AppDaemon.start` method followed by the :py:meth:`~asyncio.loop.run_forever` method of the event loop. - -.. literalinclude:: ../appdaemon/__main__.py - :language: python - :lineno-match: - :pyobject: ADMain.run - :caption: ADMain.run() method - :emphasize-lines: 10 - -Shutdown --------- - -Shutdown is initiated by the process running AppDaemon receiving either a :py:obj:`~signal.SIGINT` or :py:obj:`~signal.SIGTERM` signal. - -.. literalinclude:: ../appdaemon/__main__.py - :language: python - :lineno-match: - :pyobject: ADMain.handle_sig - :caption: ADMain.handle_sig() method - :emphasize-lines: 18-20 - -The :py:meth:`~appdaemon.appdaemon.AppDaemon.stop` method is called, which in turn calls the stop methods of the various -subsystems. It doesn't return until all the tasks that existed at the time of the call have finished. This usually -happens almost instantly, but this has a 3s timeout just to be safe. - -.. 
literalinclude:: ../appdaemon/__main__.py - :language: python - :lineno-match: - :pyobject: ADMain.stop - :caption: ADMain.stop() method - :emphasize-lines: 4 - -This will stop the event loop when all the tasks have finished, which breaks the :py:meth:`~asyncio.loop.run_forever` -call in :py:meth:`~appdaemon.__main__.ADMain.run` and causes the :py:class:`~contextlib.ExitStack` to close. - -The :py:meth:`~appdaemon.appdaemon.AppDaemon.stop` method of the :py:class:`~appdaemon.appdaemon.AppDaemon` object is responsible for stopping all the subsystems in the correct order. It first sets a global stop event that all the subsystems check at the top of their respective loops. It then calls the stop methods of each subsystem in turn, waiting for each to finish before proceeding to the next one. Finally, it cancels any remaining tasks and waits for them to finish, with a timeout of 3 seconds. - -Stop Event -~~~~~~~~~~ - -AppDaemon shutdown is globally indicated by the stop :py:class:`~asyncio.Event` getting set in the top-level :py:class:`~appdaemon.appdaemon.AppDaemon` object. All the subsystems check the status of this event using the :py:meth:`~asyncio.Event.is_set` method at the top of their respective loops, and exit if it is set. The general pattern is like this: - -.. code-block:: python - - import asyncio - import contextlib - - async def loop(self): - while not self.AD.stop_event.is_set(): - ... # Do stuff - with contextlib.suppress(asyncio.TimeoutError): - await asyncio.wait_for(self.AD.stop_event.wait(), timeout=1) - -Rather than using :py:func:`~asyncio.sleep` to wait between iterations, they use :py:func:`~asyncio.wait_for` to wait for the stop event with a timeout. The timeout is suppressed with :py:func:`~contextlib.suppress` so that it doesn't raise an exception. Whenever -the event is set, :py:meth:`~asyncio.Event.wait` returns immediately, which causes :py:func:`~asyncio.wait_for` to return immediately rather than waiting for the timeout. - -Reference ---------- - -.. autoclass:: appdaemon.__main__.ADMain - :members: - :no-index-entry: - -.. autoclass:: appdaemon.appdaemon.AppDaemon - :members: - :no-index-entry: - -.. automodule:: appdaemon.admin - :members: - -.. .. automodule:: appdaemon.admin_loop -.. :members: - -.. automodule:: appdaemon.app_management - :members: - -.. automodule:: appdaemon.callbacks - :members: - -.. .. automodule:: appdaemon.dashboard -.. :members: - -.. autoclass:: appdaemon.dependency_manager.DependencyManager - :members: - -.. automodule:: appdaemon.events - :members: - -.. automodule:: appdaemon.exceptions - :members: exception_context - -.. automodule:: appdaemon.futures - :members: - -.. autoclass:: appdaemon.http.HTTP - :members: - -.. automodule:: appdaemon.logging - :members: - -.. automodule:: appdaemon.plugin_management - :members: - -.. autoclass:: appdaemon.scheduler.Scheduler - :members: - -.. .. automodule:: appdaemon.services -.. :members: - -.. autoclass:: appdaemon.services.ServiceCallback - :private-members: __call__ - -.. autoclass:: appdaemon.services.Services - -.. .. automodule:: appdaemon.sequences -.. :members: - -.. autoclass:: appdaemon.state.State - :members: - -.. .. automodule:: appdaemon.stream -.. :members: - -.. autoclass:: appdaemon.thread_async.ThreadAsync - :members: - -.. .. automodule:: appdaemon.threads -.. :members: - -.. autoclass:: appdaemon.utility_loop.Utility - :members: - :private-members: _init_loop, _loop_iteration_context - -.. .. automodule:: appdaemon.utils -.. 
:members: diff --git a/docs/index.rst b/docs/index.rst index 60ab9d71f..44c43c3da 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -74,7 +74,7 @@ Contents: DASHBOARD_CREATION WIDGETDEV DEV - INTERNALS + internals/index TESTING REST_STREAM_API UPGRADE_FROM_3.x diff --git a/docs/internals/index.rst b/docs/internals/index.rst new file mode 100644 index 000000000..a2583dabd --- /dev/null +++ b/docs/internals/index.rst @@ -0,0 +1,146 @@ +Internals +========= + +.. admonition:: Subject to Change + :class: warning + + These are just notes on how the internals of AppDaemon work, and the implementation details are subject to change. + +.. toctree:: + :maxdepth: 1 + + threading + scheduler + lifecycle + +These notes are intended to assist anyone that wants to understand AppDaemon's internals better. + +Structure +--------- +The Python project follows the conventional `PEP 621 <https://peps.python.org/pep-0621/>`_, using a ``pyproject.toml`` file to define its metadata. +The repository is divided into various folders: + +``./appdaemon`` + source code of the Python package +``./docs`` + source code from which this documentation is built +``./tests`` + tests written with ``pytest`` +``./conf`` + configuration directory, containing some sample files + +AppDaemon is organized into several subsystems managed by the central :class:`~appdaemon.appdaemon.AppDaemon` class. Each subsystem handles a specific aspect of functionality: + +Core Subsystems +~~~~~~~~~~~~~~~ + +- **App Management** (:class:`~appdaemon.app_management.AppManagement`) - Provides the mechanics to manage the lifecycle of user apps, which includes tracking the user files for changes and reloading apps as needed. + +- **Scheduler** (:class:`~appdaemon.scheduler.Scheduler`) - Provides time-based scheduling, sunrise/sunset calculations, and time travel functionality for testing. + +- **State** (:class:`~appdaemon.state.State`) - Tracks and manages entity states across different namespaces, provides state persistence between AppDaemon runs, and handles callbacks based on state changes. + +- **Events** (:class:`~appdaemon.events.Events`) - Provides the mechanics to fire events, hand events off to relevant plugins (such as :doc:`HASS<../HASS_API_REFERENCE>` for Home Assistant), and process event callbacks. + +- **Callbacks** (:class:`~appdaemon.callbacks.Callbacks`) - Container for storing and managing all registered callbacks from apps. + +- **Services** (:class:`~appdaemon.services.Services`) - Provides the mechanics to register/deregister services and allows apps to call them with a ``domain/service`` string. + +- **Plugin Management** (:class:`~appdaemon.plugin_management.PluginManagement`) - Manages external plugins that provide connectivity to external systems (Home Assistant, MQTT, etc.). + +Threading & Async +~~~~~~~~~~~~~~~~~ + +- **Threading** (:class:`~appdaemon.threads.Threading`) - Manages worker threads for executing app callbacks and handles thread pinning. + +- **Thread Async** (:class:`~appdaemon.thread_async.ThreadAsync`) - Bridges between threaded callback execution and the main async event loop using queues. + +- **Utility Loop** (:class:`~appdaemon.utility_loop.Utility`) - Runs periodic maintenance tasks like file change detection, thread monitoring, and performance diagnostics. + +Additional Subsystems +~~~~~~~~~~~~~~~~~~~~~ + +- **Sequences** (:class:`~appdaemon.sequences.Sequences`) - Manages configurable automation sequences with steps like service calls, waits, and conditionals. 
+ +- **Futures** (:class:`~appdaemon.futures.Futures`) - Handles asynchronous operations and provides mechanisms for cancelling long-running tasks. + + +Reference +--------- + +.. .. autoclass:: appdaemon.__main__.ADMain +.. :members: +.. :no-index-entry: + +.. autoclass:: appdaemon.appdaemon.AppDaemon + :members: + :no-index-entry: + +.. automodule:: appdaemon.admin + :members: + +.. .. automodule:: appdaemon.admin_loop +.. :members: + +.. automodule:: appdaemon.app_management + :members: + +.. automodule:: appdaemon.callbacks + :members: + +.. .. automodule:: appdaemon.dashboard +.. :members: + +.. autoclass:: appdaemon.dependency_manager.DependencyManager + :members: + +.. automodule:: appdaemon.events + :members: + +.. automodule:: appdaemon.exceptions + :members: exception_context + +.. automodule:: appdaemon.futures + :members: + +.. autoclass:: appdaemon.http.HTTP + :members: + +.. automodule:: appdaemon.logging + :members: + +.. automodule:: appdaemon.plugin_management + :members: + +.. .. autoclass:: appdaemon.scheduler.Scheduler +.. :members: + +.. .. automodule:: appdaemon.services +.. :members: + +.. autoclass:: appdaemon.services.ServiceCallback + :private-members: __call__ + +.. autoclass:: appdaemon.services.Services + +.. .. automodule:: appdaemon.sequences +.. :members: + +.. autoclass:: appdaemon.state.State + :members: + +.. .. automodule:: appdaemon.stream +.. :members: + +.. autoclass:: appdaemon.thread_async.ThreadAsync + :members: + +.. .. automodule:: appdaemon.threads +.. :members: + +.. autoclass:: appdaemon.utility_loop.Utility + :members: + :private-members: _init_loop, _loop_iteration_context + +.. .. automodule:: appdaemon.utils +.. :members: diff --git a/docs/internals/lifecycle.md b/docs/internals/lifecycle.md new file mode 100644 index 000000000..9435d6000 --- /dev/null +++ b/docs/internals/lifecycle.md @@ -0,0 +1,95 @@ +# Lifecycle + +"Lifecycle" refers to AppDaemon itself starting, running, and stopping. + +## Contexts + +AppDaemon makes heavy use of {ref}`context managers ` to ensure safe and efficient startup and shutdown. Each context guarantees certain things happen as it enters and exits. The logic for the AppDaemon lifecycle consists of the following contexts, all of which get added to an {py:class}`~contextlib.ExitStack`. + +* Backstop logic to catch any exceptions and log them more prettily. +* Creates a PID file for the duration of the context, if necessary/applicable. +* Creates a new async event loop and cleans it up afterwards. +* Attaches/removes signal handlers to catch termination signals and stop gracefully +* Startup/shutdown text in the logs about the versions and time it took to shut down +* Creates the `AppDaemon` and `HTTP` objects. +* Sets/unsets the default exception handler for the event loop to prettify any unhandled exceptions. The difference with the previous exception handler is that this one has access to the `AppDaemon` object + +Some of these are entered as part of the `ADMain` context, and some are entered in the `ADMain.run` method. All of them are exited in reverse order as the {py:class}`~contextlib.ExitStack` is closed at shutdown. + +## Startup + +The top-level entrypoint is {py:func}`appdaemon.__main__.main`, which uses {py:mod}`argparse` to parse the launch arguments. + +The {py:class}`~appdaemon.__main__.ADMain` class primarily provides a context manager that allows it to be used with a with statement to smoothly control AppDaemon's lifecycle. Internally it uses an instance of {py:class}`~contextlib.ExitStack`. 
Several methods are wrapped with the {py:func}`~contextlib.contextmanager` decorator to create context managers that get entered as part of AppDaemon starting. + +```{literalinclude} ../../appdaemon/__main__.py +:language: python +:lineno-match: +:pyobject: main +:caption: main() function +:emphasize-lines: 16-17 +``` + +## Running + +ADMain runs AppDaemon by calling the {py:meth}`~appdaemon.appdaemon.AppDaemon.start` method on the top-level AppDaemon object, followed by the {py:meth}`~asyncio.loop.run_forever` method of the event loop. + +```{literalinclude} ../../appdaemon/__main__.py +:language: python +:lineno-match: +:pyobject: ADMain.run +:caption: ADMain.run() method +:emphasize-lines: 10 +``` + +## Shutdown + +Shutdown is initiated by the process running AppDaemon receiving either a {py:obj}`~signal.SIGINT` or {py:obj}`~signal.SIGTERM` signal. + +```{literalinclude} ../../appdaemon/__main__.py +:language: python +:lineno-match: +:pyobject: ADMain.handle_sig +:caption: ADMain.handle_sig() method +:emphasize-lines: 21-23 +``` + +The {py:meth}`~appdaemon.appdaemon.AppDaemon.stop` method is called, which in turn calls the stop methods of the various subsystems. It doesn't return until all the tasks that existed at the time of the call have finished. This usually happens almost instantly, but this has a 3s timeout just to be safe. + +```{literalinclude} ../../appdaemon/__main__.py +:language: python +:lineno-match: +:pyobject: ADMain.stop +:caption: ADMain.stop() method +:emphasize-lines: 4 +``` + +This will stop the event loop when all the tasks have finished, which breaks the {py:meth}`~asyncio.loop.run_forever` call in {py:meth}`~appdaemon.__main__.ADMain.run` and causes the {py:class}`~contextlib.ExitStack` to close. + +The {py:meth}`~appdaemon.appdaemon.AppDaemon.stop` method of the {py:class}`~appdaemon.appdaemon.AppDaemon` object is responsible for stopping all the subsystems in the correct order. It first sets a global stop event that all the subsystems check at the top of their respective loops. It then calls the stop methods of each subsystem in turn, waiting for each to finish before proceeding to the next one. Finally, it cancels any remaining tasks and waits for them to finish, with a timeout of 3 seconds. + +### Stop Event + +AppDaemon shutdown is globally indicated by the stop {py:class}`~asyncio.Event` getting set in the top-level {py:class}`~appdaemon.appdaemon.AppDaemon` object. All the subsystems check the status of this event using the {py:meth}`~asyncio.Event.is_set` method at the top of their respective loops, and exit if it is set. The general pattern is like this: + +```python +import asyncio +import contextlib + +async def loop(self): + while not self.AD.stop_event.is_set(): + ... # Do stuff + with contextlib.suppress(asyncio.TimeoutError): + await asyncio.wait_for(self.AD.stop_event.wait(), timeout=1) +``` + +Rather than using {py:func}`~asyncio.sleep` to wait between iterations, they use {py:func}`~asyncio.wait_for` to wait for the stop event with a timeout. The timeout is suppressed with {py:func}`~contextlib.suppress` so that it doesn't raise an exception. Whenever the event is set, {py:meth}`~asyncio.Event.wait` returns immediately, which causes {py:func}`~asyncio.wait_for` to return immediately rather than waiting for the timeout. + +## Reference + +```{eval-rst} +.. 
autoclass:: appdaemon.__main__.ADMain + :members: + :undoc-members: + :show-inheritance: +``` diff --git a/docs/internals/scheduler.md b/docs/internals/scheduler.md new file mode 100644 index 000000000..47f56fc60 --- /dev/null +++ b/docs/internals/scheduler.md @@ -0,0 +1,31 @@ +# Scheduler + +Anything that depends on time is handled by the Scheduler. The general idea is that the scheduler does an async wait until the next scheduled event is due and then dispatches its callback; if anything changes the schedule in the meantime, the wait is recalculated. + +## Scheduler Entry Structure + +Each entry in the schedule (`self.AD.sched.schedule`) contains the following fields: + +| Field | Type | Description | +|---------------|------|-------------| +| `name` | `str` | The name of the scheduled callback (usually the method name) | +| `id` | `str` | Unique identifier for this scheduled entry | +| `callback` | `functools.partial` | The callback function to be executed, wrapping the bound method | +| `timestamp` | `datetime.datetime` | The next scheduled execution time (timezone-aware) | +| `interval` | `datetime.timedelta` | Time interval between repeated executions (0 for one-time callbacks) | +| `basetime` | `datetime.datetime` | The base time used for calculating subsequent executions | +| `repeat` | `bool` | Whether this callback should repeat after execution | +| `offset` | `datetime.timedelta` | Time offset applied to the base time for scheduling | +| `random_start`| `datetime.timedelta \| None` | Start of random time window (if using random scheduling) | +| `random_end` | `datetime.timedelta \| None` | End of random time window (if using random scheduling) | +| `type` | `str` | Type of schedule (e.g., 'next_rising', 'interval', 'cron') | +| `pin_app` | `bool` | Whether to pin this callback to a specific app thread | +| `pin_thread` | `int` | The thread number to pin this callback to (0 for main thread) | +| `kwargs` | `dict` | Additional keyword arguments to pass to the callback | + +## Reference + +```{eval-rst} +.. autoclass:: appdaemon.scheduler.Scheduler + :members: +``` diff --git a/docs/internals/threading.md b/docs/internals/threading.md new file mode 100644 index 000000000..fb4c9b51b --- /dev/null +++ b/docs/internals/threading.md @@ -0,0 +1,90 @@ +# Threading + +## Configuration + +| Property | Default | Description | +|-----------------|-------------|--------------| +| `pin_apps` | True | Whether to pin apps to threads | +| `total_threads` | None | The total number of threads to create, defaults to the number of active apps. | +| `pin_threads` | None | The number (out of the total) of threads to reserve for pinning; only used by `select_q` when `pin_apps=False`. Defaults to all threads. | + +## Thread Creation + +- Threads are created during startup in {py:meth}`~appdaemon.threads.Threading.create_initial_threads`. +- User-configured values are resolved into actual numbers using {py:meth}`~appdaemon.threads.Threading.resolve_thread_counts`. +- Threads all have an ID number (0 - <`appdaemon.total_threads`>) +- Some of them are reserved for pinned callbacks (0 - <`appdaemon.pin_threads`>) +- The remaining are free for unpinned callbacks +- AppDaemon will assign a `pin_thread` to an app when it's created, if `pin_app=True` and `pin_thread=None`. + - To not get an assigned thread, an app must set `pin_app=False` in its config file, or the global `pin_apps` setting must be `False`. 
+- Pin behavior and thread assignments are stored in `ManagedObject` attributes + +## Thread Pinning + +These settings get passed around through the AppDaemon internals and ultimately determine which thread handles a callback. + +| Setting | Description | +|---------|-------------| +| `pin_app` | Whether the app should be pinned to a single thread. | +| `pin_thread` | The thread the app is pinned to. This determines which thread the callback gets run in. | + +| Level | Description | +|-------|--| +| Global | The global settings are set in the `appdaemon.yaml` file, and each app defaults to the global setting when it's created. | +| Per App | The pinning behavior for apps can be changed at runtime. The current settings are stored in the corresponding `ManagedObject`. | +| Per Callback | The pinning behavior for a specific callback can be overridden at registration. | + +- App gets created by the app manager + - App object and settings are stored in the `ManagedObject` + - `pin_app` is set from the config file, defaulting to the global setting + - `pin_thread` may be set from the config file, or may be assigned by AppDaemon at that point +- While the app runs, `pin_app` or `pin_thread` may be changed at some point. + - Any changes affect the settings in `ManagedObject` +- Callback could also override either `pin_app` or `pin_thread` + - `pin_app` should be calculated at callback registration + - If the *callback* specifies a `pin_thread`, that implies `pin_app=True`; otherwise the `pin_thread` will get ignored in `select_q` + - If `pin_app` ends up being True, but there's no `pin_thread` assigned, one needs to be assigned at call time; it will be 0. + - If `pin_app` is False, `pin_thread` will get ignored at call time + - Callback lives in internal dicts + - `pin_app` is never `None` + - `pin_thread` could be `None` - load distribution should be calculated at call time. +- `pin_app` and `pin_thread` make their way through the internals, eventually to `Threading.select_q` + - `select_q` hinges on `pin_app` being True/False. + - `pin_app` is never `None` + - If `pin_app` is not True, `pin_thread` is ignored, and the thread is automatically chosen according to the load distribution setting + +### Config Precedence + +1) **Callback-level pin**: Top-level override +2) **App-level pin**: Acts as the default for each callback as it's created. +3) **Config-level pin (default)**: Acts as the default when the app manager creates a new `ManagedObject` + +### Default Case + +By default, a new thread will be created for each active app, and each app will get pinned to its own thread. + +- Default (initial case) + - Global: `pin_apps=True` + - App: `pin_app=None` +- Each app has a `ManagedObject` instance that holds the `pin_app` and `pin_thread` settings for that app. + +- When an app is created, `pin_app=True` gets set (from the global default) and `pin_thread` gets assigned. +- The `pin_app` and `pin_thread` settings for an app can be altered in the `ManagedObject` during runtime. +- The `pin_app` and `pin_thread` settings can be provided during callback registration to override the settings for that callback only. + +## Callback Registration + +The {py:meth}`~appdaemon.threads.Threading.determine_thread` method is used during callback registration to resolve `pin_app` for that callback, as illustrated in the sketch below. 
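+The precedence rules can be illustrated with a small, self-contained sketch. This is illustrative only; the function and argument names here are hypothetical, not the actual implementation:
+
+```python
+def resolve_pin(
+    cb_pin: bool | None,         # pin passed at callback registration, if any
+    cb_pin_thread: int | None,   # pin_thread passed at callback registration, if any
+    app_pin: bool,               # pin_app from the app's ManagedObject
+    app_pin_thread: int | None,  # pin_thread from the app's ManagedObject
+    thread_count: int,
+) -> tuple[bool, int | None]:
+    # A callback-level pin_thread implies pinning, but it must be in range
+    if cb_pin_thread is not None:
+        if not 0 <= cb_pin_thread < thread_count:
+            raise ValueError(f"pin_thread {cb_pin_thread} out of range")
+        return True, cb_pin_thread
+    # Otherwise the app-level settings act as the defaults
+    pin = cb_pin if cb_pin is not None else app_pin
+    return pin, app_pin_thread if pin else None
+
+# Callback-level settings take precedence:
+assert resolve_pin(None, 3, app_pin=False, app_pin_thread=None, thread_count=8) == (True, 3)
+# App-level settings act as the default:
+assert resolve_pin(None, None, app_pin=True, app_pin_thread=1, thread_count=8) == (True, 1)
+```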
+ +| Callback Type | Registration | Processing | +|---------------|--------|---------------| +| Schedule | {py:meth}`~appdaemon.scheduler.Scheduler.insert_schedule` | {py:meth}`~appdaemon.scheduler.Scheduler.exec_schedule` | +| Event | {py:meth}`~appdaemon.events.Events.add_event_callback` | {py:meth}`~appdaemon.events.Events.process_event_callbacks` | +| State | {py:meth}`~appdaemon.state.State.add_state_callback` | {py:meth}`~appdaemon.state.State.process_state_callbacks` | +| Log | {py:meth}`~appdaemon.logging.Logging.add_log_callback` | {py:meth}`~appdaemon.logging.Logging.process_log_callbacks` | + +## Reference + +```{eval-rst} +.. autoclass:: appdaemon.threads.Threading + :members: determine_thread, add_thread, dispatch_worker, select_q +``` diff --git a/tests/conf/apps/apps.yaml b/tests/conf/apps/apps.yaml deleted file mode 100644 index 7156f6155..000000000 --- a/tests/conf/apps/apps.yaml +++ /dev/null @@ -1,34 +0,0 @@ -state_test_app: - module: state_test_app - class: StateTestApp - delay: 0.5 - -scheduler_test_app: - module: scheduler_test_app - class: SchedulerTestApp - -test_run_in: - module: scheduler_test_app - class: TestSchedulerRunIn - -test_event_app: - module: event_test_app - class: TestEventCallback - event: "test_event" - -test_immediate_state: - module: state_test_app - class: TestImmediate - -test_run_daily: - module: scheduler_test_app - class: TestSchedulerRunDaily - time: "00:00:05" - -basic_namespace_app: - module: namespace_app - class: BasicNamespaceTester - -hybrid_namespace_app: - module: namespace_app - class: HybridWritebackTester diff --git a/tests/conf/apps/hello_world/hello.py b/tests/conf/apps/hello.py similarity index 100% rename from tests/conf/apps/hello_world/hello.py rename to tests/conf/apps/hello.py diff --git a/tests/conf/apps/hello_world/apps.yaml b/tests/conf/apps/hello_world/apps.yaml deleted file mode 100644 index 05f50fada..000000000 --- a/tests/conf/apps/hello_world/apps.yaml +++ /dev/null @@ -1,8 +0,0 @@ -hello_world: - module: hello - class: HelloWorld - -another_app: - module: hello - class: HelloWorld - my_kwarg: "asdf" diff --git a/tests/conf/apps/namespace_app.py b/tests/conf/apps/namespace_app.py index 7f9136147..018dfb35f 100644 --- a/tests/conf/apps/namespace_app.py +++ b/tests/conf/apps/namespace_app.py @@ -1,3 +1,4 @@ +import asyncio import functools from datetime import timedelta from typing import Any @@ -6,30 +7,26 @@ class BasicNamespaceTester(ADAPI): - handle: str | None + changed_event: asyncio.Event def initialize(self) -> None: + self.set_log_level("DEBUG") + self.changed_event = asyncio.Event() self.set_namespace(self.custom_test_namespace) - self.logger.info('Current namespaces: %s', sorted(self.current_namespaces)) - self.run_in(self.show_entities, 0) + self.logger.info("Initial namespaces: %s", sorted(self.current_namespaces)) exists = self.test_entity.exists() - self.logger.info(f"Entity exists: {exists}") + self.logger.info("Entity exists: %s", exists) if not exists: self.add_entity("sensor.test", state="initial", attributes={"friendly_name": "Test Sensor"}) - self.run_in(self.show_entities, 0) non_existence = "sensor.other_entity" - self.logger.info("Setting %s in default namespace", non_existence) + self.logger.info("Setting %s in namespace %s", non_existence, self.namespace) self.set_state(non_existence, state="other", attributes={"friendly_name": "Other Entity"}) - self.run_in(self.start_test, self.start_delay) self.test_entity.listen_state(self.handle_state) - self.log(f"Initialized 
{self.name}") - - self.set_namespace('default') - self.remove_namespace(self.custom_test_namespace) + self.run_in(self.start_test, self.start_delay) + self.logger.info("Initialized %s", self.name) @property def current_namespaces(self) -> set[str]: @@ -50,20 +47,21 @@ def test_entity(self) -> Entity: async def show_entities(self, *args, **kwargs) -> None: ns = self.AD.state.state.get(self.custom_test_namespace, {}) entities = sorted(ns.keys()) - self.log('Test entities: %s', entities) + self.logger.info("Test entities: %s", entities) def start_test(self, *args, **kwargs: Any) -> None: match kwargs: case {"__thread_id": str(thread_id)}: - self.log(f"Change called from thread {thread_id}") + self.logger.info("Change called from thread %s", thread_id) self.test_entity.set_state("changed") - def handle_state(self, entity: str, attribute: str, old: Any, new: Any, **kwargs: Any) -> None: - self.log(f"State changed for {entity}: {attribute} = {old} -> {new}") - self.log(f"Test val: {self.args.get('test_val')}") + async def handle_state(self, entity: str, attribute: str, old: Any, new: Any, **kwargs: Any) -> None: + self.logger.info("State changed for %s: %s = %s -> %s", entity, attribute, old, new) + self.logger.info("Test val: %s", self.args.get("test_val")) full_state = self.test_entity.get_state('all') self.log(f"Full state: {full_state}") + self.changed_event.set() def terminate(self) -> None: self.set_namespace('default') @@ -72,11 +70,11 @@ def terminate(self) -> None: class HybridWritebackTester(ADAPI): def initialize(self) -> None: - self.AD.logging.get_child("_state") self.set_namespace(self.custom_test_namespace, writeback="hybrid", persist=True) - self.logger.info("Initialized %s in namespace '%s'", self.name, self.custom_test_namespace) + # self.logger.info("Initialized %s in namespace '%s'", self.name, self.custom_test_namespace) self.run_in(self.rapid_changes, self.start_delay) + self.logger.info("Initialized %s", self.name) @property def custom_test_namespace(self) -> str: @@ -91,8 +89,10 @@ def test_n(self) -> int: return self.args.get("test_n", 10) async def rapid_changes(self, *args, **kwargs) -> None: + entity_id = "sensor.hybrid_test" + for i in range(self.test_n): - func = functools.partial(self.set_state, "sensor.hybrid_test", state=f"change_{i}") + func = functools.partial(self.set_state, entity_id, state=f"change_{i}") delay = i * 0.05 self.AD.loop.call_later(delay, func) diff --git a/tests/conf/apps/pin_thread_app.py b/tests/conf/apps/pin_thread_app.py new file mode 100644 index 000000000..5599cea28 --- /dev/null +++ b/tests/conf/apps/pin_thread_app.py @@ -0,0 +1,17 @@ +from appdaemon.adapi import ADAPI + + +class PinThreadTester(ADAPI): + def initialize(self): + self.set_log_level("DEBUG") + self.set_namespace("test") + self.run_in( + self.example_callback, + delay=self.args.get("register_delay", 0.2), + pin=self.args.get("cb_pin_app"), + pin_thread=self.args.get("cb_pin_thread") + ) + self.logger.info("%s initialized", __class__.__name__) + + def example_callback(self, **kwargs): + self.logger.info('Example callback: %s', kwargs) diff --git a/tests/conf/apps/scheduler_test_app.py b/tests/conf/apps/scheduler_test_app.py index c51a2b0f2..604113f3e 100644 --- a/tests/conf/apps/scheduler_test_app.py +++ b/tests/conf/apps/scheduler_test_app.py @@ -1,61 +1,8 @@ -from enum import Enum - from appdaemon.adapi import ADAPI +from appdaemon.utils import format_timedelta -class SchedulerTestAppMode(str, Enum): - """Enum for different modes of the SchedulerTestApp.""" - - RUN_EVERY = 
"run_every" - RUN_IN = "run_in" - - -class SchedulerTestApp(ADAPI): - """ - A test app to verify scheduler functionality. - - Configuration Args: - mode (str, optional): The mode of operation. Defaults to 'run_every'. - register_delay (float, optional): Delay before setup in seconds. Defaults to 0.5. - - RUN_EVERY: - interval (int): Interval in seconds for run_every. Required. - msg (str): Message to pass to callback. Required. - start (str, optional): Start time description. Defaults to "now". - """ - def initialize(self): - self.set_log_level("DEBUG") - self.log("SchedulerTestApp initialized") - self.set_namespace("test") - self.run_in(self.setup_callback, delay=self.register_delay) - - def setup_callback(self, **kwargs) -> None: - self.log(f"Running in {self.mode} mode") - match self.mode: - case SchedulerTestAppMode.RUN_EVERY: - match self.args: - case {"interval": interval, "msg": str(msg)}: - start = self.args.get("start", "now") - self.run_every(self.run_every_callback, start=start, interval=interval, msg=msg) - return - case SchedulerTestAppMode.RUN_IN: - pass - raise ValueError(f"Invalid arguments for {self.mode}") - - @property - def mode(self) -> SchedulerTestAppMode: - return SchedulerTestAppMode(self.args.get("mode", SchedulerTestAppMode.RUN_EVERY)) - - @property - def register_delay(self) -> float: - return self.args.get("register_delay", 0.5) - - def run_every_callback(self, **kwargs) -> None: - """Callback function for run_every.""" - self.log(f"Run every callback executed with kwargs: {kwargs}") - - -class TestSchedulerRunIn(ADAPI): +class RunInTestApp(ADAPI): """A test app to verify run_in functionality.""" def initialize(self): @@ -86,12 +33,44 @@ def run_in_callback(self, **kwargs) -> None: self.logger.info("Run in callback executed with kwargs: %s", kwargs) +class RunEveryTestApp(ADAPI): + """ + A test app to verify scheduler functionality. + + Configuration Args: + start (str, optional): Start time description. Defaults to "now". + interval (int): Interval in seconds for run_every. Required. + msg (str): Message to pass to callback. Required. 
+ """ + def initialize(self): + self.set_log_level("DEBUG") + self.logger.info("%s initialized",self.__class__.__name__) + self.set_namespace("test") + + match self.args: + case {"interval": interval, "msg": str(msg)}: + self.logger.info("Registering callbacks every %s", format_timedelta(interval)) + self.run_every( + self.run_every_callback, + interval=interval, + msg=msg, + start=self.args.get("start", "now"), + pin=self.args.get('cb_pin_app'), + pin_thread=self.args.get('cb_pin_thread') + ) + return + raise ValueError("Invalid arguments for run_every") + + def run_every_callback(self, **kwargs) -> None: + """Callback function for run_every.""" + self.logger.info("Run every callback executed with kwargs: %s", kwargs) + + class TestSchedulerRunDaily(ADAPI): """A test app to verify run_daily functionality.""" def initialize(self): self.set_log_level("DEBUG") - self.set_namespace("test") self.run_daily(self.scheduled_callback, self.timing) self.logger.info(f"{self.__class__.__name__} initialized") @@ -102,4 +81,5 @@ def timing(self) -> str: def scheduled_callback(self, **kwargs) -> None: """Callback function for run_daily.""" + self.logger.info("Executed scheduled callback") self.logger.info("Run daily callback executed with kwargs: %s", kwargs) diff --git a/tests/conf/apps/state_test_app.py b/tests/conf/apps/state_test_app.py index 020627b23..a2ef1f998 100644 --- a/tests/conf/apps/state_test_app.py +++ b/tests/conf/apps/state_test_app.py @@ -1,40 +1,32 @@ import asyncio -from enum import Enum, auto from typing import Any from appdaemon.adapi import ADAPI +TEST_ENTITY = "test.some_entity" -class StateTestAppMode(str, Enum): - """Enum for different modes of the StateTestApp.""" - - def _generate_next_value_(name, start, count, last_values): - return name.upper() - - BASIC = auto() - LISTEN_KWARGS = auto() - NEW_STATE_FILTER_POSITIVE = auto() - NEW_STATE_FILTER_NEGATIVE = auto() - ATTRIBUTES = auto() - NEW_ATTRIBUTE_FILTER_POSITIVE = auto() - NEW_ATTRIBUTE_FILTER_NEGATIVE = auto() +class StateTestApp(ADAPI): + """A simple AppDaemon app to test state management. 
-TEST_ENTITY = "test.some_entity" + App Args: + listen_kwargs: Keyword arguments for the state listener (e.g., filters) + state_kwargs: Keyword arguments for setting the state (e.g., new state value) + delay: Delay before changing the state (default: 0.1 seconds) + """ -class StateTestApp(ADAPI): - """A simple AppDaemon app to test state management.""" + execute_event: asyncio.Event def initialize(self): self.set_log_level("DEBUG") - self.log("Hello from AppDaemon") + self.set_namespace("test", persist=False) self.execute_event = asyncio.Event() - self.set_namespace("test") self.add_entity(TEST_ENTITY, state="initialized") self.listen_state(self.state_callback, TEST_ENTITY, **self.listen_kwargs) if "state_kwargs" in self.args: self.run_in(self.test_change_state, delay=self.delay, **self.state_kwargs) + self.logger.info("%s initialized", self.__class__.__name__) @property def delay(self) -> float: @@ -49,11 +41,14 @@ def state_kwargs(self) -> dict[str, Any]: return self.args.get("state_kwargs", {}) def test_change_state(self, **kwargs): + self.logger.info("Changing state of %s with kwargs: %s", TEST_ENTITY, kwargs) + full_state = self.get_state(TEST_ENTITY, attribute="all") + self.logger.info("Current state of %s: %s", TEST_ENTITY, full_state) self.set_state(TEST_ENTITY, **kwargs) def state_callback(self, entity: str, attribute: str, old: Any, new: Any, **kwargs: Any) -> None: assert isinstance(entity, str), "Entity should be a string" - assert isinstance(attribute, str), "Attribute should be a string" + assert isinstance(attribute, str) or attribute is None, "Attribute should be a string or None" self.log(f" {entity}.{attribute} ".center(40, "-")) self.log(f"{entity}.{attribute} changed from {old} to {new} with kwargs: {kwargs}") @@ -62,14 +57,14 @@ def state_callback(self, entity: str, attribute: str, old: Any, new: Any, **kwar assert isinstance(new_state, dict), "State should be a dictionary" self.log(f"New state for {entity}: {new_state}") - self.log("State callback executed successfully") + self.logger.info("State callback executed successfully") self.execute_event.set() class TestImmediate(ADAPI): def initialize(self): self.set_log_level("DEBUG") - self.set_namespace("test") + self.set_namespace("test", persist=False) self.execute_event = asyncio.Event() self.set_state(TEST_ENTITY, state="on") self.listen_state(self.state_callback, TEST_ENTITY, immediate=True, new="on") diff --git a/tests/conftest.py b/tests/conftest.py index 65a1d4c8b..02d3e6670 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,21 +1,53 @@ import asyncio import logging -from collections.abc import AsyncGenerator, Callable, Generator +from collections.abc import AsyncGenerator, Callable, Iterable from contextlib import AbstractAsyncContextManager, asynccontextmanager -from datetime import datetime +from pathlib import Path +from typing import Any, Protocol import pytest import pytest_asyncio from appdaemon import AppDaemon from appdaemon.dependency_manager import DependencyManager from appdaemon.logging import Logging -from appdaemon.models.config.app import AppConfig +from appdaemon.models.config.app import AllAppConfig, AppConfig from appdaemon.models.config.appdaemon import AppDaemonConfig from appdaemon.utils import format_timedelta, recursive_get_files logger = logging.getLogger("AppDaemon._test") +@pytest_asyncio.fixture(scope="session", loop_scope="session") +async def running_loop(): + return asyncio.get_running_loop() + + +@pytest.fixture(scope="function") +def ad_cfg() -> AppDaemonConfig: + 
logger.debug("Creating AppDaemonConfig object") + return AppDaemonConfig.model_validate( + dict( + latitude=40.7128, + longitude=-74.0060, + elevation=0, + time_zone="America/New_York", + config_file="tests/conf/appdaemon.yaml", + ext=".yaml", + ) + ) + + +@pytest.fixture(scope="session") +def logging_obj() -> Logging: + logger.debug("Creating Logging object") + return Logging( + { + "main_log": {"format": "{asctime} {levelname} {appname}: {message}"}, + "diag_log": {"level": "WARNING", "filename": "tests/diag.log"}, + } + ) + + @pytest_asyncio.fixture(scope="function") async def ad_obj(running_loop: asyncio.BaseEventLoop, ad_cfg: AppDaemonConfig, logging_obj: Logging) -> AsyncGenerator[AppDaemon]: ad = AppDaemon( @@ -71,84 +103,8 @@ async def ad(ad_obj: AppDaemon, running_loop: asyncio.BaseEventLoop) -> AsyncGen app_config.disable = True -@pytest.fixture(scope="function") -def ad_cfg() -> AppDaemonConfig: - logger.debug("Creating AppDaemonConfig object") - return AppDaemonConfig.model_validate( - dict( - latitude=40.7128, - longitude=-74.0060, - elevation=0, - time_zone="America/New_York", - config_file="tests/conf/appdaemon.yaml", - write_toml=False, - ext=".yaml", - filters=[], - starttime=None, - endtime=None, - timewarp=1.0, - max_clock_skew=1, - # loglevel="INFO", - module_debug={ - "_app_management": "DEBUG", - "_state": "DEBUG", - # "_events": "DEBUG", - # "_scheduler": "DEBUG", - "_utility": "DEBUG", - }, - # namespaces={"test_namespace": {"writeback": "hybrid", "persist": False}}, - ) - ) - - -@pytest_asyncio.fixture(scope="module") -async def ad_obj_fast(logging_obj: Logging, running_loop, ad_cfg: AppDaemonConfig): - logger = logging.getLogger("AppDaemon._test") - logger.info(f"Passed loop: {hex(id(running_loop))}") - - ad_cfg.timewarp = 2000 - ad_cfg.starttime = ad_cfg.time_zone.localize(datetime(2025, 6, 25, 0, 0, 0)) - - ad = AppDaemon( - logging=logging_obj, - loop=running_loop, - ad_config_model=ad_cfg, - ) - - for cfg in ad.logging.config.values(): - logger = logging.getLogger(cfg["name"]) - logger.propagate = True - logger.setLevel("DEBUG") - - # ad.start() - yield ad - # raise_signal(Signals.SIGTERM) - # ad.stop() - pass - - -@pytest.fixture(scope="session") -def event_loop() -> Generator[asyncio.AbstractEventLoop]: - """Create a single event loop for the session.""" - loop = asyncio.new_event_loop() - yield loop - loop.close() - - -@pytest.fixture(scope="session") -def logging_obj() -> Logging: - logger.debug("Creating Logging object") - return Logging( - { - "main_log": {"format": "{asctime} {levelname} {appname}: {message}"}, - "diag_log": {"level": "WARNING", "filename": "tests/diag.log"}, - } - ) - - AsyncTempTest = Callable[..., AbstractAsyncContextManager[tuple[AppDaemon, pytest.LogCaptureFixture]]] - @pytest_asyncio.fixture(scope="function") async def run_app_for_time(ad: AppDaemon, caplog: pytest.LogCaptureFixture) -> AsyncTempTest: @asynccontextmanager @@ -164,6 +120,84 @@ async def _run(app_name: str, run_time: float | None = None, **kwargs): return _run -@pytest_asyncio.fixture(scope="session", loop_scope="session") -async def running_loop(): - return asyncio.get_running_loop() +class ConfiguredAppDaemonFunc(Protocol): + def __call__( + self, + app_cfgs: dict[str, dict[str, Any]] | None = None, + extra_ad_cfg: dict[str, Any] | None = None, + loggers: Iterable[str] | None = None, + ) -> AbstractAsyncContextManager[tuple[AppDaemon, pytest.LogCaptureFixture]]: ... 
+ +@pytest_asyncio.fixture(scope="function") +async def configured_appdaemon( + running_loop: asyncio.BaseEventLoop, + ad_cfg: AppDaemonConfig, + logging_obj: Logging, + caplog: pytest.LogCaptureFixture, +) -> ConfiguredAppDaemonFunc: + """Fixture factory for creating AppDaemon instances with custom configuration. + + Returns a callable that accepts additional AppDaemon config parameters and returns + an async context manager yielding a configured, started AppDaemon instance. + """ + @asynccontextmanager + async def _run( + app_cfgs: dict[str, dict[str, Any]] | None = None, + extra_ad_cfg: dict[str, Any] | None = None, + loggers: Iterable[str] | None = None, + ) -> AsyncGenerator[tuple[AppDaemon, pytest.LogCaptureFixture]]: + assert running_loop == asyncio.get_running_loop(), "The running loop should match the one passed in" + + # Merge kwargs into the base config + config_dict = ad_cfg.model_dump(by_alias=True) + extra_ad_cfg = {} if extra_ad_cfg is None else extra_ad_cfg + config_dict.update(extra_ad_cfg) + custom_cfg = AppDaemonConfig.model_validate(config_dict) + + ad = AppDaemon( + logging=logging_obj, + loop=running_loop, + ad_config_model=custom_cfg, + ) + logger.info(f"Created AppDaemon object {hex(id(ad))} with custom config") + + # Enable propagation for all AppDaemon loggers so caplog can capture them + for cfg in ad.logging.config.values(): + match cfg: + case {"name": str(name)}: + logger_ = logging.getLogger(name) + logger_.propagate = True + + loggers = [] if loggers is None else loggers + for logger_name in loggers: + logger.info(f"Setting up logger AppDaemon.{logger_name} for testing") + logger_ = logging.getLogger(f"AppDaemon.{logger_name}") + logger_.propagate = True + logger_.setLevel("DEBUG") + + await ad.app_management._process_import_paths() + config_files = list(recursive_get_files(base=ad.app_dir, suffix={'.yaml', '.toml'})) + ad.app_management.dependency_manager = DependencyManager(python_files=list(), config_files=config_files) + + app_cfgs = app_cfgs if app_cfgs is not None else {} + app_cfgs = { + name: { + "config_path": Path.cwd(), + "name": name, + **cfg + } + for name, cfg in app_cfgs.items() + } + ad.app_management.dependency_manager.app_deps.app_config = AllAppConfig.model_validate(app_cfgs) + ad.app_management.dependency_manager.app_deps.refresh_dep_graph() + + try: + ad.start() + logger.info(f"AppDaemon[{hex(id(ad))}] started") + with caplog.at_level(logging.DEBUG, "AppDaemon"): + yield ad, caplog + finally: + logger.info(f"AppDaemon[{hex(id(ad))}] stopping") + await ad.stop() + + return _run diff --git a/tests/functional/test_event.py b/tests/functional/test_event.py index 5b401e107..a8257dc8f 100644 --- a/tests/functional/test_event.py +++ b/tests/functional/test_event.py @@ -5,7 +5,7 @@ import pytest from appdaemon.app_management import ManagedObject -from .utils import AsyncTempTest +from tests.conftest import ConfiguredAppDaemonFunc logger = logging.getLogger("AppDaemon._test") @@ -15,15 +15,15 @@ class TestEventCallback: """Class to group the various tests for event callbacks.""" - app_name: str = "test_event_app" + app_name: str = "event_test_app" @pytest.mark.asyncio(loop_scope="session") - async def test_event_callback(self, run_app_for_time: AsyncTempTest) -> None: + async def test_event_callback(self, configured_appdaemon: ConfiguredAppDaemonFunc) -> None: + """Tests the event callback functionality and the passing of kwargs through events. 
Process: - Unique values are generated for the event and its kwargs - - The run_app_for_time context manager is used to run the test_event_app temporarily + - The configured_appdaemon context manager is used to run the event_test_app temporarily - Event test app listens for an event and fires the same event shortly after - Wait for :py:class:`~asyncio.Event` to be set by the callback in the app - Clear the :py:class:`~asyncio.Event` @@ -38,8 +38,16 @@ async def test_event_callback(self, run_app_for_time: AsyncTempTest) -> None: """ listen_id = str(uuid.uuid4()) fire_id = str(uuid.uuid4()) - app_args = {"listen_kwargs": {"test_kwarg": listen_id}, "fire_kwargs": {"test_fire_kwarg": fire_id}} - async with run_app_for_time(self.app_name, **app_args) as (ad, caplog): + app_cfgs = { + self.app_name: { + "module": "event_test_app", + "class": "TestEventCallback", + "listen_kwargs": {"test_kwarg": listen_id}, + "fire_kwargs": {"test_fire_kwarg": fire_id}, + } + } + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[self.app_name]) as (ad, caplog): + await ad.utility.app_update_event.wait() match ad.app_management.objects.get(self.app_name): case ManagedObject(object=app_obj): await asyncio.wait_for(app_obj.execute_event.wait(), timeout=0.5) @@ -69,7 +77,7 @@ async def test_event_callback(self, run_app_for_time: AsyncTempTest) -> None: @pytest.mark.parametrize("sign", [True, False]) @pytest.mark.asyncio(loop_scope="session") - async def test_event_callback_filtered(self, run_app_for_time: AsyncTempTest, sign: bool) -> None: + async def test_event_callback_filtered(self, configured_appdaemon: ConfiguredAppDaemonFunc, sign: bool) -> None: """Test the event callback filtering based on keyword arguments. If the event data has a key that matches one of the kwargs provided in the ``listen_event`` call, then the values @@ -80,7 +88,7 @@ async def test_event_callback_filtered(self, run_app_for_time: AsyncTempTest, si then the same value is used for listening to the event. Otherwise (negative case), a different, unique value is used to listen for the event, which will prevent the callback from executing. - The unique fire and listen values are passed to the app as args. - - The ``test_event_app`` app is run until a python :py:class:`~asyncio.Event` is set. + - The ``event_test_app`` app is run until a python :py:class:`~asyncio.Event` is set. - The :py:class:`~asyncio.Event` is created when the app initializes. - The app listens for the event and then fires it after a short delay, using the relevant kwargs for each. - If the callback is executed, :py:class:`~asyncio.Event` is set, and the unique values are printed in the logs. 
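The positive/negative structure described in these docstrings reduces to a small reusable idiom: wait on the app's `asyncio.Event` with a timeout, and in the negative case treat the timeout itself as the assertion. A hedged sketch of that idiom (illustrative helper, not part of this diff):

    import asyncio

    import pytest


    async def assert_callback_fired(event: asyncio.Event, should_fire: bool, timeout: float = 0.5) -> None:
        # Positive case: the app callback must set the event before the timeout expires.
        # Negative case: the timeout itself asserts that the callback never ran.
        if should_fire:
            await asyncio.wait_for(event.wait(), timeout=timeout)
        else:
            with pytest.raises(asyncio.TimeoutError):
                await asyncio.wait_for(event.wait(), timeout=timeout)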
@@ -94,12 +102,17 @@ async def test_event_callback_filtered(self, run_app_for_time: AsyncTempTest, si fire_id = str(uuid.uuid4()) listen_id = fire_id if sign else str(uuid.uuid4()) test_kwarg_name = "test_kwarg" - app_args = { - "listen_kwargs": {test_kwarg_name: listen_id}, - "fire_kwargs": {test_kwarg_name: fire_id}, + app_cfgs = { + self.app_name: { + "module": "event_test_app", + "class": "TestEventCallback", + "listen_kwargs": {test_kwarg_name: listen_id}, + "fire_kwargs": {test_kwarg_name: fire_id}, + } } - async with run_app_for_time(self.app_name, **app_args) as (ad, caplog): + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[self.app_name]) as (ad, caplog): + await ad.utility.app_update_event.wait() match ad.app_management.objects.get(self.app_name): case ManagedObject(object=app_obj): wait_coro = asyncio.wait_for(app_obj.execute_event.wait(), timeout=0.5) @@ -117,7 +130,7 @@ async def test_event_callback_filtered(self, run_app_for_time: AsyncTempTest, si @pytest.mark.parametrize("sign", [True, False]) @pytest.mark.asyncio(loop_scope="session") - async def test_event_callback_namespace(self, run_app_for_time: AsyncTempTest, sign: bool) -> None: + async def test_event_callback_namespace(self, configured_appdaemon: ConfiguredAppDaemonFunc, sign: bool) -> None: """Test the event callback functionality with different namespaces. Event callbacks should only be fired for events in the correct namespace. @@ -133,17 +146,30 @@ async def test_event_callback_namespace(self, run_app_for_time: AsyncTempTest, s The listen and fire namespaces do not match, so the callback is not executed. """ namespace = "test" + base_app_cfg = { + "module": "event_test_app", + "class": "TestEventCallback", + } if sign: # The listen and fire namespaces have to match for the callback to work - app_args = { - "listen_kwargs": {"namespace": namespace}, - "fire_kwargs": {"namespace": namespace}, + app_cfgs = { + self.app_name: { + **base_app_cfg, + "listen_kwargs": {"namespace": namespace}, + "fire_kwargs": {"namespace": namespace}, + } } else: # If the event is listened in a different namespace, then it won't be triggered - app_args = {"listen_kwargs": {"namespace": namespace}} + app_cfgs = { + self.app_name: { + **base_app_cfg, + "listen_kwargs": {"namespace": namespace}, + } + } - async with run_app_for_time(self.app_name, **app_args) as (ad, caplog): + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[self.app_name]) as (ad, caplog): + await ad.utility.app_update_event.wait() match ad.app_management.objects.get(self.app_name): case ManagedObject(object=app_obj): wait_coro = asyncio.wait_for(app_obj.execute_event.wait(), timeout=0.5) @@ -160,7 +186,7 @@ async def test_event_callback_namespace(self, run_app_for_time: AsyncTempTest, s assert "Event callback executed" in caplog.text @pytest.mark.asyncio(loop_scope="session") - async def test_event_callback_oneshot(self, run_app_for_time: AsyncTempTest) -> None: + async def test_event_callback_oneshot(self, configured_appdaemon: ConfiguredAppDaemonFunc) -> None: """Test the oneshot functionality of the event callback. Event callbacks that are registered with the oneshot flag should only be fired once. @@ -172,8 +198,15 @@ async def test_event_callback_oneshot(self, run_app_for_time: AsyncTempTest) -> Coverage: - Event callbacks that are registered with the oneshot flag should only be fired once. 
""" - app_args = {"listen_kwargs": {"oneshot": True}} - async with run_app_for_time(self.app_name, **app_args) as (ad, caplog): + app_cfgs = { + self.app_name: { + "module": "event_test_app", + "class": "TestEventCallback", + "listen_kwargs": {"oneshot": True}, + } + } + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[self.app_name]) as (ad, caplog): + await ad.utility.app_update_event.wait() match ad.app_management.objects.get(self.app_name): case ManagedObject(object=app_obj): await asyncio.wait_for(app_obj.execute_event.wait(), timeout=0.5) diff --git a/tests/functional/test_namespaces.py b/tests/functional/test_namespaces.py index e98b9147e..52306ec38 100644 --- a/tests/functional/test_namespaces.py +++ b/tests/functional/test_namespaces.py @@ -1,11 +1,13 @@ +import asyncio import logging import uuid import pytest from appdaemon.utils import PersistentDict -from .utils import AsyncTempTest +from tests.conftest import ConfiguredAppDaemonFunc +from tests.utils import wait_for_event logger = logging.getLogger("AppDaemon._test") @@ -16,86 +18,133 @@ ] -def assert_current_namespace(caplog: pytest.LogCaptureFixture, expected_namespace: str) -> None: - """Assert that the expected namespace is in the current namespaces log.""" - for record in caplog.records: - match record: - case logging.LogRecord(levelno=logging.INFO, msg=str(msg), args=[list(namespaces), *_]): - if msg == 'Current namespaces: %s': - assert expected_namespace in namespaces, f'Expected {expected_namespace} in current namespaces' - return - assert False, f"Did not find log record for current namespaces including {expected_namespace}" - - -def assert_non_existence_warning(caplog: pytest.LogCaptureFixture, correct: int = 1) -> None: - """Assert that a warning about non-existence of an entity in a namespace was logged.""" - non_existence_warnings = [ - r for r in caplog.records - if r.levelno >= logging.WARNING and - r.msg == "Entity %s not found in namespace %s" - ] - assert len(non_existence_warnings) == correct, f"Expected {correct} warning(s) about non-existence should be logged" - - @pytest.mark.asyncio(loop_scope="session") -async def test_simple_namespaces(run_app_for_time: AsyncTempTest) -> None: +async def test_simple_namespaces(configured_appdaemon: ConfiguredAppDaemonFunc) -> None: """Test simple namespace functionality.""" test_val = str(uuid.uuid4()) test_ns = "test_namespace" - app_kwargs = { - "custom_namespace": test_ns, - 'start_delay': 0.1, - "test_val": test_val, + app_name = "basic_namespace_app" + app_cfgs = { + app_name: { + "module": "namespace_app", + "class": "BasicNamespaceTester", + 'start_delay': 0.1, + "custom_namespace": test_ns, + "test_val": test_val, + } } - async with run_app_for_time("basic_namespace_app", 0.5, **app_kwargs) as (ad, caplog): - assert "Persistent namespace 'test_namespace' initialized from MainThread" in caplog.text - - # In order for this to be in the log, the state change callback must have fired, which means that the entity - # was created in the correct namespace and the state change was detected. 
- assert test_val in caplog.text - # The current namespaces should include the custom test namespace - assert_current_namespace(caplog, test_ns) - - # There should only be one warning about non-existence of the entity in the custom namespace - assert_non_existence_warning(caplog) + async with configured_appdaemon(app_cfgs=app_cfgs) as (ad, caplog): + for p in ad.state.namespace_path.iterdir(): + p.unlink() + + await ad.utility.app_update_event.wait() + await wait_for_event(ad, app_name, "changed_event", timeout=1.0) + + init_ts = None + changed_delta = None + non_existence_warning = False + for record in caplog.records: + match record: + case logging.LogRecord( + levelno=logging.INFO, + msg='Initial namespaces: %s', + args=[list() as init_namespaces, *_], + ): + assert test_ns in init_namespaces, f'Expected {test_ns} in initial namespaces' + case logging.LogRecord( + levelno=logging.INFO, + msg="Initialized %s", + created=float(init_ts), + args=[str(app_name_), *_], + ): + assert app_name_ == app_name, f"Expected app name to be {app_name}" + case logging.LogRecord(levelno=logging.WARNING, msg="Entity %s not found in namespace %s"): + non_existence_warning = True + case logging.LogRecord( + levelno=logging.INFO, + msg="Change called from thread %s", + created=float(changed_ts) + ): + assert init_ts is not None, "Initialization timestamp should be set before change timestamp" + changed_delta = changed_ts - init_ts + + assert non_existence_warning, "Expected non-existence warning was not logged" + assert changed_delta is not None, "Changed delta should have been calculated" + logger.info("Changed delta: %s", changed_delta) @pytest.mark.asyncio(loop_scope="session") -async def test_hybrid_writeback(run_app_for_time: AsyncTempTest) -> None: +async def test_hybrid_writeback(configured_appdaemon: ConfiguredAppDaemonFunc) -> None: """Test hybrid namespace functionality. The general idea is to create a namespace with hybrid writeback and ensure that it saves correctly. 
""" test_val = str(uuid.uuid4()) test_ns = "hybrid_test_ns" - app_kwargs = { - "custom_namespace": test_ns, - "test_val": test_val, - "start_delay": 0.5, - "test_n": 10**3, + app_name = "hybrid_namespace_app" + app_cfgs = { + app_name: { + "module": "namespace_app", + "class": "HybridWritebackTester", + "start_delay": 0.1, + "custom_namespace": test_ns, + "test_val": test_val, + "test_n": 10, + } } - async with run_app_for_time("hybrid_namespace_app", 2.2, **app_kwargs) as (ad, caplog): + + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=["_state"]) as (ad, caplog): + for p in ad.state.namespace_path.iterdir(): + p.unlink() + + await ad.utility.app_update_event.wait() + await asyncio.sleep(2.5) + + state = None + app_initialized = False + namespace_initialized = False + save_count = 0 + dbm_error = False + + for record in caplog.records: + match record: + case logging.LogRecord( + levelno=logging.INFO, + msg="Initialized %s", + args=[str(app_name_), *_], + ): + assert app_name_ == app_name, f"Expected app name to be {app_name}" + app_initialized = True + case logging.LogRecord( + levelno=logging.INFO, + msg="Persistent namespace '%s' initialized from %s", + args=[str(ns), str(thread)], + ): + assert ns == test_ns, f"Expected namespace to be {test_ns}, got {ns}" + assert thread == "MainThread", f"Expected namespace to be initialized from MainThread, got {thread}" + namespace_initialized = True + case logging.LogRecord( + levelno=logging.DEBUG, + msg="Saving hybrid persistent namespace: %s", + args=[str(ns), *_], + ): + if ns == test_ns: + save_count += 1 + case logging.LogRecord(msg=str(msg)) if "dbm.sqlite3.error" in msg: + dbm_error = True + + assert app_initialized, f"App {app_name} should have been initialized" + assert namespace_initialized, f"Persistent namespace '{test_ns}' should have been initialized" + assert save_count >= 2, f"Expected exactly two saves of hybrid persistent namespace, got {save_count}" + match ad.state.state.get(test_ns): case PersistentDict() as state: - def get_files(): - return list(state.filepath.parent.glob(f"{test_ns}*")) - files = get_files() - assert len(files) > 0, f'Namespace files for {test_ns} should exist, but it does not.' + files = list(state.filepath.parent.glob(f"{test_ns}*")) + assert len(files) > 0, f'Namespace files for {test_ns} should exist, but they do not.' 
            case _:
                assert False, f"Expected a PersistentDict for namespace '{test_ns}'"
 
-        assert f"Persistent namespace '{test_ns}' initialized from MainThread" in caplog.text
-        saves = [
-            record
-            for record in caplog.records
-            if record.levelno == logging.DEBUG and
-            record.msg == "Saving hybrid persistent namespace: %s"
-        ]
-        assert len(saves) == 2, "Expected exactly two saves of hybrid persistent namespace"
-
-        files = get_files()
         namespace_files = [f.name for f in state.filepath.parent.iterdir() if f.is_file()]
         assert not namespace_files, f"Namespace files for {test_ns} should not exist after test, but they do: {namespace_files}"
-
-        assert "dbm.sqlite3.error" not in caplog.text
+        assert not dbm_error, "dbm.sqlite3.error should not appear in logs"
diff --git a/tests/functional/test_pinning.py b/tests/functional/test_pinning.py
new file mode 100644
index 000000000..277ab4fc2
--- /dev/null
+++ b/tests/functional/test_pinning.py
@@ -0,0 +1,173 @@
+import asyncio
+import logging
+import logging.config
+from logging import LogRecord
+from random import randint
+
+import pytest
+from appdaemon.parse import parse_timedelta
+
+from tests.conftest import ConfiguredAppDaemonFunc
+
+logger = logging.getLogger("AppDaemon._test")
+
+
+def find_app_line(caplog: pytest.LogCaptureFixture, app_name: str, msg: str):
+    for record in caplog.records:
+        match record:
+            case LogRecord(
+                appname=str(app_name_),
+                msg=str(msg_),
+            ) if app_name_ == app_name and msg_ == msg:
+                return record
+    return False
+
+
+@pytest.mark.asyncio(loop_scope="session")
+async def test_config_pin_thread(configured_appdaemon: ConfiguredAppDaemonFunc) -> None:
+    run_time = 0.5
+
+    extra_ad_cfg = {
+        "total_threads": 7,
+        "pin_threads": 5
+    }
+
+    app_cfgs = {
+        f'test-app-{n}': {
+            'module': 'pin_thread_app',
+            'class': 'PinThreadTester',
+            'pin_thread': randint(0, extra_ad_cfg['pin_threads'])
+        }
+        for n in range(1, 10)
+    }
+
+    thread_map = {
+        app_name: f'thread-{cfg["pin_thread"]}'
+        for app_name, cfg in app_cfgs.items()
+    }
+
+    async with configured_appdaemon(app_cfgs=app_cfgs, extra_ad_cfg=extra_ad_cfg) as (ad, caplog):
+        await asyncio.sleep(run_time)
+        for app_name in app_cfgs:
+            assert find_app_line(caplog, app_name, "%s initialized") is not False, (
+                "Didn't match the app initialization"
+            )
+
+            match find_app_line(caplog, app_name, "Example callback: %s"):
+                case LogRecord(args={"__thread_id": str(thread_name)}):
+                    assert thread_map[app_name] == thread_name
+                case _:
+                    assert False, f"Callback for {app_name} didn't run on a pinned thread"
+
+
+@pytest.mark.asyncio(loop_scope="session")
+async def test_callback_pin_thread(configured_appdaemon: ConfiguredAppDaemonFunc) -> None:
+    run_time = 1.0
+    n_apps = 10
+
+    app_cfgs = {
+        f'test-pin-app-{n}': {
+            'module': 'pin_thread_app',
+            'class': 'PinThreadTester',
+            "register_delay": parse_timedelta(0.1),
+            "cb_pin_thread": randint(0, n_apps - 1),
+        }
+        for n in range(n_apps)
+    }
+
+    thread_map = {
+        app_name: f'thread-{app_cfg["cb_pin_thread"]}'
+        for app_name, app_cfg in app_cfgs.items()
+    }
+
+    extra_ad_cfg = {
+        'total_threads': None,
+        'pin_threads': None,
+        'pin_apps': True
+    }
+
+    async with configured_appdaemon(app_cfgs=app_cfgs, extra_ad_cfg=extra_ad_cfg) as (ad, caplog):
+        await asyncio.sleep(run_time)
+
+        assert ad.threading.thread_count == n_apps, "Thread count doesn't match app count"
+        for app_name in app_cfgs:
+            assert find_app_line(caplog, app_name, "%s initialized") is not False, (
+                "Didn't match the app initialization"
+            )
+            match find_app_line(caplog, app_name, 'Example callback: %s'):
+                case LogRecord(args={"__thread_id": str(thread_name)}):
+
assert thread_map[app_name] == thread_name + case _: + assert False + + +@pytest.mark.asyncio(loop_scope="session") +@pytest.mark.parametrize("pin_app", [None, True, False]) +async def test_pin_thread_raises_exception(pin_app: bool | None, configured_appdaemon: ConfiguredAppDaemonFunc) -> None: + pin_thread = -1 + run_time = 1.0 + + app_cfgs = { + 'test-pin-app': { + 'module': 'pin_thread_app', + 'class': 'PinThreadTester', + "register_delay": parse_timedelta(0.1), + "pin_thread": pin_thread, + "pin_app": pin_app + } + } + + extra_ad_cfg = {} + + async with configured_appdaemon(app_cfgs=app_cfgs, extra_ad_cfg=extra_ad_cfg) as (ad, caplog): + await asyncio.sleep(run_time) + if pin_app is not False: + err = [r for r in caplog.records if r.levelname == "ERROR"] + assert err[2].msg.strip() == f"NegativePinThread: Pin threads can't be negative: {pin_thread}" + + +@pytest.mark.asyncio(loop_scope="session") +async def test_new_app_pins(configured_appdaemon: ConfiguredAppDaemonFunc): + n_apps = 5 + + app_cfgs = { + f'test-pin-app-{n}': { + 'module': 'pin_thread_app', + 'class': 'PinThreadTester', + "write_app_file": False, + "register_delay": 0.1, + } + for n in range(n_apps) + } + + async with configured_appdaemon( + loggers=["_threading"], + extra_ad_cfg={ + "total_threads": None, + "pin_threads": None, + "pin_apps": True, + }, + ) as (ad, caplog): + logger.info('=' * 150) + for app_name, app_cfg in app_cfgs.items(): + await asyncio.sleep(0.25) + await ad.services.call_service( + namespace="admin", + domain="app", + service="create", + data={"app": app_name, **app_cfg}, + ) + assert find_app_line(caplog, app_name, "%s initialized") is not False, ( + "Didn't match the app initialization" + ) + await asyncio.sleep(0.5) + assert ad.threading.thread_count == n_apps, "Thread count doesn't match app count" + + logger.info('=' * 150) + + for n, app_name in enumerate(app_cfgs): + match find_app_line(caplog, app_name, 'Example callback: %s'): + case LogRecord(args={"__thread_id": str(thread_name)}): + assert thread_name == f'thread-{n}', ( + f"Called from the wrong thread: {thread_name}" + ) + case _: + assert False diff --git a/tests/functional/test_production_mode.py b/tests/functional/test_production_mode.py index f9483b488..ab5d27499 100644 --- a/tests/functional/test_production_mode.py +++ b/tests/functional/test_production_mode.py @@ -1,9 +1,13 @@ import os +from logging import LogRecord from unittest.mock import AsyncMock import pytest import pytest_asyncio from appdaemon.appdaemon import AppDaemon +from appdaemon.models.internal.app_management import ManagedObject + +from tests.conftest import ConfiguredAppDaemonFunc @pytest_asyncio.fixture(scope="function") @@ -20,29 +24,62 @@ async def ad_production(ad_obj: AppDaemon): @pytest.mark.ci @pytest.mark.functional @pytest.mark.asyncio(loop_scope="session") -async def test_production_mode_loads_apps(ad_production: AppDaemon) -> None: +async def test_production_mode_loads_apps(configured_appdaemon: ConfiguredAppDaemonFunc) -> None: """Test that apps load correctly when production_mode is enabled.""" - # Wait for initialization to complete - await ad_production.utility.app_update_event.wait() - # Check that the app loaded - assert "hello_world" in ad_production.app_management.objects + app_name = "hello_world" + async with configured_appdaemon( + app_cfgs={ + app_name: { + "module": "hello", + "class": "HelloWorld", + } + }, + extra_ad_cfg={"production_mode": True}, + loggers=[app_name], + ) as (ad, caplog): + await ad.utility.app_update_event.wait() + 
match ad.app_management.objects.get(app_name):
+            case ManagedObject(type="app", running=True):
+                pass
+            case _:
+                pytest.fail("HelloWorld app not found in the app management objects")
+
+        initialized = False
+        for record in caplog.records:
+            match record:
+                case LogRecord(appname=str(appname_), msg=str(msg)) if appname_ == app_name and "initialized" in msg:
+                    initialized = True
+                    break
+        if not initialized:
+            pytest.fail("HelloWorld app did not log initialization message")
+
 
 @pytest.mark.ci
 @pytest.mark.functional
 @pytest.mark.asyncio(loop_scope="session")
-async def test_production_mode_no_reloading(ad_production: AppDaemon) -> None:
+async def test_production_mode_no_reloading(configured_appdaemon: ConfiguredAppDaemonFunc) -> None:
     """Test that production mode doesn't reload apps when files change."""
-    # Wait for initialization to complete
-    await ad_production.utility.app_update_event.wait()
+    app_name = "hello_world"
+    async with configured_appdaemon(
+        app_cfgs={
+            app_name: {
+                "module": "hello",
+                "class": "HelloWorld",
+            }
+        },
+        extra_ad_cfg={"production_mode": True},
+        loggers=[app_name],
+    ) as (ad, caplog):
+        await ad.utility.app_update_event.wait()
 
-    # Mock check_app_updates to track calls from now on
-    mock = AsyncMock(wraps=ad_production.app_management.check_app_updates)
-    ad_production.app_management.check_app_updates = mock
+        # Mock check_app_updates to track calls from now on
+        mock = AsyncMock(wraps=ad.app_management.check_app_updates)
+        ad.app_management.check_app_updates = mock
 
-    # Touch file and wait for utility loop
-    ad_production.utility.app_update_event.clear()
-    os.utime(ad_production.app_dir / "hello.py", None)
-    await ad_production.utility.app_update_event.wait()
+        # Touch file and wait for utility loop
+        ad.utility.app_update_event.clear()
+        os.utime(ad.app_dir / "hello.py", None)
+        await ad.utility.app_update_event.wait()
 
-    assert not mock.called, "Should not reload in production mode"
+        assert not mock.called, "Should not reload in production mode"
diff --git a/tests/functional/test_run_daily.py b/tests/functional/test_run_daily.py
index 925e014e1..49892ca42 100644
--- a/tests/functional/test_run_daily.py
+++ b/tests/functional/test_run_daily.py
@@ -1,37 +1,54 @@
-
 import logging
 from datetime import datetime, time, timedelta
 
 import pytest
 
-from tests.conftest import AsyncTempTest
+from tests.conftest import ConfiguredAppDaemonFunc
 
 logger = logging.getLogger("AppDaemon._test")
 
 
+@pytest.mark.ci
+@pytest.mark.functional
 class TestRunDaily:
     app_name: str = "test_run_daily"
+    register_delay: float = 0.1
 
-    @pytest.mark.asyncio
-    async def test_run_daily(self, run_app_for_time: AsyncTempTest):
+    @pytest.mark.asyncio(loop_scope="session")
+    @pytest.mark.parametrize("time_input", ["12:34:56.789", time(12, 34, 56, 789000)])
+    async def test_run_daily(self, time_input: str | time, configured_appdaemon: ConfiguredAppDaemonFunc):
         """Test run_daily scheduling."""
-        async with run_app_for_time(self.app_name, run_time=0.5, time="12:34:56.789") as (ad, caplog):
+        app_cfgs = {
+            self.app_name: {
+                "module": "scheduler_test_app",
+                "class": "TestSchedulerRunDaily",
+                "time": time_input,
+            }
+        }
+        async with configured_appdaemon(app_cfgs=app_cfgs) as (ad, caplog):
+            await ad.utility.app_update_event.wait()
             match ad.sched.schedule.get(self.app_name):
                 case None:
                     pytest.fail("No schedule found for the app")
                 case dict(entries):
+                    # Don't really care about the keys (callback handles) here
                     for entry in entries.values():
                         match entry:
-                            case {"interval": interval, "repeat": True, "timestamp": timestamp}:
-                                assert interval == timedelta(days=1).total_seconds()
+                            case {"timestamp": timestamp, "repeat": True, "interval": interval}:
+                                assert interval == timedelta(days=1)
                                 assert timestamp.astimezone(ad.tz).time() == time(12, 34, 56, 789000)
                                 break
                     else:
-                        assert False, "No matching entry found"
+                        pytest.fail("No matching schedule entry found")
 
-    @pytest.mark.asyncio
-    async def test_run_sunrise_offset(self, run_app_for_time: AsyncTempTest):
+    @pytest.mark.asyncio(loop_scope="session")
+    async def test_run_sunrise_offset(self, configured_appdaemon: ConfiguredAppDaemonFunc):
         """Test run_daily scheduling."""
-        async with run_app_for_time(self.app_name, run_time=0.5, time="sunrise - 1hr") as (ad, caplog):
+        app_cfgs = {
+            self.app_name: {
+                "module": "scheduler_test_app",
+                "class": "TestSchedulerRunDaily",
+                "time": "sunrise - 1 hour",
+            }
+        }
+        async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[self.app_name]) as (ad, caplog):
+            await ad.utility.app_update_event.wait()
             match ad.sched.schedule.get(self.app_name):
                 case None:
                     pytest.fail("No schedule found for the app")
@@ -39,7 +56,7 @@ async def test_run_sunrise_offset(self, run_app_for_time: AsyncTempTest):
                     for entry in entries.values():
                         match entry:
                             case {"type": "next_rising", "repeat": True, "timestamp": timestamp, "offset": offset}:
-                                assert offset == timedelta(hours=-1).total_seconds()
+                                assert offset == timedelta(hours=-1)
                                 assert timestamp.astimezone(ad.tz).date() == (datetime.now(ad.tz) + timedelta(days=1)).date()
                                 break
                         else:
diff --git a/tests/functional/test_run_every.py b/tests/functional/test_run_every.py
index 056194efa..a3f7610df 100644
--- a/tests/functional/test_run_every.py
+++ b/tests/functional/test_run_every.py
@@ -1,39 +1,99 @@
+import asyncio
+import itertools
 import logging
 import re
 import uuid
-from datetime import datetime, timedelta
-from functools import partial
+from collections.abc import Generator
+from dataclasses import dataclass, field
+from datetime import datetime, time, timedelta
 from itertools import product
-from typing import cast
 
 import pytest
-import pytz
 from appdaemon.types import TimeDeltaLike
 from appdaemon.utils import parse_timedelta
 
-from tests.conftest import AsyncTempTest
-
-from .utils import check_interval
 
+from tests.conftest import ConfiguredAppDaemonFunc
 
 logger = logging.getLogger("AppDaemon._test")
 
-INTERVALS = ["00:0.35", 1, timedelta(seconds=0.87)]
 
+@dataclass
+class RunEveryTestResults:
+    """Container class for the test results.
+
+    This provides some convenience methods for making different calculations.
+    """
+    app_init: datetime | None = None
+    callback_start: datetime | None = None
+    call_times: list[datetime] = field(default_factory=list)
+
+    @classmethod
+    def from_caplog(cls, caplog: pytest.LogCaptureFixture, app_name: str, test_id: str):
+        results = cls()
+        for record in caplog.records:
+            match record:
+                case logging.LogRecord(
+                    appname=str(app_name_),
+                    msg=str(msg),
+                    created=float(created),
+                ) if app_name_ == app_name and "initialized" in msg:
+                    results.app_init = datetime.fromtimestamp(created)
+                case logging.LogRecord(msg="Registering callbacks every %s", created=float(created)):
+                    results.callback_start = datetime.fromtimestamp(created)
+                case logging.LogRecord(
+                    appname=str(_app_name),
+                    msg="Run every callback executed with kwargs: %s",
+                    args={"msg": str(msg_id)},
+                    created=float(created)
+                ) if _app_name == app_name and msg_id == test_id and results.callback_start is not None:
+                    results.call_times.append(datetime.fromtimestamp(created))
+        return results
+
+    @property
+    def num_calls(self) -> int:
+        return len(self.call_times)
+
+    @property
+    def register_delay(self) -> float:
+        assert self.app_init is not None
+        assert self.callback_start is not None
+        return (self.callback_start - self.app_init).total_seconds()
+
+    @property
+    def start_delay(self) -> timedelta:
+        assert self.callback_start is not None
+        assert len(self.call_times) > 0
+        return (self.call_times[0] - self.callback_start)
+
+    def rel_times(self) -> Generator[timedelta]:
+        assert self.callback_start is not None, "Callbacks not started yet"
+        for ct in self.call_times:
+            yield ct - self.callback_start
+
+    def diffs(self) -> Generator[timedelta]:
+        for t1, t2 in itertools.pairwise(self.rel_times()):
+            yield t2 - t1
+
+    def errors(self, interval: timedelta) -> Generator[timedelta]:
+        for diff in self.diffs():
+            yield abs(interval - diff)
+
+
+INTERVALS = ["00:0.35", 1, 0.87]
 STARTS = ["now - 00:00.5", "now", "now + 00:0.5"]
 
-
 @pytest.mark.asyncio(loop_scope="session")
 @pytest.mark.parametrize(("start", "interval"), product(STARTS, INTERVALS))
 async def test_run_every(
-    run_app_for_time: AsyncTempTest,
+    configured_appdaemon: ConfiguredAppDaemonFunc,
     interval: TimeDeltaLike,
     start: str,
-    n: int = 2,
+    n: int = 3,
 ) -> None:
     interval = parse_timedelta(interval)
 
     # Calculate base runtime for 'n' occurrences plus a small buffer to account for the delay in registering the callback
-    register_delay = timedelta(seconds=0.2)
+    register_delay = timedelta(seconds=0.5)
     run_time = (interval * (n + 1)) + register_delay
 
     # If start time is future "now + offset", add offset to ensure coverage
- - # diffs = utils.time_diffs(utils.filter_caplog(caplog, test_id)) - # logger.debug(diffs) + app_cfgs = { + app_name: { + "module": "scheduler_test_app", + "class": "RunEveryTestApp", + "start": start, + "interval": interval, + "msg": test_id, + "register_delay": register_delay, + } + } + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[app_name, "_scheduler"]) as (ad, caplog): + await asyncio.sleep(run_time.total_seconds()) + + results = RunEveryTestResults.from_caplog(caplog, app_name, test_id) + assert results.app_init, "App never initialized" + assert results.num_calls >= n, "Callback wasn't executed enough" + for err in results.errors(interval): + assert err < timedelta(seconds=0.01), "Buffer exceeded" @pytest.mark.asyncio(loop_scope="session") @pytest.mark.parametrize("start", ["now", "immediate"]) -async def test_run_every_start_time( - run_app_for_time: AsyncTempTest, +async def test_now_immediate( + configured_appdaemon: ConfiguredAppDaemonFunc, start: str, ) -> None: interval = timedelta(seconds=0.5) run_time = timedelta(seconds=1) register_delay = timedelta(seconds=0.1) + app_name = "scheduler_test_app" + test_id = str(uuid.uuid4()) + app_cfgs = { + app_name: { + "module": "scheduler_test_app", + "class": "RunEveryTestApp", + "start": start, + "interval": interval, + "msg": test_id, + "register_delay": register_delay, + } + } + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[app_name]) as (ad, caplog): + await asyncio.sleep(run_time.total_seconds()) + results = RunEveryTestResults.from_caplog(caplog, app_name, test_id) + assert results.app_init, "App never initialized" + assert results.num_calls > 0 match start: case "now": - n = 1 + assert (results.start_delay - interval) <= timedelta(seconds=0.01) case "immediate": - n = 2 - - app_name = "scheduler_test_app" - test_id = str(uuid.uuid4()) - app_args = dict(start=start, interval=interval, msg=test_id, register_delay=register_delay) - async with run_app_for_time(app_name, run_time=run_time.total_seconds(), **app_args) as (ad, caplog): - check_interval( - caplog, - f"kwargs: {{'msg': '{test_id}',", - n=n, - interval=interval - ) - - cb_count = await ad.state.get_state('test', 'admin', f'app.{app_name}', 'instancecallbacks') - assert cast(int, cb_count) >= (n + 1), "Callback didn't get called enough times." 
- -now = datetime.now(pytz.utc) -START_TIMES = ["now", now, now.time(), now.isoformat()] + assert results.start_delay <= timedelta(seconds=0.01) + + +@pytest.fixture +def start_time(request): + """Fixture to generate start time values at test runtime, not collection time.""" + match request.param: + case "string-now": + return "now" + case "datetime-object": + return datetime.now() + case "time-object": + return datetime.now().time() + case "isoformat-string": + return datetime.now().isoformat() + case _: + return request.param + +START_TIME_IDS = ["string-now", "datetime-object", "time-object", "isoformat-string"] @pytest.mark.asyncio(loop_scope="session") -@pytest.mark.parametrize("start", START_TIMES) -async def test_run_every_start_time_types( - run_app_for_time: AsyncTempTest, - start: str, +@pytest.mark.parametrize("start_time", START_TIME_IDS, indirect=True, ids=START_TIME_IDS) +async def test_start_time_types( + configured_appdaemon: ConfiguredAppDaemonFunc, + start_time: str | datetime | time, ) -> None: interval = timedelta(seconds=0.25) run_time = timedelta(seconds=1) register_delay = timedelta(seconds=0.1) - n = 3 app_name = "scheduler_test_app" test_id = str(uuid.uuid4()) - app_args = dict(start=start, interval=interval, msg=test_id, register_delay=register_delay) - async with run_app_for_time(app_name, run_time=run_time.total_seconds(), **app_args) as (ad, caplog): - cb_count = await ad.state.get_state('test', 'admin', f'app.{app_name}', 'instancecallbacks') - assert cast(int, cb_count) >= (n + 1), "Callback didn't get called enough times." + app_cfgs = { + app_name: { + "module": "scheduler_test_app", + "class": "RunEveryTestApp", + "start": start_time, + "interval": interval, + "msg": test_id, + "register_delay": register_delay, + } + } + async with configured_appdaemon(app_cfgs=app_cfgs, loggers=[app_name]) as (ad, caplog): + await asyncio.sleep(run_time.total_seconds()) + results = RunEveryTestResults.from_caplog(caplog, app_name, test_id) + assert results.app_init, "App never initialized" + assert results.num_calls > 0 diff --git a/tests/functional/test_run_in.py b/tests/functional/test_run_in.py index 5aee8dc36..19beb33d7 100644 --- a/tests/functional/test_run_in.py +++ b/tests/functional/test_run_in.py @@ -1,9 +1,12 @@ +import asyncio import logging import uuid import pytest from appdaemon.utils import format_timedelta, parse_timedelta +from tests.conftest import ConfiguredAppDaemonFunc + logger = logging.getLogger("AppDaemon._test") ERROR_MARGIN = 0.005 # Allowable error margin for run_in timing @@ -11,32 +14,46 @@ @pytest.mark.parametrize("delay", [None, 0, 0.5, "0.52", "00:0.3", "00:00:0.25", 1]) @pytest.mark.asyncio(loop_scope="session") -async def test_run_in_delay(run_app_for_time, delay): +async def test_run_in_delay(configured_appdaemon: ConfiguredAppDaemonFunc, delay): run_time = parse_timedelta(delay).total_seconds() run_time += ERROR_MARGIN * 10 register_delay = 0.1 run_time += register_delay test_id = str(uuid.uuid4()) # Generate a unique test ID for each run + + app_cfgs = { + "test_run_in": { + "module": "scheduler_test_app", + "class": "RunInTestApp", + } + } + kwargs = dict(delay=delay, test_id=test_id, register_delay=register_delay) - async with run_app_for_time("test_run_in", run_time=run_time, **kwargs) as (ad, caplog): - assert "TestSchedulerRunIn initialized" in caplog.text - for record in caplog.records: - match record: - case logging.LogRecord(msg=msg, created=created_ts) if msg.startswith("Registering run_in"): - # Nothing really needs to happen 
here. The created_ts variable is set if the record is matched.
-                    # It's already been asserted that this text is somewhere in the caplog text, so this is guaranteed to
-                    # match one of the records.
-                    continue
-                case logging.LogRecord(
-                    msg="Run in callback executed with kwargs: %s",
-                    args={"test_id": tid},
-                    created=callback_ts,
-                ) if tid == test_id:
-                    elapsed_time = callback_ts - created_ts
-                    error = elapsed_time - parse_timedelta(delay).total_seconds()
-                    logger.info(f"Scheduler run_in succeeded with delay {format_timedelta(delay)}, " f"elapsed time {format_timedelta(elapsed_time)}, " f"error {format_timedelta(error)}")
-                    assert error <= ERROR_MARGIN
-                    break
-            else:
-                # If it reaches here, no matching record was found
-                assert False, "Run in callback was not executed"
+
+    async with configured_appdaemon(app_cfgs=app_cfgs, loggers=['test_run_in']) as (ad, caplog):
+        async with ad.app_management.app_run_context("test_run_in", **kwargs):
+            logger.info(f"===== Running app test_run_in for {format_timedelta(run_time)}")
+            # run_time is always a float at this point, so sleep unconditionally
+            await asyncio.sleep(run_time)
+
+            assert "RunInTestApp initialized" in caplog.text
+            for record in caplog.records:
+                match record:
+                    case logging.LogRecord(msg=msg, created=created_ts) if msg.startswith("Registering run_in"):
+                        # Nothing really needs to happen here. The created_ts variable is set if the record is matched.
+                        # It's already been asserted that this text is somewhere in the caplog text, so this is guaranteed to
+                        # match one of the records.
+                        continue
+                    case logging.LogRecord(
+                        msg="Run in callback executed with kwargs: %s",
+                        args={"test_id": tid},
+                        created=callback_ts,
+                    ) if tid == test_id:
+                        elapsed_time = callback_ts - created_ts
+                        error = elapsed_time - parse_timedelta(delay).total_seconds()
+                        logger.info(
+                            f"Scheduler run_in succeeded with delay {format_timedelta(delay)}, "
+                            f"elapsed time {format_timedelta(elapsed_time)}, "
+                            f"error {format_timedelta(error)}"
+                        )
+                        assert error <= ERROR_MARGIN
+                        break
+                else:
+                    # If it reaches here, no matching record was found
+                    assert False, "Run in callback was not executed"
diff --git a/tests/functional/test_startup.py b/tests/functional/test_startup.py
index 9748d70dd..6108ff72f 100644
--- a/tests/functional/test_startup.py
+++ b/tests/functional/test_startup.py
@@ -1,7 +1,8 @@
 import logging
 
 import pytest
-from appdaemon.appdaemon import AppDaemon
+
+from tests.conftest import ConfiguredAppDaemonFunc
 
 logger = logging.getLogger("AppDaemon._test")
 
@@ -10,15 +11,20 @@
 @pytest.mark.functional
 @pytest.mark.parametrize("app_name", ["hello_world", "another_app"])
 @pytest.mark.asyncio(loop_scope="session")
-async def test_hello_world(ad: AppDaemon, caplog: pytest.LogCaptureFixture, app_name: str) -> None:
+async def test_hello_world(configured_appdaemon: ConfiguredAppDaemonFunc, app_name: str) -> None:
     """Run one of the hello world apps and ensure that the startup text is in the logs."""
-    ad.app_dir = ad.config_dir / "apps/hello_world"
-    assert ad.app_dir.exists(), "App directory does not exist"
+    app_cfgs = {
+        app_name: {
+            "module": "hello",
+            "class": "HelloWorld",
+        }
+    }
 
     logger.info("Test started")
-    with caplog.at_level(logging.DEBUG, logger="AppDaemon"):
-        async with ad.app_management.app_run_context(app_name):
-            await ad.utility.app_update_event.wait()
+    async with configured_appdaemon(app_cfgs=app_cfgs) as (ad, caplog):
+        await ad.utility.app_update_event.wait()
 
     logger.info("Test completed")
     assert "Hello from AppDaemon" in caplog.text
@@ -28,15 +34,18 @@
async def test_hello_world(ad: AppDaemon, caplog: pytest.LogCaptureFixture, app_name: str) -> None:
 @pytest.mark.ci
 @pytest.mark.functional
 @pytest.mark.asyncio(loop_scope="session")
-async def test_no_plugins(ad_obj: AppDaemon, caplog: pytest.LogCaptureFixture) -> None:
+async def test_no_plugins(configured_appdaemon: ConfiguredAppDaemonFunc) -> None:
     """Ensure that apps start correctly when there are no plugins configured."""
-    ad_obj.config.plugins = {}
-    ad_obj.app_dir = ad_obj.config_dir / "apps/hello_world"
+    app_name = "hello-world"
+
+    app_cfgs = {
+        app_name: {
+            "module": "hello",
+            "class": "HelloWorld",
+        }
+    }
 
-    ad_obj.start()
-    with caplog.at_level(logging.INFO, logger="AppDaemon"):
-        async with ad_obj.app_management.app_run_context("hello_world"):
-            await ad_obj.utility.app_update_event.wait()
+    async with configured_appdaemon(app_cfgs=app_cfgs) as (ad, caplog):
+        await ad.utility.app_update_event.wait()
 
-    await ad_obj.stop()
     assert not any(r.levelname == "ERROR" for r in caplog.records)
diff --git a/tests/functional/test_state.py b/tests/functional/test_state.py
index 45c7670a6..29d008680 100644
--- a/tests/functional/test_state.py
+++ b/tests/functional/test_state.py
@@ -1,35 +1,109 @@
 import asyncio
 import logging
 import uuid
-from time import perf_counter
+from dataclasses import dataclass
+from datetime import datetime, timedelta
+from typing import cast
 
 import pytest
 
 from appdaemon.app_management import ManagedObject
+from appdaemon.utils import format_timedelta
 
-from .utils import AsyncTempTest
+from tests.conftest import ConfiguredAppDaemonFunc
 
 logger = logging.getLogger("AppDaemon._test")
 
 
+@dataclass
+class StateTestResult:
+    init_time: datetime
+    state_change_time: datetime | None
+    callback_time: datetime
+
+    @classmethod
+    def from_caplog(cls, caplog: pytest.LogCaptureFixture) -> "StateTestResult":
+        init_time, state_change_time, callback_time = None, None, None
+        for record in caplog.records:
+            match record:
+                case logging.LogRecord(created=float(created), msg="%s initialized"):
+                    init_time = datetime.fromtimestamp(created)
+                case logging.LogRecord(created=float(created), msg="Changing state of %s with kwargs: %s"):
+                    state_change_time = datetime.fromtimestamp(created)
+                case logging.LogRecord(created=float(created), msg="State callback executed successfully"):
+                    callback_time = datetime.fromtimestamp(created)
+
+        assert init_time is not None, "Initialization log time not found"
+        assert callback_time is not None, "Callback execution log time not found"
+
+        return cls(
+            init_time=init_time,
+            state_change_time=state_change_time,
+            callback_time=callback_time,
+        )
+
+    @property
+    def state_change_delay(self) -> timedelta | None:
+        match self.state_change_time:
+            case datetime() as sct:
+                return sct - self.init_time
+
+    @property
+    def change_callback_delay(self) -> timedelta | None:
+        """Time between changing the state and the callback execution."""
+        match self.state_change_time:
+            case datetime() as sct:
+                return self.callback_time - sct
+
+    @property
+    def init_callback_delay(self) -> timedelta | None:
+        """Time between initializing the app and the callback execution."""
+        return self.callback_time - self.init_time
+
+
 @pytest.mark.ci
 @pytest.mark.functional
 class TestStateCallback:
-    """Class to group the various tests for state callbacks."""
+    """Class to group the various tests for state callbacks.
+ + - Tests use state_test_app.StateTestApp as the app under test + App Args: + listen_kwargs: Keyword arguments for the state listener (e.g., filters) + state_kwargs: Keyword arguments for setting the state (e.g., new state value) + delay: Delay before changing the state (default: 0.1 seconds) + - Tests use `self._run_callback_test` for common logic + - Registers a callback for a certain state change + - Changes the state after a short delay + - Waits for the callback to set the async Event with a timeout + """ app_name: str = "state_test_app" - timeout: int | float = 0.6 - - async def _run_callback_test(self, run_app_for_time: AsyncTempTest, app_args: dict, sign: bool) -> None: + timeout: float = 0.6 + + async def _run_callback_test( + self, + configured_appdaemon: ConfiguredAppDaemonFunc, + app_args: dict, + sign: bool + ) -> pytest.LogCaptureFixture: """Helper method to run callback tests with common logic. This method provides a shared test pattern for state callback testing where a callback is expected to either fire (sign=True) or not fire (sign=False) based on state matching. """ - start = perf_counter() - async with run_app_for_time(self.app_name, **app_args) as (ad, caplog): + app_cfgs = { + self.app_name: { + "module": "state_test_app", + "class": "StateTestApp", + **app_args, + } + } + + async with configured_appdaemon(app_cfgs=app_cfgs) as (ad, caplog): + await ad.utility.app_update_event.wait() match ad.app_management.objects.get(self.app_name): case ManagedObject(object=app_obj): - wait_coro = asyncio.wait_for(app_obj.execute_event.wait(), timeout=self.timeout) + execute_event = cast(asyncio.Event, app_obj.execute_event) + wait_coro = asyncio.wait_for(execute_event.wait(), timeout=self.timeout) if sign: await wait_coro logger.debug("Callback execute event was set") @@ -40,30 +114,32 @@ async def _run_callback_test(self, run_app_for_time: AsyncTempTest, app_args: di logger.debug("Callback execute event was not set") case _: raise ValueError("App object not found in app management") - logger.debug(f"Test completed in {perf_counter() - start:.3f} seconds") + return caplog @pytest.mark.parametrize("sign", [True, False]) @pytest.mark.asyncio(loop_scope="session") - async def test_new_state_callback(self, run_app_for_time: AsyncTempTest, sign: bool) -> None: + async def test_new_state_callback( + self, + configured_appdaemon: ConfiguredAppDaemonFunc, + sign: bool + ) -> None: """Test the state callback filtering based on new state values. State callbacks should only be fired when the new state matches the filter criteria. + Args: + configured_appdaemon: Factory fixture for creating configured AppDaemon instances + sign: If True, the callback should fire (positive case); if False, it should not (negative case) + Process: - - A unique value is generated for the new state. If the callback is supposed to fire (positive case), - then the same value is used for listening to the state change. Otherwise (negative case), a different, unique - value is used to listen for the state change, which will prevent the callback from executing. - - The unique state and listen values are passed to the app as args. - - The ``state_test_app`` app is run until a python :py:class:`~asyncio.Event` is set. - - The :py:class:`~asyncio.Event` is created when the app initializes. - - The app listens for the state change and then triggers it after a short delay, using the relevant kwargs for each. - - If the callback is executed, :py:class:`~asyncio.Event` is set. 
+ - A unique value is generated for the new state + - If positive case, the same value is used for listening; if negative, a different value is used + - The app listens for the state change and triggers it after a short delay + - An Event is set if the callback executes Coverage: - - Positive - The new state value matches the listen filter, so the callback is executed. - - Negative - The new state value does not match the listen filter, so the callback is not executed. + - Positive: new state value matches the listen filter, callback executes + - Negative: new state value doesn't match the listen filter, callback doesn't execute """ new_state = str(uuid.uuid4()) listen_state = new_state if sign else str(uuid.uuid4()) @@ -71,92 +147,93 @@ async def test_new_state_callback(self, run_app_for_time: AsyncTempTest, sign: b "listen_kwargs": {"new": listen_state}, "state_kwargs": {"state": new_state}, } - await self._run_callback_test(run_app_for_time, app_args, sign) + caplog = await self._run_callback_test(configured_appdaemon, app_args, sign) + if sign: + result = StateTestResult.from_caplog(caplog) + logger.info(format_timedelta(result.change_callback_delay)) @pytest.mark.parametrize("sign", [True, False]) @pytest.mark.asyncio(loop_scope="session") - async def test_old_state_callback(self, run_app_for_time: AsyncTempTest, sign: bool) -> None: + async def test_old_state_callback( + self, + configured_appdaemon: ConfiguredAppDaemonFunc, + sign: bool + ) -> None: """Test the state callback filtering based on old state values. State callbacks should only be fired when the old state matches the filter criteria. + Args: + configured_appdaemon: Factory fixture for creating configured AppDaemon instances + sign: If True, the callback should fire (positive case); if False, it should not (negative case) + Process: - - A unique value is generated for the state. If the callback is supposed to fire (positive case), - then the same value is used for listening to the old state. Otherwise (negative case), a different, unique - value is used to listen for the old state, which will prevent the callback from executing. - - The unique state and listen values are passed to the app as args. - - The ``state_test_app`` app is run and the state is changed twice to trigger an old state condition. - - The app listens for the old state change and waits for the callback execution. - - If the callback is executed, :py:class:`~asyncio.Event` is set. + - A unique value is generated for the state + - If positive case, the same value is used for listening to old state; if negative, a different value is used + - The app changes state twice to trigger an old state condition + - An Event is set if the callback executes Coverage: - - Positive - The old state value matches the listen filter, so the callback is executed. - - Negative - The old state value does not match the listen filter, so the callback is not executed. 
+ - Positive: old state value matches the listen filter, callback executes + - Negative: old state value doesn't match the listen filter, callback doesn't execute """ new_state = str(uuid.uuid4()) - listen_state = new_state if sign else str(uuid.uuid4()) + listen_state = "initialized" if sign else str(uuid.uuid4()) app_args = { "listen_kwargs": {"old": listen_state}, "state_kwargs": {"state": new_state}, } - async with run_app_for_time(self.app_name, **app_args) as (ad, caplog): - match ad.app_management.objects.get(self.app_name): - case ManagedObject(object=app_obj): - app_obj.run_in(app_obj.test_change_state, delay=app_obj.delay * 2, state="abc") - wait_coro = asyncio.wait_for(app_obj.execute_event.wait(), timeout=self.timeout * 2) - if sign: - await wait_coro - logger.debug("Callback execute event was set") - else: - # We expect the timeout because the new state filter doesn't match - with pytest.raises(asyncio.TimeoutError): - await wait_coro - logger.debug("Callback execute event was not set") - case _: - raise ValueError("App object not found in app management") + caplog = await self._run_callback_test(configured_appdaemon, app_args, sign) + if sign: + result = StateTestResult.from_caplog(caplog) + logger.info(format_timedelta(result.change_callback_delay)) @pytest.mark.parametrize("sign", [True, False]) @pytest.mark.asyncio(loop_scope="session") - async def test_attribute_callback(self, run_app_for_time: AsyncTempTest, sign: bool) -> None: + async def test_attribute_callback( + self, + configured_appdaemon: ConfiguredAppDaemonFunc, + sign: bool + ) -> None: """Test the state callback filtering based on attribute values. State callbacks should only be fired when the specified attribute's new value matches the filter criteria. + Args: + configured_appdaemon: Factory fixture for creating configured AppDaemon instances + sign: If True, the callback should fire (positive case); if False, it should not (negative case) + Process: - - A unique value is generated for the attribute. If the callback is supposed to fire (positive case), - then the same value is used for listening to the attribute change. Otherwise (negative case), a different, - unique value is used to listen for the attribute change, which will prevent the callback from executing. - - The unique attribute and listen values are passed to the app as args. - - The ``state_test_app`` app is run until a python :py:class:`~asyncio.Event` is set. - - The :py:class:`~asyncio.Event` is created when the app initializes. - - The app listens for the attribute change and then triggers a state change with the relevant attribute value. - - If the callback is executed, :py:class:`~asyncio.Event` is set. + - A unique value is generated for the attribute + - If positive case, the same value is used for listening to the attribute change; if negative, a different value is used + - The app listens for the attribute change and triggers a state change with the relevant attribute value + - An Event is set if the callback executes Coverage: - - Positive - The attribute's new value matches the listen filter, so the callback is executed. - - Negative - The attribute's new value does not match the listen filter, so the callback is not executed. 
+        - Positive: attribute's new value matches the listen filter, callback executes
+        - Negative: attribute's new value doesn't match the listen filter, callback doesn't execute
         """
         new_state = str(uuid.uuid4())
         listen_state = new_state if sign else str(uuid.uuid4())
-        app_args = {"listen_kwargs": {"attribute": "test_attr", "new": listen_state}, "state_kwargs": {"state": "changed", "test_attr": new_state}}
-        await self._run_callback_test(run_app_for_time, app_args, sign)
+        app_args = {
+            "listen_kwargs": {"attribute": "test_attr", "new": listen_state},
+            "state_kwargs": {"state": "changed", "test_attr": new_state}
+        }
+        caplog = await self._run_callback_test(configured_appdaemon, app_args, sign)
+        if sign:
+            result = StateTestResult.from_caplog(caplog)
+            logger.info(format_timedelta(result.change_callback_delay))
 
     @pytest.mark.asyncio(loop_scope="session")
-    async def test_immediate_callback(self, run_app_for_time: AsyncTempTest) -> None:
+    async def test_immediate_callback(self, configured_appdaemon: ConfiguredAppDaemonFunc) -> None:
+        """Test that the immediate flag on state listeners triggers the callback upon registration."""
         app_args = {
-            "listen_kwargs": {
-                "new": "on",
-                "immediate": True,
-            },
+            "listen_kwargs": {"new": "initialized", "immediate": True},
         }
-        app_name = "test_immediate_state"
-        async with run_app_for_time(app_name, **app_args) as (ad, caplog):
-            match ad.app_management.objects.get(app_name):
-                case ManagedObject(object=app_obj):
-                    wait_coro = asyncio.wait_for(app_obj.execute_event.wait(), timeout=self.timeout)
-                    await wait_coro
-                    logger.debug("Callback execute event was set")
+        caplog = await self._run_callback_test(configured_appdaemon, app_args, sign=True)
+        result = StateTestResult.from_caplog(caplog)
+
+        assert result.init_callback_delay is not None
diff --git a/tests/utils.py b/tests/utils.py
index 7cec405cd..a790f1d75 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1,10 +1,14 @@
+import asyncio
 import logging
 from collections.abc import Generator, Iterable
 from datetime import datetime, timedelta
 from itertools import pairwise
 from logging import LogRecord
+from typing import cast
 
 import pytest
+from appdaemon.appdaemon import AppDaemon
+from appdaemon.models.internal.app_management import ManagedObject
 
 logger = logging.getLogger("AppDaemon._test")
 
@@ -41,3 +45,24 @@ def assert_timedelta(
             raise
 
     # assert all((diff - expected) <= buffer for diff in time_diffs(records))
+
+
+async def wait_for_event(ad: AppDaemon, app_name: str, event_attr: str, timeout: float = 0.5):
+    """Wait for an app event to be set.
+
+    Encapsulates a chunk of logic for the pattern of waiting for an app to set an async event during a test.
+    """
+    match ad.app_management.objects.get(app_name):
+        case ManagedObject(object=app_obj):
+            event = cast(asyncio.Event, getattr(app_obj, event_attr))
+            return await asyncio.wait_for(event.wait(), timeout=timeout)
+        case None:
+            pytest.fail(f"App {app_name} not found")
+
+
+def get_app_log_records(caplog: pytest.LogCaptureFixture, app_name: str) -> Generator[LogRecord]:
+    """Yield the log records emitted by a specific app."""
+    for record in caplog.records:
+        match record:
+            case LogRecord(appname=str(appname)) if appname == app_name:
+                yield record
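Taken together, the fixture and these helpers compose into a compact test skeleton. A usage sketch, assuming a placeholder app wired to the hello/HelloWorld module used elsewhere in this PR and that apps log an "initialized" message on startup:

    import pytest

    from tests.conftest import ConfiguredAppDaemonFunc
    from tests.utils import get_app_log_records


    @pytest.mark.asyncio(loop_scope="session")
    async def test_helpers_sketch(configured_appdaemon: ConfiguredAppDaemonFunc) -> None:
        # 'demo_app' is a placeholder name; module/class follow the hello-world app in this PR
        app_cfgs = {"demo_app": {"module": "hello", "class": "HelloWorld"}}
        async with configured_appdaemon(app_cfgs=app_cfgs) as (ad, caplog):
            await ad.utility.app_update_event.wait()
            records = list(get_app_log_records(caplog, "demo_app"))
            assert any("initialized" in r.getMessage() for r in records)

Filtering with `get_app_log_records` keeps the assertions scoped to one app's records, which matters once several apps share a single AppDaemon instance in a test.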